{ // 获取包含Hugging Face文本的span元素 const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap'); spans.forEach(span => { if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) { span.textContent = 'AI快站'; } }); }); // 替换logo图片的alt属性 document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => { if (img.alt.match(/Hugging\s*Face/i)) { img.alt = 'AI快站 logo'; } }); } // 替换导航栏中的链接 function replaceNavigationLinks() { // 已替换标记,防止重复运行 if (window._navLinksReplaced) { return; } // 已经替换过的链接集合,防止重复替换 const replacedLinks = new Set(); // 只在导航栏区域查找和替换链接 const headerArea = document.querySelector('header') || document.querySelector('nav'); if (!headerArea) { return; } // 在导航区域内查找链接 const navLinks = headerArea.querySelectorAll('a'); navLinks.forEach(link => { // 如果已经替换过,跳过 if (replacedLinks.has(link)) return; const linkText = link.textContent.trim(); const linkHref = link.getAttribute('href') || ''; // 替换Spaces链接 - 仅替换一次 if ( (linkHref.includes('/spaces') || linkHref === '/spaces' || linkText === 'Spaces' || linkText.match(/^s*Spacess*$/i)) && linkText !== 'OCR模型免费转Markdown' && linkText !== 'OCR模型免费转Markdown' ) { link.textContent = 'OCR模型免费转Markdown'; link.href = 'https://fast360.xyz'; link.setAttribute('target', '_blank'); link.setAttribute('rel', 'noopener noreferrer'); replacedLinks.add(link); } // 删除Posts链接 else if ( (linkHref.includes('/posts') || linkHref === '/posts' || linkText === 'Posts' || linkText.match(/^s*Postss*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } // 替换Docs链接 - 仅替换一次 else if ( (linkHref.includes('/docs') || linkHref === '/docs' || linkText === 'Docs' || linkText.match(/^s*Docss*$/i)) && linkText !== '模型下载攻略' ) { link.textContent = '模型下载攻略'; link.href = '/'; replacedLinks.add(link); } // 删除Enterprise链接 else if ( (linkHref.includes('/enterprise') || linkHref === '/enterprise' || linkText === 'Enterprise' || linkText.match(/^s*Enterprises*$/i)) 
) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } }); // 查找可能嵌套的Spaces和Posts文本 const textNodes = []; function findTextNodes(element) { if (element.nodeType === Node.TEXT_NODE) { const text = element.textContent.trim(); if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') { textNodes.push(element); } } else { for (const child of element.childNodes) { findTextNodes(child); } } } // 只在导航区域内查找文本节点 findTextNodes(headerArea); // 替换找到的文本节点 textNodes.forEach(node => { const text = node.textContent.trim(); if (text === 'Spaces') { node.textContent = node.textContent.replace(/Spaces/g, 'OCR模型免费转Markdown'); } else if (text === 'Posts') { // 删除Posts文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } else if (text === 'Enterprise') { // 删除Enterprise文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } }); // 标记已替换完成 window._navLinksReplaced = true; } // 替换代码区域中的域名 function replaceCodeDomains() { // 特别处理span.hljs-string和span.njs-string元素 document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换hljs-string类的span中的域名(移除多余的转义符号) document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换pre和code标签中包含git clone命令的域名 document.querySelectorAll('pre, code').forEach(element => { if (element.textContent && element.textContent.includes('git clone')) { const text = element.innerHTML; if (text.includes('huggingface.co')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 处理特定的命令行示例 document.querySelectorAll('pre, code').forEach(element => { const 
text = element.innerHTML; if (text.includes('huggingface.co')) { // 针对git clone命令的专门处理 if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 特别处理模型下载页面上的代码片段 document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => { const content = container.innerHTML; if (content && content.includes('huggingface.co')) { container.innerHTML = content.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 特别处理模型仓库克隆对话框中的代码片段 try { // 查找包含"Clone this model repository"标题的对话框 const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]'); if (cloneDialog) { // 查找对话框中所有的代码片段和命令示例 const codeElements = cloneDialog.querySelectorAll('pre, code, span'); codeElements.forEach(element => { if (element.textContent && element.textContent.includes('huggingface.co')) { if (element.innerHTML.includes('huggingface.co')) { element.innerHTML = element.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { element.textContent = element.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); } // 更精确地定位克隆命令中的域名 document.querySelectorAll('[data-target]').forEach(container => { const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string'); codeBlocks.forEach(block => { if (block.textContent && block.textContent.includes('huggingface.co')) { if (block.innerHTML.includes('huggingface.co')) { block.innerHTML = block.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { block.textContent = block.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); }); } catch (e) { // 错误处理但不打印日志 } } // 当DOM加载完成后执行替换 if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', () => { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); 
replaceNavigationLinks(); } }, 3000); }); } else { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); } // 增加一个MutationObserver来处理可能的动态元素加载 const observer = new MutationObserver(mutations => { // 检查是否导航区域有变化 const hasNavChanges = mutations.some(mutation => { // 检查是否存在header或nav元素变化 return Array.from(mutation.addedNodes).some(node => { if (node.nodeType === Node.ELEMENT_NODE) { // 检查是否是导航元素或其子元素 if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) { return true; } // 检查是否在导航元素内部 let parent = node.parentElement; while (parent) { if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') { return true; } parent = parent.parentElement; } } return false; }); }); // 只在导航区域有变化时执行替换 if (hasNavChanges) { // 重置替换状态,允许再次替换 window._navLinksReplaced = false; replaceHeaderBranding(); replaceNavigationLinks(); } }); // 开始观察document.body的变化,包括子节点 if (document.body) { observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \n\n )wrapper\",\n {{\"$DOTS\", dot_graphs_compressed},\n {\"$FRAMES\", frames},\n {\"$TITLE\", graph_title}});\n}\nstatic std::string GraphTitle(const HloComputation& computation) {\n return absl::StrCat(computation.parent()->name(), \"_\", computation.name());\n}\nabsl::StatusOr WrapFusionExplorer(\n const HloComputation& computation) {\n absl::MutexLock lock(&fusion_visualizer_state_mu);\n const FusionVisualizerProgress& visualizer_progress =\n fusion_visualizer_states[FusionVisualizerStateKey(computation)];\n return WrapFusionExplorer(visualizer_progress, GraphTitle(computation));\n}\nstatic absl::StatusOr WrapDotInHtml(absl::string_view dot,\n absl::string_view title) {\n 
FusionVisualizerProgress progress;\n progress.AddState(dot, title, std::nullopt);\n return WrapFusionExplorer(progress, title);\n}\nstatic absl::StatusOr WrapDotInFormat(\n const HloComputation& computation, absl::string_view dot,\n RenderedGraphFormat format) ABSL_EXCLUSIVE_LOCKS_REQUIRED(url_renderer_mu) {\n switch (format) {\n case RenderedGraphFormat::kUrl:\n CHECK(url_renderer != nullptr)\n << \"Should have checked url_renderer != null before calling.\";\n return (*url_renderer)(dot);\n case RenderedGraphFormat::kHtml:\n return WrapDotInHtml(dot, GraphTitle(computation));\n case RenderedGraphFormat::kDot:\n return std::string(dot);\n }\n}\nvoid RegisterGraphToURLRenderer(\n std::function(absl::string_view)> renderer) {\n absl::MutexLock lock(&url_renderer_mu);\n if (url_renderer != nullptr) {\n LOG(WARNING) << \"Multiple calls to RegisterGraphToURLRenderer. Last call \"\n \"wins, but because order of initialization in C++ is \"\n \"nondeterministic, this may not be what you want.\";\n }\n delete url_renderer;\n url_renderer =\n new std::function(absl::string_view)>(\n std::move(renderer));\n}\nvoid RegisterFusionState(const HloComputation& computation,\n absl::string_view label,\n const HloInstruction& consumer,\n const HloInstruction* producer) {\n absl::MutexLock lock(&fusion_visualizer_state_mu);\n FusionVisualizerProgress& fusion_progress =\n fusion_visualizer_states[FusionVisualizerStateKey(computation)];\n static constexpr int kRenderRadius = 4;\n absl::flat_hash_set render_boundary;\n for (const HloInstruction* user : consumer.users()) {\n render_boundary.insert(user);\n }\n HloDotDumper dumper(\n consumer.parent(),\n StrCat(\"Rendering of \", kRenderRadius, \" nodes around fusion consumer\"),\n consumer.GetModule()->config().debug_options(), {},\n MakeNodeRadiusAroundFilter(&consumer, kRenderRadius, render_boundary));\n std::string dot_txt = dumper.Dump();\n std::optional producer_to_highlight;\n if (producer) {\n producer_to_highlight = 
dumper.CssIdForInstruction(*producer);\n }\n fusion_progress.AddState(dot_txt, label, producer_to_highlight);\n}\nabsl::StatusOr RenderGraph(\n const HloComputation& computation, absl::string_view label,\n const DebugOptions& debug_options, RenderedGraphFormat format,\n HloRenderOptions hlo_render_options,\n std::optional>\n color_map) {\n absl::MutexLock lock(&url_renderer_mu);\n if (format == RenderedGraphFormat::kUrl && url_renderer == nullptr) {\n return Unavailable(\"Can't render as URL; no URL renderer was registered.\");\n }\n std::string rendered_dot =\n HloDotDumper(&computation, label, debug_options, hlo_render_options,\n NodeFilter(), color_map)\n .Dump();\n return WrapDotInFormat(computation, rendered_dot, format);\n}\nabsl::StatusOr RenderAllComputationsToHtml(\n const HloModule& module) {\n FusionVisualizerProgress progress;\n std::vector instrs =\n module.entry_computation()->MakeInstructionPostOrder();\n absl::c_reverse(instrs);\n for (const HloInstruction* instr : instrs) {\n if (absl::c_linear_search(\n std::vector{HloOpcode::kConstant,\n HloOpcode::kGetTupleElement},\n instr->opcode())) {\n continue;\n }\n HloRenderOptions opts;\n opts.show_fusion_subcomputations = true;\n opts.show_backend_config = true;\n opts.show_while_subcomputations = instr->opcode() == HloOpcode::kWhile;\n static constexpr int64_t max_nodes_to_render = 100;\n absl::flat_hash_set render_boundary;\n NodeFilter filter = MakeNodeRadiusAroundFilter(instr, 2, render_boundary);\n if (filter.GetNumRendered().value_or(1) > max_nodes_to_render) {\n filter = MakeNodeRadiusAroundFilter(instr, 1, render_boundary);\n }\n std::string dot =\n HloDotDumper(module.entry_computation(), instr->name(),\n module.config().debug_options(), opts, filter)\n .Dump();\n progress.AddState(dot, instr->name(), std::nullopt);\n }\n return WrapFusionExplorer(progress, module.name());\n}\nabsl::StatusOr RenderNeighborhoodAround(\n const HloInstruction& node, int radius, RenderedGraphFormat format,\n 
HloRenderOptions hlo_render_options,\n const absl::flat_hash_set& boundary,\n std::optional>\n color_map) {\n absl::MutexLock lock(&url_renderer_mu);\n if (format == RenderedGraphFormat::kUrl && url_renderer == nullptr) {\n return FailedPrecondition(\n \"Can't render as URL; no URL renderer was registered.\");\n }\n std::string label =\n StrCat(\"Neighborhood of \", radius, \" nodes around \", node.name());\n std::string rendered_dot =\n HloDotDumper(\n node.parent(), label, node.GetModule()->config().debug_options(),\n hlo_render_options,\n MakeNodeRadiusAroundFilter(&node, radius, boundary), color_map)\n .Dump();\n return WrapDotInFormat(*node.parent(), rendered_dot, format);\n}\nabsl::StatusOr RenderAllPathsFromTo(\n const HloInstruction& from, const HloInstruction& to, int64_t max_nodes,\n RenderedGraphFormat format, HloRenderOptions hlo_render_options) {\n absl::MutexLock lock(&url_renderer_mu);\n if (format == RenderedGraphFormat::kUrl && url_renderer == nullptr) {\n return FailedPrecondition(\n \"Can't render as URL; no URL renderer was registered.\");\n }\n CHECK_EQ(from.parent(), to.parent()) << \"Nodes must be in same computation!\";\n auto debug_options = from.GetModule()->config().debug_options();\n bool hit_limit = false;\n NodeFilter filter = MakeNodeFromToFilter(&from, &to, max_nodes, &hit_limit);\n std::string label;\n if (!hit_limit) {\n label = StrCat(\"All paths from \", from.name(), \" to \", to.name());\n } else {\n label = StrCat(max_nodes, \" nodes on the shortest paths from \", from.name(),\n \" to \", to.name(),\n \"

***SHOWING ONLY A SUBSET OF ALL PATHS BETWEEN \"\n \"NODES***

\");\n }\n std::string rendered_dot = HloDotDumper(from.parent(), label, debug_options,\n hlo_render_options, filter)\n .Dump();\n return WrapDotInFormat(*from.parent(), rendered_dot, format);\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/hlo_graph_dumper.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_format.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/test.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tests/test_utils.h\"\n#include \"xla/xla.pb.h\"\nnamespace xla {\nnamespace {\nusing absl::StrCat;\nusing ::testing::HasSubstr;\nusing HloGraphDumperTest = HloTestBase;\nstd::string TestName() {\n return ::testing::UnitTest::GetInstance()->current_test_info()->name();\n}\nTEST_F(HloGraphDumperTest, NestedFusion) {\n HloComputation::Builder b(\"b\");\n auto shape = ShapeUtil::MakeShape(F32, {10, 100});\n std::vector params;\n for (int i = 0; i <= 4; ++i) {\n params.push_back(b.AddInstruction(\n HloInstruction::CreateParameter(i, shape, StrCat(\"param\", i))));\n }\n std::vector sums;\n sums.push_back(b.AddInstruction(HloInstruction::CreateBinary(\n shape, HloOpcode::kAdd, params[0], params[1])));\n for (int i = 0; i <= 2; ++i) {\n sums.push_back(b.AddInstruction(HloInstruction::CreateBinary(\n shape, HloOpcode::kAdd, sums[i], params[i + 2])));\n }\n HloModuleConfig config;\n HloModule m(TestName(), config);\n m.AddEntryComputation(b.Build());\n HloComputation* root_computation = m.entry_computation();\n auto* outer_fusion = root_computation->CreateFusionInstruction(\n {sums[3], sums[2], sums[1], sums[0]}, HloInstruction::FusionKind::kLoop);\n std::vector fused_sums;\n for (auto* instr : outer_fusion->fused_instructions_computation()\n ->MakeInstructionPostOrder()) {\n if 
(instr->opcode() == HloOpcode::kAdd) {\n fused_sums.push_back(instr);\n }\n }\n auto* inner_fusion =\n outer_fusion->fused_instructions_computation()->CreateFusionInstruction(\n {fused_sums[1], fused_sums[0]}, HloInstruction::FusionKind::kLoop);\n TF_ASSERT_OK_AND_ASSIGN(\n std::string graph,\n RenderGraph(*root_computation, \"\", DebugOptions(),\n RenderedGraphFormat::kDot));\n for (const HloComputation* computation :\n {root_computation, \n inner_fusion->fused_instructions_computation(),\n outer_fusion->fused_instructions_computation()}) {\n for (const HloInstruction* instruction : computation->instructions()) {\n EXPECT_THAT(graph, HasSubstr(instruction->name()));\n }\n }\n const HloInstruction* inner_sum = nullptr;\n for (const HloInstruction* instruction :\n inner_fusion->fused_instructions_computation()->instructions()) {\n if (instruction->opcode() == HloOpcode::kAdd) {\n inner_sum = instruction;\n break;\n }\n }\n ASSERT_NE(inner_sum, nullptr);\n TF_ASSERT_OK_AND_ASSIGN(std::string neighborhood_graph,\n RenderNeighborhoodAround(*inner_sum, 1,\n RenderedGraphFormat::kDot));\n EXPECT_THAT(neighborhood_graph, HasSubstr(inner_sum->name()));\n}\nTEST_F(HloGraphDumperTest, Constant) {\n HloComputation::Builder b(\"b\");\n auto instruction = b.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(-42)));\n instruction->SetAndSanitizeName(\"i_am_a_constant_root_instruction\");\n HloModuleConfig config;\n HloModule m(TestName(), config);\n HloComputation* root_computation = m.AddEntryComputation(b.Build());\n TF_ASSERT_OK_AND_ASSIGN(\n std::string graph,\n RenderGraph(*root_computation, \"an_empty_graph\", DebugOptions(),\n RenderedGraphFormat::kDot));\n EXPECT_THAT(graph, HasSubstr(\"an_empty_graph\"));\n}\nTEST_F(HloGraphDumperTest, TupleConstant) {\n Shape tuple_shape = ShapeUtil::MakeTupleShape(\n {ShapeUtil::MakeShape(F32, {3, 2}), ShapeUtil::MakeShape(S32, {4, 5})});\n HloComputation::Builder b(\"b\");\n auto constant = b.AddInstruction(\n 
HloInstruction::CreateConstant(Literal::CreateFromShape(tuple_shape)));\n auto gte = b.AddInstruction(HloInstruction::CreateGetTupleElement(\n ShapeUtil::MakeShape(F32, {3, 2}), constant, 0));\n HloModuleConfig config;\n HloModule m(TestName(), config);\n HloComputation* root_computation = m.AddEntryComputation(b.Build(gte));\n TF_ASSERT_OK_AND_ASSIGN(\n std::string graph,\n RenderGraph(*root_computation, \"tuple_constant\", DebugOptions(),\n RenderedGraphFormat::kDot));\n EXPECT_THAT(graph, HasSubstr(\"tuple_constant\"));\n EXPECT_THAT(graph, HasSubstr(\"constant (f32[3,2], s32[4,5])\"));\n}\nTEST_F(HloGraphDumperTest, Compare) {\n const char* hlo_string = R\"(\n HloModule comp\n ENTRY comp {\n param.0 = f32[10] parameter(0)\n param.1 = f32[10] parameter(1)\n ROOT lt = pred[10] compare(param.0, param.1), direction=LT\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n std::string graph,\n RenderGraph(*module->entry_computation(), \"tuple_constant\",\n DebugOptions(), RenderedGraphFormat::kDot));\n EXPECT_THAT(graph, HasSubstr(\"direction=LT\"));\n}\nTEST_F(HloGraphDumperTest, HasStatisticsViz) {\n const char* hlo_string = R\"(\n HloModule comp\n ENTRY comp {\n param.0 = f32[10] parameter(0), statistics={visualizing_index=0,stat-0=0.5}\n param.1 = f32[10] parameter(1), statistics={visualizing_index=1,stat-0=55.5,stat-1=44.4}\n ROOT lt = pred[10] compare(param.0, param.1), direction=LT\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n std::string graph,\n RenderGraph(*module->entry_computation(), \"tuple_constant\",\n DebugOptions(), RenderedGraphFormat::kDot));\n}\nTEST_F(HloGraphDumperTest, RootIsConstant) {\n const char* hlo_string = R\"(\nHloModule indexed_conditional\n%then_branch (empty: ()) -> f32[] {\n %empty = () parameter(0)\n ROOT %then = f32[] constant(1)\n}\n%else_branch (empty.1: ()) -> f32[] {\n %empty.1 = () 
parameter(0)\n ROOT %else = f32[] constant(2)\n}\nENTRY %conditional_select (constant: pred[]) -> (f32[]) {\n %constant = pred[] parameter(0)\n %emptytuple = () tuple()\n %conditional = f32[] conditional(pred[] %constant, () %emptytuple, () %emptytuple), true_computation=%then_branch, false_computation=%else_branch\n ROOT %t = (f32[]) tuple(f32[] %conditional)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n std::string graph,\n RenderGraph(*module->entry_computation(), \"tuple_constant\",\n DebugOptions(), RenderedGraphFormat::kDot));\n}\nTEST_F(HloGraphDumperTest, OverrideColors) {\n const char* hlo_string = R\"(\n HloModule comp\n ENTRY comp {\n param.0 = f32[10] parameter(0)\n param.1 = f32[10] parameter(1)\n ROOT lt = pred[10] compare(param.0, param.1), direction=LT\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n absl::flat_hash_map color_map;\n ColorStats color_stats_1;\n color_stats_1.color = \"#A9C343\";\n color_stats_1.stats = absl::StrFormat(\"%.3f\", 1.11);\n ColorStats color_stats_2;\n color_stats_2.color = \"#BC8A3F\";\n color_stats_2.stats = absl::StrFormat(\"%.3f\", 2.22);\n color_map[module->entry_computation()->GetInstructionWithName(\"param.0\")] =\n color_stats_1;\n color_map[module->entry_computation()->GetInstructionWithName(\"param.1\")] =\n color_stats_2;\n HloRenderOptions hlo_render_options;\n hlo_render_options.override_node_colors = true;\n TF_ASSERT_OK_AND_ASSIGN(\n std::string graph,\n RenderGraph(*module->entry_computation(), \"tuple_constant\",\n DebugOptions(), RenderedGraphFormat::kDot, hlo_render_options,\n color_map));\n EXPECT_THAT(graph, HasSubstr(\"#A9C343\"));\n EXPECT_THAT(graph, HasSubstr(\"1.110\"));\n EXPECT_THAT(graph, HasSubstr(\"#BC8A3F\"));\n EXPECT_THAT(graph, HasSubstr(\"2.220\"));\n}\n} \n} "},"Code 
Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_graph_dumper.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_graph_dumper_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1121,"cells":{"ID":{"kind":"string","value":"10bb58ac-bb24-4434-b6f2-4ba070e2f308"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"reduce_scatter_reassociate"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/reduce_scatter_reassociate.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/reduce_scatter_reassociate_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/reduce_scatter_reassociate.h\"\n#include \n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_query.h\"\n#include \"xla/service/all_reduce_key.h\"\n#include \"xla/service/collective_ops_utils.h\"\n#include \"xla/service/hlo_domain_map.h\"\n#include \"tsl/platform/errors.h\"\nnamespace xla {\nnamespace {\nbool AreCompatible(const HloReduceScatterInstruction *rs0,\n const HloReduceScatterInstruction *rs1,\n ReductionKind op_kind) {\n std::optional key0 = GetAllReduceKey(rs0);\n std::optional key1 = GetAllReduceKey(rs1);\n auto kind0 = MatchReductionComputation(rs0->to_apply());\n auto dims_match = rs0->scatter_dimension() == rs1->scatter_dimension();\n return key0 && key1 && kind0 && *key0 == *key1 && kind0 == op_kind &&\n dims_match;\n}\n} \nabsl::StatusOr ReduceScatterReassociate::Run(\n HloModule *module,\n const absl::flat_hash_set 
&execution_threads) {\n if (hlo_query::ContainsLayoutConstrainedCollective(\n *module, HloOpcode::kReduceScatter)) {\n VLOG(1)\n << \"Skip ReduceScatterReassociate because the module contains reduce-\"\n \"scatter with constrained layouts\";\n return false;\n }\n int64_t next_channel_id = hlo_query::NextChannelId(*module);\n bool changed = false;\n for (auto computation : module->computations(execution_threads)) {\n for (HloInstruction *inst : computation->MakeInstructionPostOrder()) {\n std::optional kind = MatchReductionInstruction(inst);\n if (!kind || inst->operand(0)->opcode() != HloOpcode::kReduceScatter ||\n inst->operand(1)->opcode() != HloOpcode::kReduceScatter ||\n !inst->shape().IsArray()) {\n continue;\n }\n auto *rs0 = Cast(inst->mutable_operand(0));\n auto *rs1 = Cast(inst->mutable_operand(1));\n if (!AreCompatible(rs0, rs1, *kind)) {\n VLOG(2) << \"Reduce-Scatter operations are not compatible, skipping\";\n continue;\n }\n if (rs0->user_count() != 1 || rs1->user_count() != 1) {\n VLOG(2) << \"Reduce-Scatter operations have > 1 users\";\n continue;\n }\n HloInstruction *new_op =\n computation->AddInstruction(inst->CloneWithNewOperands(\n rs0->mutable_operand(0)->shape(),\n {rs0->mutable_operand(0), rs1->mutable_operand(0)}));\n HloInstruction *new_rs = computation->AddInstruction(\n rs0->CloneWithNewOperands(inst->shape(), {new_op}));\n if (new_rs->channel_id()) {\n new_rs->set_channel_id(next_channel_id++);\n }\n TF_RETURN_IF_ERROR(inst->ReplaceAllUsesWith(new_rs));\n TF_RETURN_IF_ERROR(computation->RemoveInstruction(inst));\n TF_RETURN_IF_ERROR(computation->RemoveInstruction(rs0));\n if (rs0 != rs1) {\n TF_RETURN_IF_ERROR(computation->RemoveInstruction(rs1));\n }\n changed = true;\n }\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/reduce_scatter_reassociate.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include 
\"xla/tests/hlo_test_base.h\"\nnamespace xla {\nnamespace {\nnamespace m = xla::testing::opcode_matchers;\nclass ReduceScatterReassociateTest : public HloTestBase {\n public:\n absl::StatusOr> RunPass(\n absl::string_view hlo_module, bool expect_change) {\n TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_module));\n auto changed = ReduceScatterReassociate().Run(module.get());\n if (!changed.ok()) {\n return changed.status();\n }\n EXPECT_EQ(changed.value(), expect_change);\n return absl::StatusOr>(std::move(module));\n }\n size_t ReduceScatterCount(std::unique_ptr& module) {\n return absl::c_count_if(module->entry_computation()->instructions(),\n HloPredicateIsOp);\n }\n};\nTEST_F(ReduceScatterReassociateTest, Simple) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum\n rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum\n ROOT add = f32[4] add(rs0, rs1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, true));\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n m::ReduceScatter(m::Add(m::Parameter(0), m::Parameter(1))));\n EXPECT_EQ(ReduceScatterCount(module), 1);\n}\nTEST_F(ReduceScatterReassociateTest, SimpleWithConstrainLayout) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n rs0 = f32[4] reduce-scatter(p0), dimensions={0}, constrain_layout=true, to_apply=sum\n rs1 = f32[4] reduce-scatter(p1), dimensions={0}, constrain_layout=true, to_apply=sum\n ROOT add = f32[4] add(rs0, rs1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, false));\n}\nTEST_F(ReduceScatterReassociateTest, SimpleChain) {\n 
absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n p2 = f32[8] parameter(2)\n p3 = f32[8] parameter(3)\n rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum\n rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum\n rs2 = f32[4] reduce-scatter(p2), dimensions={0}, to_apply=sum\n rs3 = f32[4] reduce-scatter(p3), dimensions={0}, to_apply=sum\n add0 = f32[4] add(rs0, rs1)\n add1 = f32[4] add(add0, rs2)\n ROOT add2 = f32[4] add(add1, rs3)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, true));\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n m::ReduceScatter(m::Add(\n m::Add(m::Add(m::Parameter(0), m::Parameter(1)), m::Parameter(2)),\n m::Parameter(3))));\n EXPECT_EQ(ReduceScatterCount(module), 1);\n}\nTEST_F(ReduceScatterReassociateTest, SimpleTree) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n p2 = f32[8] parameter(2)\n p3 = f32[8] parameter(3)\n rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum\n rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum\n rs2 = f32[4] reduce-scatter(p2), dimensions={0}, to_apply=sum\n rs3 = f32[4] reduce-scatter(p3), dimensions={0}, to_apply=sum\n add0 = f32[4] add(rs0, rs1)\n add1 = f32[4] add(rs2, rs3)\n ROOT add2 = f32[4] add(add0, add1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, true));\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n m::ReduceScatter(m::Add(m::Add(m::Parameter(0), m::Parameter(1)),\n m::Add(m::Parameter(2), m::Parameter(3)))));\n EXPECT_EQ(ReduceScatterCount(module), 1);\n}\nTEST_F(ReduceScatterReassociateTest, MismatchOp0) {\n absl::string_view hlo_string = 
R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nmax {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT r = f32[] maximum(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum\n rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=max\n ROOT add = f32[4] add(rs0, rs1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, false));\n}\nTEST_F(ReduceScatterReassociateTest, MismatchOp1) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nmax {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT r = f32[] maximum(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=max\n rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=max\n ROOT add = f32[4] add(rs0, rs1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, false));\n}\nTEST_F(ReduceScatterReassociateTest, MismatchDimension) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8,8] parameter(0)\n p1 = f32[8,8] parameter(1)\n rs0 = f32[8,8] reduce-scatter(p0), dimensions={0}, to_apply=sum\n rs1 = f32[8,8] reduce-scatter(p1), dimensions={1}, to_apply=sum\n ROOT add = f32[8,8] add(rs0, rs1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, false));\n}\nTEST_F(ReduceScatterReassociateTest, MismatchReplicaGroups) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n rs0 = f32[4] reduce-scatter(p0), dimensions={0}, replica_groups={{0}}, to_apply=sum\n rs1 = 
f32[4] reduce-scatter(p1), dimensions={0}, replica_groups={}, to_apply=sum\n ROOT add = f32[4] add(rs0, rs1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, false));\n}\nTEST_F(ReduceScatterReassociateTest, MismatchHasChannelId) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n rs0 = f32[4] reduce-scatter(p0), dimensions={0}, channel_id=3, to_apply=sum\n rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum\n ROOT add = f32[4] add(rs0, rs1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, false));\n}\nTEST_F(ReduceScatterReassociateTest, MismatchUseGlobalDeviceId) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n rs0 = f32[4] reduce-scatter(p0), dimensions={0}, replica_groups={{0,1}}, channel_id=3, use_global_device_ids=true, to_apply=sum\n rs1 = f32[4] reduce-scatter(p1), dimensions={0}, replica_groups={{0,1}}, channel_id=4, to_apply=sum\n ROOT add = f32[4] add(rs0, rs1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, false));\n}\nTEST_F(ReduceScatterReassociateTest, NotSingleUser) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum\n rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum\n add = f32[4] add(rs0, rs1)\n ROOT t = (f32[4], f32[4]) tuple(rs0, add)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, false));\n}\nTEST_F(ReduceScatterReassociateTest, DoubleUse) {\n absl::string_view hlo_string = R\"(\nHloModule 
m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum\n add = f32[4] add(rs0, rs0)\n ROOT c = f32[4] copy(add)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, true));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_scatter_reassociate.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_scatter_reassociate_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1122,"cells":{"ID":{"kind":"string","value":"3bc61d67-4a76-418b-a671-c9c315c7a6bd"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"change_op_data_type"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/change_op_data_type.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/change_op_data_type_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/change_op_data_type.h\"\n#include \n#include \"xla/service/hlo_creation_utils.h\"\n#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)\n#include \"xla/service/cpu/onednn_contraction_rewriter.h\"\n#endif \nnamespace xla {\nnamespace {\nstd::optional GetUniformOperandType(\n const HloInstruction* instr) {\n std::optional type;\n for (const HloInstruction* operand : instr->operands()) {\n if (!type.has_value()) {\n type = operand->shape().element_type();\n } else if (operand->shape().element_type() != type.value()) {\n return std::nullopt;\n }\n }\n return type;\n}\n} \nabsl::StatusOr ChangeOpDataType::Run(\n HloModule* 
module,\n const absl::flat_hash_set& execution_threads) {\n bool changed = false;\n HloCloner default_cloner = [](const HloInstruction* inst, const Shape& shape,\n absl::Span operands) {\n return inst->CloneWithNewOperands(shape, operands);\n };\n HloCloner cloner = cloner_ ? cloner_ : default_cloner;\n for (HloComputation* comp :\n module->MakeNonfusionComputations(execution_threads)) {\n for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {\n std::optional operand_type = GetUniformOperandType(instr);\n if (!op_matcher_(instr) || !operand_type.has_value() ||\n !instr->shape().IsArray() ||\n instr->opcode() == HloOpcode::kParameter) {\n continue;\n }\n const PrimitiveType from_type = *operand_type;\n auto it = to_type_map_.find(from_type);\n if (it == to_type_map_.end()) {\n continue;\n }\n#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)\n if (cpu::OneDnnContractionRewriter::ShouldRewriteInstr(instr, true)) {\n continue;\n }\n#endif \n const PrimitiveType to_type = it->second;\n absl::InlinedVector new_operands;\n for (HloInstruction* operand : instr->mutable_operands()) {\n new_operands.push_back(MakeConvertToHlo(operand, to_type));\n }\n Shape new_shape = instr->shape();\n new_shape.set_element_type(to_type);\n HloInstruction* new_instr =\n comp->AddInstruction(cloner(instr, new_shape, new_operands));\n TF_RETURN_IF_ERROR(comp->ReplaceInstruction(\n instr, MakeConvertToHlo(new_instr, from_type)));\n changed = true;\n }\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/change_op_data_type.h\"\n#include \n#include \n#include \n#include \"absl/types/span.h\"\n#include \"xla/service/pattern_matcher.h\"\n#include \"xla/service/pattern_matcher_gmock.h\"\n#include \"xla/tests/hlo_test_base.h\"\nnamespace xla {\nnamespace {\nnamespace m = ::xla::match;\nclass ChangeOpDataTypeTest : public HloTestBase {\n public:\n ChangeOpDataTypeTest()\n : HloTestBase(false,\n false) 
{}\n};\nTEST_F(ChangeOpDataTypeTest, Simple) {\n const char* const kModuleStr = R\"(\n HloModule module\n ENTRY entry {\n ROOT op = add(f16[10] parameter(0), f16[10] parameter(1))\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kModuleStr));\n ChangeOpDataType pass(F16, F32, HloPredicateTrue);\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));\n SCOPED_TRACE(module->ToString());\n EXPECT_TRUE(changed);\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n GmockMatch(\n m::Convert(m::Add(m::Convert(m::Parameter(0)).WithShape(F32, {10}),\n m::Convert(m::Parameter(1)).WithShape(F32, {10})))\n .WithShape(F16, {10})));\n}\nTEST_F(ChangeOpDataTypeTest, AllTypesMustBeSame) {\n const char* const kModuleStr = R\"(\n HloModule module\n ENTRY entry {\n ROOT op = f16[1] dynamic-slice(f16[10] parameter(0), s32[1] parameter(1)), dynamic_slice_sizes={1}\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kModuleStr));\n ChangeOpDataType pass(F16, F32, HloPredicateTrue);\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));\n SCOPED_TRACE(module->ToString());\n EXPECT_FALSE(changed);\n}\nTEST_F(ChangeOpDataTypeTest, DotAndConv) {\n const char* const kModuleStr = R\"(\n HloModule module\n ENTRY entry {\n dot = f16[10,10] dot(f16[10,10] parameter(0), f16[10,10] parameter(1)),\n lhs_contracting_dims={1}, rhs_contracting_dims={0}\n conv = f16[1,2,1] convolution(f16[1,2,1] parameter(2), f16[1,1,1] parameter(3)),\n window={size=1}, dim_labels=b0f_0io->b0f\n root = tuple(dot, conv)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kModuleStr));\n ChangeOpDataType pass(\n F16, F32, HloPredicateIsOp);\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));\n SCOPED_TRACE(module->ToString());\n EXPECT_TRUE(changed);\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n 
GmockMatch(m::Tuple(\n m::Convert(\n m::Dot(m::Convert(m::Parameter(0)).WithShape(F32, {10, 10}),\n m::Convert(m::Parameter(1)).WithShape(F32, {10, 10})))\n .WithShape(F16, {10, 10}),\n m::Convert(m::Convolution(\n m::Convert(m::Parameter(2)).WithShape(F32, {1, 2, 1}),\n m::Convert(m::Parameter(3)).WithShape(F32, {1, 1, 1})))\n .WithShape(F16, {1, 2, 1}))));\n}\nTEST_F(ChangeOpDataTypeTest, SimpleWithCloner) {\n const char* const kModuleStr = R\"(\n HloModule module\n ENTRY entry {\n ROOT op = add(f16[10] parameter(0), f16[10] parameter(1))\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kModuleStr));\n HloPredicate matcher = HloPredicateTrue;\n int count = 0;\n ChangeOpDataType::HloCloner cloner =\n [&count](const HloInstruction* instr, const Shape& shape,\n absl::Span operands) {\n count++;\n return instr->CloneWithNewOperands(shape, operands);\n };\n ChangeOpDataType pass(F16, F32, matcher, cloner);\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));\n SCOPED_TRACE(module->ToString());\n EXPECT_TRUE(changed);\n EXPECT_EQ(count, 1);\n}\nTEST_F(ChangeOpDataTypeTest, SimpleWithMultipleTypes) {\n const char* const kModuleStr = R\"(\n HloModule module\n ENTRY entry {\n op1 = add(f16[10] parameter(0), f16[10] parameter(1))\n op2 = add(u16[10] parameter(2), u16[10] parameter(3))\n ROOT tup = (f16[10], u16[10]) tuple(op1, op2)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kModuleStr));\n HloPredicate matcher = HloPredicateTrue;\n ChangeOpDataType pass({{F16, F32}, {U16, U32}}, matcher);\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));\n SCOPED_TRACE(module->ToString());\n EXPECT_TRUE(changed);\n const HloInstruction* root = module->entry_computation()->root_instruction();\n EXPECT_EQ(root->opcode(), HloOpcode::kTuple);\n EXPECT_EQ(root->operand_count(), 2);\n EXPECT_THAT(\n root->operand(0),\n GmockMatch(\n 
m::Convert(m::Add(m::Convert(m::Parameter(0)).WithShape(F32, {10}),\n m::Convert(m::Parameter(1)).WithShape(F32, {10})))\n .WithShape(F16, {10})));\n EXPECT_THAT(\n root->operand(1),\n GmockMatch(\n m::Convert(m::Add(m::Convert(m::Parameter(2)).WithShape(U32, {10}),\n m::Convert(m::Parameter(3)).WithShape(U32, {10})))\n .WithShape(U16, {10})));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/change_op_data_type.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/change_op_data_type_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1123,"cells":{"ID":{"kind":"string","value":"2292926a-6678-41a1-8dd4-0c5f8f61af9c"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"collective_permute_decomposer"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/collective_permute_decomposer.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/collective_permute_decomposer_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/collective_permute_decomposer.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/status/status.h\"\n#include \"absl/strings/str_join.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/collective_ops_utils.h\"\n#include \"xla/service/gpu/backend_configs.pb.h\"\n#include \"xla/service/graphcycles/graphcycles.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/xla_data.pb.h\"\n#include 
\"tsl/platform/errors.h\"\nnamespace xla {\nnamespace {\nusing SourceTargetPair = std::pair;\nusing SourceTargetPairs = std::vector;\nbool HasCycles(const SourceTargetPairs& pairs) {\n GraphCycles graph;\n absl::flat_hash_map replica_to_node_id;\n auto get_node_id = [&](int64_t replica) {\n auto it_and_inserted = replica_to_node_id.emplace(replica, -1);\n auto it = it_and_inserted.first;\n auto inserted = it_and_inserted.second;\n if (inserted) {\n it->second = graph.NewNode();\n }\n return it->second;\n };\n for (auto pair : pairs) {\n auto source = get_node_id(pair.first);\n auto target = get_node_id(pair.second);\n VLOG(3) << \"See source \" << source << \" -> target \" << target;\n if (!graph.InsertEdge(source, target)) {\n VLOG(3) << \"Detected cycles\";\n return true;\n }\n }\n return false;\n}\nbool ShouldDecompose(const HloCollectivePermuteInstruction& collective_permute,\n int64_t threshold_in_bytes) {\n if (!collective_permute.channel_id().has_value()) {\n return false;\n }\n const Shape& result_shape = collective_permute.shape();\n if (!result_shape.IsArray()) {\n return false;\n }\n if (ShapeUtil::ByteSizeOf(result_shape) < threshold_in_bytes) {\n return false;\n }\n return !HasCycles(collective_permute.source_target_pairs());\n}\nbool MayPipeline(const HloCollectivePermuteInstruction& collective_permute) {\n const HloInstruction* data = collective_permute.operand(0);\n return (data->opcode() == HloOpcode::kGetTupleElement &&\n data->operand(0)->opcode() == HloOpcode::kParameter);\n}\nabsl::Status DecomposeCollectivePermute(\n HloCollectivePermuteInstruction* collective_permute,\n HloComputation* computation, const std::string& pipeline_decision) {\n int64_t channel_id = collective_permute->channel_id().value();\n HloInstruction* data = collective_permute->mutable_operand(0);\n const Shape& data_shape = data->shape();\n const OpMetadata& metadata = collective_permute->metadata();\n const xla::FrontendAttributes& old_attributes =\n 
collective_permute->frontend_attributes();\n xla::FrontendAttributes attributes;\n std::string source_target_pairs_string =\n \"{\" +\n absl::StrJoin(collective_permute->source_target_pairs(), \",\",\n absl::PairFormatter(\n [](std::string* out, int64_t value) {\n absl::StrAppend(out, \"{\", value);\n },\n \",\",\n [](std::string* out, int64_t value) {\n absl::StrAppend(out, value, \"}\");\n })) +\n \"}\";\n attributes.mutable_map()->insert(old_attributes.map().begin(),\n old_attributes.map().end());\n (*attributes.mutable_map())[kSendRecvSourceTargetPairsAttr] =\n source_target_pairs_string;\n HloInstruction* after_all =\n computation->AddInstruction(HloInstruction::CreateToken());\n HloInstruction* recv = computation->AddInstruction(\n HloInstruction::CreateRecv(data_shape, after_all, channel_id));\n recv->add_frontend_attributes(attributes);\n recv->set_metadata(metadata);\n HloInstruction* send = computation->AddInstruction(\n HloInstruction::CreateSend(data, after_all, channel_id));\n send->add_frontend_attributes(attributes);\n send->set_metadata(metadata);\n HloInstruction* recv_done =\n computation->AddInstruction(HloInstruction::CreateRecvDone(recv));\n HloInstruction* send_done =\n computation->AddInstruction(HloInstruction::CreateSendDone(send));\n TF_RETURN_IF_ERROR(send->AddControlDependencyTo(recv_done));\n HloInstruction* recv_data = computation->AddInstruction(\n HloInstruction::CreateGetTupleElement(recv_done, 0));\n TF_RETURN_IF_ERROR(collective_permute->ReplaceAllUsesWith(recv_data));\n TF_RETURN_IF_ERROR(\n computation->RemoveInstructionAndUnusedOperands(collective_permute));\n if (!pipeline_decision.empty()) {\n xla::FrontendAttributes attributes;\n (*attributes.mutable_map())[kSendRecvPipelineAttr] = pipeline_decision;\n send->add_frontend_attributes(attributes);\n send_done->add_frontend_attributes(attributes);\n recv->add_frontend_attributes(attributes);\n recv_done->add_frontend_attributes(attributes);\n }\n return 
absl::OkStatus();\n}\nbool IsForwardCycle(const SourceTargetPair& backedge,\n const SourceTargetPairs& others) {\n int64_t num_pairs = others.size() + 1;\n if (backedge.first != num_pairs - 1 || backedge.second != 0) {\n return false;\n }\n for (int64_t i = 0; i < num_pairs - 1; ++i) {\n const SourceTargetPair& pair = others[i];\n if (pair.first != i || pair.second != i + 1) {\n return false;\n }\n }\n return true;\n}\nbool IsBackwardCycle(const SourceTargetPair& backedge,\n const SourceTargetPairs& others) {\n int64_t num_pairs = others.size() + 1;\n if (backedge.first != 0 || backedge.second != num_pairs - 1) {\n return false;\n }\n for (int64_t i = 0; i < num_pairs - 1; ++i) {\n const SourceTargetPair& pair = others[i];\n if (pair.first != i + 1 || pair.second != i) {\n return false;\n }\n }\n return true;\n}\nstd::optional>\nCheckCyclePatterns(HloCollectivePermuteInstruction* cp0,\n HloCollectivePermuteInstruction* cp1) {\n const SourceTargetPairs& cp0_pairs = cp0->source_target_pairs();\n const SourceTargetPairs& cp1_pairs = cp1->source_target_pairs();\n if (cp0_pairs.size() == 1) {\n if (IsForwardCycle(cp0_pairs.front(), cp1_pairs) ||\n IsBackwardCycle(cp0_pairs.front(), cp1_pairs)) {\n return std::make_pair(cp0, cp1);\n }\n }\n if (cp1_pairs.size() == 1) {\n if (IsForwardCycle(cp1_pairs.front(), cp0_pairs) ||\n IsBackwardCycle(cp1_pairs.front(), cp0_pairs)) {\n return std::make_pair(cp1, cp0);\n }\n }\n return std::nullopt;\n}\n} \nabsl::StatusOr CollectivePermuteDecomposer::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n bool changed = false;\n std::vector all_computations =\n module->MakeComputationPostOrder(execution_threads);\n absl::flat_hash_set while_bodies;\n for (auto iter = all_computations.rbegin(); iter != all_computations.rend();\n ++iter) {\n HloComputation* computation = *iter;\n bool may_pipeline = while_bodies.contains(computation);\n std::vector cps_to_decompose;\n HloCollectivePermuteInstruction* 
cp0_to_pipeline = nullptr;\n HloCollectivePermuteInstruction* cp1_to_pipeline = nullptr;\n for (HloInstruction* hlo : computation->MakeInstructionPostOrder()) {\n if (hlo->opcode() == HloOpcode::kWhile) {\n while_bodies.insert(hlo->while_body());\n continue;\n }\n if (hlo->opcode() != HloOpcode::kCollectivePermute) {\n continue;\n }\n HloCollectivePermuteInstruction* cp =\n Cast(hlo);\n if (!ShouldDecompose(*cp, threshold_in_bytes_)) {\n continue;\n }\n cps_to_decompose.push_back(cp);\n if (!while_bodies.contains(computation) || !may_pipeline) {\n continue;\n }\n if (cp0_to_pipeline != nullptr && cp1_to_pipeline != nullptr) {\n continue;\n }\n if (!MayPipeline(*cp)) {\n continue;\n }\n if (cp0_to_pipeline == nullptr) {\n cp0_to_pipeline = cp;\n continue;\n }\n auto optional_pair = CheckCyclePatterns(cp0_to_pipeline, cp);\n if (optional_pair.has_value()) {\n cp0_to_pipeline = optional_pair.value().first;\n cp1_to_pipeline = optional_pair.value().second;\n }\n }\n for (HloCollectivePermuteInstruction* cp : cps_to_decompose) {\n std::string pipeline_decision;\n if (cp0_to_pipeline == cp) {\n pipeline_decision = \"0\";\n } else if (cp1_to_pipeline == cp) {\n pipeline_decision = \"1\";\n }\n TF_RETURN_IF_ERROR(\n DecomposeCollectivePermute(cp, computation, pipeline_decision));\n }\n if (!cps_to_decompose.empty()) {\n changed = true;\n }\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/collective_permute_decomposer.h\"\n#include \n#include \n#include \n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/hlo/utils/hlo_query.h\"\n#include \"xla/service/collective_ops_utils.h\"\n#include \"xla/service/gpu/backend_configs.pb.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/tests/hlo_test_base.h\"\nnamespace xla {\nnamespace {\nusing ::testing::HasSubstr;\nnamespace op = 
xla::testing::opcode_matchers;\nusing CollectivePermuteDecomposerTest = HloTestBase;\nTEST_F(CollectivePermuteDecomposerTest, WithCycleNotTransformed) {\n const absl::string_view kModuleStr = R\"(\n HloModule test\n ENTRY test_computation {\n p = u32[] replica-id()\n ROOT cp = u32[] collective-permute(p), channel_id=1,\n source_target_pairs={{0,1}, {1,0}}\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnUnverifiedModule((kModuleStr)));\n CollectivePermuteDecomposer decomposer(0);\n TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(CollectivePermuteDecomposerTest, WithContextDataNotTransformed) {\n const char* const kModuleStr = R\"(\n HloModule test\n ENTRY test_computation {\n p = u32[] replica-id()\n ROOT cp = (u32[], u32[], u32[], u32[]) collective-permute(p), channel_id=1,\n source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnUnverifiedModule((kModuleStr)));\n CollectivePermuteDecomposer decomposer(0);\n TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(CollectivePermuteDecomposerTest, TransformedExplicitChannelId) {\n const char* const kModuleStr = R\"(\n HloModule test\n ENTRY test_computation {\n p = u32[] replica-id()\n ROOT cp = u32[] collective-permute(p), channel_id=1,\n source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}},\n metadata={op_name=\"op1/op2/add\" source_file=\"foo/bar/mysource.py\" source_line=35}\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnUnverifiedModule((kModuleStr)));\n CollectivePermuteDecomposer decomposer(0);\n TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));\n EXPECT_TRUE(changed);\n auto check_metadata = [](const HloInstruction* inst) {\n EXPECT_EQ(inst->metadata().op_name(), \"op1/op2/add\");\n EXPECT_EQ(inst->metadata().source_file(), \"foo/bar/mysource.py\");\n 
EXPECT_EQ(inst->metadata().source_line(), 35);\n };\n auto check_not_pipelined = [](const HloInstruction* instr) {\n const FrontendAttributes& attributes = instr->frontend_attributes();\n EXPECT_EQ(attributes.map().end(),\n attributes.map().find(kSendRecvPipelineAttr));\n };\n HloInstruction* after_all = FindInstruction(module.get(), \"after-all\");\n HloInstruction* recv = FindInstruction(module.get(), \"recv\");\n EXPECT_EQ(recv->operand(0), after_all);\n EXPECT_EQ(recv->channel_id().value(), 1);\n EXPECT_THAT(\n recv->ToString(),\n HasSubstr(\n \"_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}\"));\n check_metadata(recv);\n check_not_pipelined(recv);\n HloInstruction* recv_done = FindInstruction(module.get(), \"recv-done\");\n EXPECT_EQ(recv_done->operand(0), recv);\n HloInstruction* send = FindInstruction(module.get(), \"send\");\n EXPECT_EQ(send->operand(1), after_all);\n EXPECT_EQ(send->channel_id().value(), 1);\n EXPECT_THAT(\n send->ToString(),\n HasSubstr(\n \"_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}\"));\n check_metadata(send);\n check_not_pipelined(send);\n HloInstruction* send_done = FindInstruction(module.get(), \"send-done\");\n EXPECT_EQ(send_done->operand(0), send);\n HloInstruction* root = module->entry_computation()->root_instruction();\n EXPECT_THAT(root, op::GetTupleElement(recv_done, 0));\n}\nTEST_F(CollectivePermuteDecomposerTest, NotTransformedDefaultChannelId) {\n const char* const kModuleStr = R\"(\n HloModule test\n ENTRY test_computation {\n p = u32[] replica-id()\n ROOT cp = u32[] collective-permute(p),\n source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnUnverifiedModule((kModuleStr)));\n CollectivePermuteDecomposer decomposer(0);\n TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(CollectivePermuteDecomposerTest, ThresholdNotTransformed) {\n const char* const kModuleStr = 
R\"(\n HloModule test\n ENTRY test_computation {\n p = u32[] replica-id()\n ROOT cp = u32[] collective-permute(p), channel_id=1,\n source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}},\n metadata={op_name=\"op1/op2/add\" source_file=\"foo/bar/mysource.py\" source_line=35}\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnUnverifiedModule((kModuleStr)));\n CollectivePermuteDecomposer decomposer(8);\n TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(CollectivePermuteDecomposerTest, Pipeline1) {\n const char* const kModuleStr = R\"(\n HloModule module\n cond {\n param = (u32[], u32[2]) parameter(0)\n count = get-tuple-element(param), index=0\n ub = u32[] constant(2)\n ROOT result = pred[] compare(count, ub), direction=LT\n }\n body {\n param = (u32[], u32[2]) parameter(0)\n count = get-tuple-element(param), index=0\n send-data = get-tuple-element(param), index=1\n recv-data = u32[2] collective-permute(send-data), channel_id=1,\n source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}},\n frontend_attributes={_xla_other_attribute=\"xyz\"}\n c1 = u32[] constant(1)\n new_count = u32[] add(count, c1)\n r = u32[2] broadcast(c1), dimensions={}\n s = u32[2] add(r, recv-data)\n ROOT result = (u32[], u32[2]) tuple(new_count, s)\n }\n ENTRY test_computation {\n c0 = u32[] constant(0)\n c1 = u32[] constant(1)\n r = u32[] replica-id()\n a = u32[] add(c1, r)\n init = u32[2] broadcast(a), dimensions={}\n while_init = (u32[], u32[2]) tuple(c0, init)\n while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond\n ROOT result = u32[2] get-tuple-element(while_result), index=1\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnUnverifiedModule((kModuleStr)));\n CollectivePermuteDecomposer decomposer(0);\n TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* recv = FindInstruction(module.get(), \"recv\");\n 
EXPECT_EQ(recv->channel_id().value(), 1);\n EXPECT_THAT(\n recv->ToString(),\n HasSubstr(\n \"_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}\"));\n EXPECT_THAT(recv->ToString(), HasSubstr(\"_xla_send_recv_pipeline=\\\"0\\\"\"));\n EXPECT_THAT(recv->ToString(), HasSubstr(\"_xla_other_attribute=\\\"xyz\\\"\"));\n HloInstruction* recv_done = FindInstruction(module.get(), \"recv-done\");\n EXPECT_THAT(recv_done->ToString(),\n HasSubstr(\"_xla_send_recv_pipeline=\\\"0\\\"\"));\n HloInstruction* send = FindInstruction(module.get(), \"send\");\n EXPECT_EQ(send->channel_id().value(), 1);\n EXPECT_THAT(\n send->ToString(),\n HasSubstr(\n \"_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}\"));\n EXPECT_THAT(send->ToString(), HasSubstr(\"_xla_send_recv_pipeline=\\\"0\\\"\"));\n EXPECT_THAT(send->ToString(), HasSubstr(\"_xla_other_attribute=\\\"xyz\\\"\"));\n HloInstruction* send_done = FindInstruction(module.get(), \"send-done\");\n EXPECT_THAT(send_done->ToString(),\n HasSubstr(\"_xla_send_recv_pipeline=\\\"0\\\"\"));\n EXPECT_FALSE(recv_done->control_predecessors().empty());\n EXPECT_EQ(recv_done->control_predecessors()[0], send);\n}\nTEST_F(CollectivePermuteDecomposerTest, ForwardPipeline2) {\n const char* const kModuleStr = R\"(\n HloModule module\n cond {\n param = (u32[], u32[2]) parameter(0)\n count = get-tuple-element(param), index=0\n ub = u32[] constant(2)\n ROOT result = pred[] compare(count, ub), direction=LT\n }\n body {\n param = (u32[], u32[2]) parameter(0)\n count = get-tuple-element(param), index=0\n send-data = get-tuple-element(param), index=1\n recv-data.0 = u32[2] collective-permute(send-data), channel_id=1,\n source_target_pairs={{3,0}}\n recv-data.1 = u32[2] collective-permute(send-data), channel_id=2,\n source_target_pairs={{0,1}, {1,2}, {2,3}}\n replica = u32[] replica-id()\n constant0 = u32[] constant(0)\n compare0 = pred[] compare(replica, constant0), direction=EQ\n compare = pred[2] broadcast(compare0), dimensions={}\n 
recv-data = u32[2] select(compare, recv-data.0, recv-data.1)\n c1 = u32[] constant(1)\n new_count = u32[] add(count, c1)\n r = u32[2] broadcast(c1), dimensions={}\n s = u32[2] add(r, recv-data)\n ROOT result = (u32[], u32[2]) tuple(new_count, s)\n }\n ENTRY test_computation {\n c0 = u32[] constant(0)\n c1 = u32[] constant(1)\n r = u32[] replica-id()\n a = u32[] add(c1, r)\n init = u32[2] broadcast(a), dimensions={}\n while_init = (u32[], u32[2]) tuple(c0, init)\n while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond\n ROOT result = u32[2] get-tuple-element(while_result), index=1\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnUnverifiedModule((kModuleStr)));\n CollectivePermuteDecomposer decomposer(0);\n TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* recv = FindInstruction(module.get(), \"recv\");\n EXPECT_EQ(recv->channel_id().value(), 1);\n EXPECT_THAT(recv->ToString(),\n HasSubstr(\"_xla_send_recv_source_target_pairs={{3,0}}\"));\n EXPECT_THAT(recv->ToString(), HasSubstr(\"_xla_send_recv_pipeline=\\\"0\\\"\"));\n HloInstruction* send = FindInstruction(module.get(), \"send\");\n EXPECT_THAT(send->ToString(),\n HasSubstr(\"_xla_send_recv_source_target_pairs={{3,0}}\"));\n EXPECT_THAT(send->ToString(), HasSubstr(\"_xla_send_recv_pipeline=\\\"0\\\"\"));\n HloInstruction* recv1 = FindInstruction(module.get(), \"recv.1\");\n EXPECT_EQ(recv1->channel_id().value(), 2);\n EXPECT_THAT(\n recv1->ToString(),\n HasSubstr(\"_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3}}\"));\n EXPECT_THAT(recv1->ToString(), HasSubstr(\"_xla_send_recv_pipeline=\\\"1\\\"\"));\n HloInstruction* recv_done1 = FindInstruction(module.get(), \"recv-done.1\");\n EXPECT_THAT(recv_done1->ToString(),\n HasSubstr(\"_xla_send_recv_pipeline=\\\"1\\\"\"));\n HloInstruction* send1 = FindInstruction(module.get(), \"send.1\");\n EXPECT_THAT(\n send1->ToString(),\n 
HasSubstr(\"_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3}}\"));\n EXPECT_THAT(send1->ToString(), HasSubstr(\"_xla_send_recv_pipeline=\\\"1\\\"\"));\n HloInstruction* send_done1 = FindInstruction(module.get(), \"send-done.1\");\n EXPECT_THAT(send_done1->ToString(),\n HasSubstr(\"_xla_send_recv_pipeline=\\\"1\\\"\"));\n}\nTEST_F(CollectivePermuteDecomposerTest, ForwardPipelineWithMatmul) {\n const char* const kModuleStr = R\"(\n HloModule test\n while_body {\n inputs = (u32[], f32[2,2], f32[2,2]) parameter(0)\n iter = u32[] get-tuple-element(inputs), index=0\n iter_increment = u32[] constant(1)\n next_iter = u32[] add(iter, iter_increment)\n partition-id = u32[] partition-id()\n zero = u32[] constant(0)\n compare = pred[] compare(partition-id, zero), direction=EQ\n broadcast = pred[2,2] broadcast(compare), dimensions={}\n weights = f32[2,2] get-tuple-element(inputs), index=2\n data = f32[2,2] get-tuple-element(inputs), index=1\n cp_back = f32[2,2] collective-permute(data), channel_id=1,\n source_target_pairs={{3,0}},\n frontend_attributes={_xla_send_recv_validation=\"{{3,10}}\"}\n cp_forward = f32[2,2] collective-permute(data), channel_id=2,\n source_target_pairs={{0,1},{1,2},{2,3}},\n frontend_attributes={_xla_send_recv_validation=\"{{0,7},{1,8},{2,9}}\"}\n select = f32[2,2] select(broadcast, cp_back, cp_forward)\n matmul = f32[2,2] dot(weights, select), lhs_contracting_dims={1},\n rhs_contracting_dims={0}\n ROOT result = (u32[], f32[2,2], f32[2,2]) tuple(next_iter, matmul, weights)\n }\n while_cond {\n inputs = (u32[], f32[2,2], f32[2,2]) parameter(0)\n iter = u32[] get-tuple-element(inputs), index=0\n max_iter = u32[] constant(3)\n ROOT compare = pred[] compare(iter, max_iter), direction=LT\n }\n ENTRY test_computation {\n start_iter = u32[] constant(0)\n input_data = f32[2,2] parameter(0)\n input_weights = f32[2,2] parameter(1)\n input = (u32[], f32[2,2], f32[2,2]) tuple(start_iter, input_data,\n input_weights)\n while_result = (u32[], f32[2,2], f32[2,2]) 
while(input),\n condition=while_cond, body=while_body\n ROOT data_out = f32[2,2] get-tuple-element(while_result), index=1\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnUnverifiedModule((kModuleStr)));\n CollectivePermuteDecomposer decomposer(0);\n TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));\n EXPECT_TRUE(changed);\n HloModule* transformed_module = module.get();\n HloComputation* while_body =\n FindComputation(transformed_module, \"while_body\");\n HloInstruction* recv_bwd = hlo_query::FindInstruction(while_body, \"recv\");\n EXPECT_EQ(recv_bwd->channel_id().value(), 1);\n auto recv_bwd_frontend_attributes = recv_bwd->frontend_attributes().map();\n EXPECT_EQ(recv_bwd_frontend_attributes.size(), 3);\n EXPECT_EQ(recv_bwd_frontend_attributes.at(kSendRecvValidationAttr),\n \"{{3,10}}\");\n EXPECT_EQ(recv_bwd_frontend_attributes.at(kSendRecvPipelineAttr), \"0\");\n EXPECT_EQ(recv_bwd_frontend_attributes.at(kSendRecvSourceTargetPairsAttr),\n \"{{3,0}}\");\n HloInstruction* send_bwd = hlo_query::FindInstruction(while_body, \"send\");\n auto send_bwd_frontend_attributes = send_bwd->frontend_attributes().map();\n EXPECT_THAT(send_bwd_frontend_attributes.at(kSendRecvSourceTargetPairsAttr),\n \"{{3,0}}\");\n HloInstruction* recv_fwd = hlo_query::FindInstruction(while_body, \"recv.1\");\n EXPECT_EQ(recv_fwd->channel_id().value(), 2);\n auto recv_fwd_frontend_attributes = recv_fwd->frontend_attributes().map();\n EXPECT_EQ(recv_fwd_frontend_attributes.size(), 3);\n EXPECT_EQ(recv_fwd_frontend_attributes.at(kSendRecvPipelineAttr), \"1\");\n EXPECT_EQ(recv_fwd_frontend_attributes.at(kSendRecvSourceTargetPairsAttr),\n \"{{0,1},{1,2},{2,3}}\");\n HloInstruction* send_fwd = hlo_query::FindInstruction(while_body, \"send.1\");\n auto send_fwd_frontend_attributes = send_fwd->frontend_attributes().map();\n EXPECT_EQ(send_fwd_frontend_attributes.size(), 3);\n EXPECT_EQ(send_fwd_frontend_attributes.at(kSendRecvPipelineAttr), \"1\");\n 
EXPECT_EQ(send_fwd_frontend_attributes.at(kSendRecvSourceTargetPairsAttr),\n \"{{0,1},{1,2},{2,3}}\");\n EXPECT_NE(while_body, nullptr);\n HloInstruction* recv_done_fwd =\n hlo_query::FindInstruction(while_body, \"recv-done\");\n HloInstruction* recv_done_bwd =\n hlo_query::FindInstruction(while_body, \"recv-done.1\");\n EXPECT_EQ(recv_done_fwd->control_predecessors()[0], send_bwd);\n EXPECT_EQ(recv_done_bwd->control_predecessors()[0], send_fwd);\n}\nTEST_F(CollectivePermuteDecomposerTest, BackwardPipeline2) {\n const char* const kModuleStr = R\"(\n HloModule module\n cond {\n param = (u32[], u32[2]) parameter(0)\n count = get-tuple-element(param), index=0\n ub = u32[] constant(2)\n ROOT result = pred[] compare(count, ub), direction=LT\n }\n body {\n param = (u32[], u32[2]) parameter(0)\n count = get-tuple-element(param), index=0\n send-data = get-tuple-element(param), index=1\n recv-data.0 = u32[2] collective-permute(send-data), channel_id=1,\n source_target_pairs={{1,0},{2,1},{3,2}}\n recv-data.1 = u32[2] collective-permute(send-data), channel_id=2,\n source_target_pairs={{0,3}}\n replica = u32[] replica-id()\n constant0 = u32[] constant(0)\n compare0 = pred[] compare(replica, constant0), direction=NE\n compare = pred[2] broadcast(compare0), dimensions={}\n recv-data = u32[2] select(compare, recv-data.0, recv-data.1)\n c1 = u32[] constant(1)\n new_count = u32[] add(count, c1)\n r = u32[2] broadcast(c1), dimensions={}\n s = u32[2] add(r, recv-data)\n ROOT result = (u32[], u32[2]) tuple(new_count, s)\n }\n ENTRY test_computation {\n c0 = u32[] constant(0)\n c1 = u32[] constant(1)\n r = u32[] replica-id()\n a = u32[] add(c1, r)\n init = u32[2] broadcast(a), dimensions={}\n while_init = (u32[], u32[2]) tuple(c0, init)\n while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond\n ROOT result = u32[2] get-tuple-element(while_result), index=1\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnUnverifiedModule((kModuleStr)));\n 
CollectivePermuteDecomposer decomposer(0);\n TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* recv = FindInstruction(module.get(), \"recv\");\n EXPECT_EQ(recv->channel_id().value(), 1);\n EXPECT_THAT(\n recv->ToString(),\n HasSubstr(\"_xla_send_recv_source_target_pairs={{1,0},{2,1},{3,2}}\"));\n EXPECT_THAT(recv->ToString(), HasSubstr(\"_xla_send_recv_pipeline=\\\"1\\\"\"));\n HloInstruction* send = FindInstruction(module.get(), \"send\");\n EXPECT_THAT(\n send->ToString(),\n HasSubstr(\"_xla_send_recv_source_target_pairs={{1,0},{2,1},{3,2}}\"));\n EXPECT_THAT(send->ToString(), HasSubstr(\"_xla_send_recv_pipeline=\\\"1\\\"\"));\n HloInstruction* recv1 = FindInstruction(module.get(), \"recv.1\");\n EXPECT_EQ(recv1->channel_id().value(), 2);\n EXPECT_THAT(recv1->ToString(),\n HasSubstr(\"_xla_send_recv_source_target_pairs={{0,3}}\"));\n EXPECT_THAT(recv1->ToString(), HasSubstr(\"_xla_send_recv_pipeline=\\\"0\\\"\"));\n HloInstruction* send1 = FindInstruction(module.get(), \"send.1\");\n EXPECT_THAT(send1->ToString(),\n HasSubstr(\"_xla_send_recv_source_target_pairs={{0,3}}\"));\n EXPECT_THAT(send1->ToString(), HasSubstr(\"_xla_send_recv_pipeline=\\\"0\\\"\"));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_permute_decomposer.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_permute_decomposer_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1124,"cells":{"ID":{"kind":"string","value":"50cd28dd-fb5f-48ac-8b47-b0f528cfc036"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File 
Name":{"kind":"string","value":"collective_transformation_reorderer"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/collective_transformation_reorderer.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/collective_transformation_reorderer_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/collective_transformation_reorderer.h\"\n#include \n#include \n#include \n#include \n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/log/check.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/hlo_dce.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nstruct CollectiveTransformation {\n HloInstruction* hlo;\n int64_t transformed_collective_dimension;\n};\nstd::optional>\nGetAllGatherTransformations(HloInstruction* all_gather) {\n std::vector transformation_hlos;\n {\n HloInstruction* transformation_hlo = all_gather;\n bool found_unsupported_transformation = false;\n while (transformation_hlo->user_count() == 1 &&\n !found_unsupported_transformation) {\n transformation_hlo = transformation_hlo->users()[0];\n switch (transformation_hlo->opcode()) {\n case HloOpcode::kReshape: {\n transformation_hlos.push_back(transformation_hlo);\n break;\n }\n default:\n found_unsupported_transformation = true;\n }\n }\n }\n if (transformation_hlos.empty()) {\n return std::nullopt;\n }\n auto get_reshaped_all_gather_dimension =\n [](const Shape& all_gather_shape, int64_t all_gather_dimension,\n HloInstruction* transformation_hlo) -> std::optional {\n int64_t all_gather_num_strides = absl::c_accumulate(\n all_gather_shape.dimensions().subspan(0, all_gather_dimension), 1,\n [](int64_t product, int64_t dimension_size) {\n return product * dimension_size;\n });\n int64_t reshaped_all_gather_dimension = 0;\n int64_t reshaped_num_strides = 
1;\n while (reshaped_all_gather_dimension <\n transformation_hlo->shape().dimensions_size() &&\n reshaped_num_strides < all_gather_num_strides) {\n reshaped_num_strides *=\n transformation_hlo->shape().dimensions(reshaped_all_gather_dimension);\n ++reshaped_all_gather_dimension;\n }\n if (reshaped_num_strides != all_gather_num_strides) {\n return std::nullopt;\n }\n if (transformation_hlo->shape().dimensions(reshaped_all_gather_dimension) !=\n all_gather_shape.dimensions(all_gather_dimension)) {\n return std::nullopt;\n }\n return reshaped_all_gather_dimension;\n };\n std::vector transformations;\n HloAllGatherInstruction* all_gather_instruction =\n DynCast(all_gather);\n Shape all_gather_shape = all_gather_instruction->shape();\n int64_t all_gather_dimension = all_gather_instruction->all_gather_dimension();\n CHECK(all_gather_instruction != nullptr);\n for (HloInstruction* transformation_hlo : transformation_hlos) {\n bool found_unsupported_transformation = false;\n switch (transformation_hlo->opcode()) {\n case HloOpcode::kReshape: {\n std::optional reshaped_all_gather_dimension =\n get_reshaped_all_gather_dimension(\n all_gather_shape, all_gather_dimension, transformation_hlo);\n if (reshaped_all_gather_dimension.has_value()) {\n transformations.push_back(\n {transformation_hlo, *reshaped_all_gather_dimension});\n all_gather_shape = transformation_hlo->shape();\n all_gather_dimension = *reshaped_all_gather_dimension;\n } else {\n found_unsupported_transformation = true;\n }\n break;\n }\n default:\n return std::nullopt;\n }\n if (found_unsupported_transformation) {\n break;\n }\n }\n if (transformations.empty()) {\n return std::nullopt;\n }\n return transformations;\n}\nstd::vector GetAllReduceTransformations(\n HloInstruction* all_reduce) {\n HloAllReduceInstruction* all_reduce_instruction =\n DynCast(all_reduce);\n CHECK_NE(all_reduce_instruction, nullptr);\n if (all_reduce_instruction->constrain_layout()) {\n return {};\n }\n std::vector 
transformation_hlos;\n HloInstruction* transformation_hlo = all_reduce->mutable_operand(0);\n while (transformation_hlo->opcode() == HloOpcode::kReshape &&\n transformation_hlo->user_count() == 1) {\n transformation_hlos.push_back(transformation_hlo);\n transformation_hlo = transformation_hlo->mutable_operand(0);\n }\n return transformation_hlos;\n}\n} \nabsl::StatusOr\nCollectiveTransformationReorder::ReorderAllGatherTransformations(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n HloInstructionMap>\n all_gather_to_transformations;\n for (HloComputation* computation :\n module->MakeComputationPostOrder(execution_threads)) {\n for (HloInstruction* instruction :\n computation->MakeInstructionPostOrder()) {\n if (instruction->opcode() == HloOpcode::kAllGather) {\n if (instruction->operand_count() != 1) {\n continue;\n }\n std::optional>\n all_gather_transformations =\n GetAllGatherTransformations(instruction);\n if (all_gather_transformations.has_value()) {\n all_gather_to_transformations[instruction] =\n *std::move(all_gather_transformations);\n }\n }\n }\n }\n if (all_gather_to_transformations.empty()) {\n return false;\n }\n auto reshape_all_gather_operand =\n [](HloInstruction* all_gather_operand,\n int64_t original_all_gather_dimension,\n const CollectiveTransformation& transformation) {\n Shape reshaped_all_gather_operand_shape = transformation.hlo->shape();\n int64_t operand_all_gather_dimension_size =\n all_gather_operand->shape().dimensions(\n original_all_gather_dimension);\n reshaped_all_gather_operand_shape.set_dimensions(\n transformation.transformed_collective_dimension,\n operand_all_gather_dimension_size);\n HloComputation* computation = all_gather_operand->parent();\n return computation->AddInstruction(HloInstruction::CreateReshape(\n reshaped_all_gather_operand_shape, all_gather_operand));\n };\n for (auto& [instruction, transformations] : all_gather_to_transformations) {\n HloAllGatherInstruction* all_gather =\n 
DynCast(instruction);\n int64_t all_gather_dimension = all_gather->all_gather_dimension();\n int64_t original_all_gather_dimension_size =\n all_gather->shape().dimensions(all_gather_dimension);\n HloInstruction* all_gather_operand = instruction->mutable_operand(0);\n for (const CollectiveTransformation& transformation : transformations) {\n all_gather_operand = reshape_all_gather_operand(\n all_gather_operand, all_gather_dimension, transformation);\n all_gather_dimension = transformation.transformed_collective_dimension;\n }\n Shape new_all_gather_shape = all_gather_operand->shape();\n new_all_gather_shape.set_dimensions(all_gather_dimension,\n original_all_gather_dimension_size);\n HloComputation* computation = all_gather_operand->parent();\n HloInstruction* new_all_gather =\n computation->AddInstruction(HloInstruction::CreateAllGather(\n new_all_gather_shape, {all_gather_operand}, all_gather_dimension,\n all_gather->device_list(), all_gather->constrain_layout(),\n all_gather->channel_id(), all_gather->use_global_device_ids()));\n TF_RETURN_IF_ERROR(\n transformations.back().hlo->ReplaceAllUsesWith(new_all_gather));\n if (computation->root_instruction() == transformations.back().hlo) {\n computation->set_root_instruction(new_all_gather);\n }\n }\n return true;\n}\nabsl::StatusOr\nCollectiveTransformationReorder::ReorderAllReduceTransformations(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n HloInstructionMap> all_reduce_to_transformations;\n for (HloComputation* computation :\n module->MakeComputationPostOrder(execution_threads)) {\n for (HloInstruction* instruction :\n computation->MakeInstructionPostOrder()) {\n if (instruction->opcode() == HloOpcode::kAllReduce) {\n if (instruction->user_count() != 1 ||\n computation->root_instruction() == instruction) {\n continue;\n }\n std::vector reshapes =\n GetAllReduceTransformations(instruction);\n if (reshapes.empty()) {\n continue;\n }\n all_reduce_to_transformations[instruction] = 
std::move(reshapes);\n }\n }\n }\n if (all_reduce_to_transformations.empty()) {\n return false;\n }\n for (auto& [inst, reshapes] : all_reduce_to_transformations) {\n HloComputation* computation = inst->parent();\n HloAllReduceInstruction* all_reduce =\n DynCast(inst);\n CHECK(!reshapes.empty());\n HloInstruction* cur_operand = reshapes.back()->mutable_operand(0);\n HloInstruction* new_all_reduce =\n computation->AddInstruction(HloInstruction::CreateAllReduce(\n cur_operand->shape(), {cur_operand}, all_reduce->to_apply(),\n all_reduce->device_list(), all_reduce->constrain_layout(),\n all_reduce->channel_id(), all_reduce->use_global_device_ids()));\n cur_operand = new_all_reduce;\n for (int64_t i = reshapes.size() - 1; i >= 0; --i) {\n cur_operand = computation->AddInstruction(\n HloInstruction::CreateReshape(reshapes[i]->shape(), cur_operand));\n }\n TF_RETURN_IF_ERROR(\n computation->ReplaceInstruction(all_reduce, cur_operand));\n }\n return true;\n}\nabsl::StatusOr CollectiveTransformationReorder::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n TF_ASSIGN_OR_RETURN(bool ag_changed, ReorderAllGatherTransformations(\n module, execution_threads));\n TF_ASSIGN_OR_RETURN(bool ar_changed, ReorderAllReduceTransformations(\n module, execution_threads));\n if (ag_changed || ar_changed) {\n HloDCE dce;\n TF_RETURN_IF_ERROR(dce.Run(module, execution_threads).status());\n }\n return ag_changed || ar_changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/collective_transformation_reorderer.h\"\n#include \n#include \n#include \n#include \"absl/status/statusor.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/service/hlo_verifier.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla 
{\nnamespace {\nnamespace op = xla::testing::opcode_matchers;\nclass CollectiveTransformationReordererTest : public HloTestBase {\n public:\n absl::StatusOr RunCollectiveTransformationReorderer(HloModule* module) {\n CollectiveTransformationReorder reorderer;\n return reorderer.Run(module, {});\n }\n};\nTEST_F(CollectiveTransformationReordererTest,\n ReshapeWithinShardAfterAllGatherDim) {\n absl::string_view hlo_string = R\"(\n HloModule module\n ENTRY entry {\n param = bf16[8,4,1024] parameter(0)\n all-gather = bf16[8,32,1024] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1\n ROOT reshape = bf16[8,32,8,128] reshape(all-gather)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunCollectiveTransformationReorderer(module.get()));\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::AllGather(op::Reshape(op::Parameter())));\n HloInstruction* all_gather = module->entry_computation()->root_instruction();\n EXPECT_THAT(all_gather->dimensions(), ::testing::ElementsAre(1));\n}\nTEST_F(CollectiveTransformationReordererTest,\n ReshapeWithinShardBeforeAllGatherDim) {\n absl::string_view hlo_string = R\"(\n HloModule module\n ENTRY entry {\n param = bf16[8,32,8,4,1024] parameter(0)\n all-gather = bf16[8,32,8,32,1024] all-gather(param), dimensions={3}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1\n ROOT reshape = bf16[2048,32,1024] reshape(all-gather)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunCollectiveTransformationReorderer(module.get()));\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::AllGather(op::Reshape(op::Parameter())));\n HloInstruction* all_gather = module->entry_computation()->root_instruction();\n EXPECT_THAT(all_gather->dimensions(), 
::testing::ElementsAre(1));\n}\nTEST_F(CollectiveTransformationReordererTest,\n ReshapeWithinShardBeforeAndAfterAllGatherDim) {\n absl::string_view hlo_string = R\"(\n HloModule module\n ENTRY entry {\n param = bf16[8,32,8,4,1024] parameter(0)\n all-gather = bf16[8,32,8,32,1024] all-gather(param), dimensions={3}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1\n ROOT reshape = bf16[2048,32,8,128] reshape(all-gather)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunCollectiveTransformationReorderer(module.get()));\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::AllGather(op::Reshape(op::Parameter())));\n HloInstruction* all_gather = module->entry_computation()->root_instruction();\n EXPECT_THAT(all_gather->dimensions(), ::testing::ElementsAre(1));\n}\nTEST_F(CollectiveTransformationReordererTest, ReshapeAcrossShards) {\n absl::string_view hlo_string = R\"(\n HloModule module\n ENTRY entry {\n param = bf16[8,1,8,128] parameter(0)\n all-gather = bf16[8,8,8,128] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1\n ROOT reshape = bf16[64,8,128] reshape(all-gather)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunCollectiveTransformationReorderer(module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(CollectiveTransformationReordererTest, MergeAllGatherDimensionWithNext) {\n absl::string_view hlo_string = R\"(\n HloModule module\n ENTRY entry {\n param = bf16[8,8,16,16] parameter(0)\n all-gather = bf16[64,8,16,16] all-gather(param), dimensions={0}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1\n ROOT reshape = bf16[512,16,16] reshape(all-gather)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n 
RunCollectiveTransformationReorderer(module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(CollectiveTransformationReordererTest,\n MergeAllGatherDimensionWithPrevious) {\n absl::string_view hlo_string = R\"(\n HloModule module\n ENTRY entry {\n param = bf16[8,8,16,16] parameter(0)\n all-gather = bf16[8,64,16,16] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1\n ROOT reshape = bf16[512,16,16] reshape(all-gather)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunCollectiveTransformationReorderer(module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(CollectiveTransformationReordererTest, AllReduceSingleReshape) {\n absl::string_view hlo_string = R\"(\n HloModule module\n add {\n a = bf16[] parameter(0)\n b = bf16[] parameter(1)\n ROOT s = bf16[] add(a, b)\n }\n ENTRY entry {\n param = bf16[16384,6144] parameter(0)\n reshape = bf16[1,16384,6144] reshape(param)\n all-reduce = bf16[1,16384,6144] all-reduce(reshape), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=add\n constant = s32[] constant(0)\n ROOT dynamic-slice = bf16[1,16384,384] dynamic-slice(all-reduce, constant, constant, constant), dynamic_slice_sizes={1,16384,384}\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunCollectiveTransformationReorderer(module.get()));\n EXPECT_TRUE(changed);\n TF_ASSERT_OK(HloVerifier(false,\n true)\n .Run(module.get())\n .status());\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::DynamicSlice(op::Reshape(op::AllReduce(op::Parameter())),\n op::Constant(), op::Constant(), op::Constant()));\n}\nTEST_F(CollectiveTransformationReordererTest, AllReduceTwoReshapes) {\n absl::string_view hlo_string = R\"(\n HloModule module\n add {\n a = bf16[] parameter(0)\n b = bf16[] parameter(1)\n ROOT s = bf16[] add(a, b)\n }\n ENTRY 
entry {\n param = bf16[16384,3072,2] parameter(0)\n reshape.1 = bf16[16384,6144] reshape(param)\n reshape.2 = bf16[1,16384,6144] reshape(reshape.1)\n all-reduce = bf16[1,16384,6144] all-reduce(reshape.2), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=add\n constant = s32[] constant(0)\n ROOT dynamic-slice = bf16[1,16384,384] dynamic-slice(all-reduce, constant, constant, constant), dynamic_slice_sizes={1,16384,384}\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunCollectiveTransformationReorderer(module.get()));\n EXPECT_TRUE(changed);\n TF_ASSERT_OK(HloVerifier(false,\n true)\n .Run(module.get())\n .status());\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::DynamicSlice(op::Reshape(op::Reshape(op::AllReduce(op::Parameter()))),\n op::Constant(), op::Constant(), op::Constant()));\n}\nTEST_F(CollectiveTransformationReordererTest, AllReduceReshapeWithTwoUsers) {\n absl::string_view hlo_string = R\"(\n HloModule module\n add {\n a = bf16[] parameter(0)\n b = bf16[] parameter(1)\n ROOT s = bf16[] add(a, b)\n }\n ENTRY entry {\n param = bf16[16384,6144] parameter(0)\n reshape = bf16[1,16384,6144] reshape(param)\n all-reduce = bf16[1,16384,6144] all-reduce(reshape), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=add\n constant = s32[] constant(0)\n dynamic-slice = bf16[1,16384,384] dynamic-slice(all-reduce, constant, constant, constant), dynamic_slice_sizes={1,16384,384}\n copy = bf16[1,16384,6144] copy(reshape)\n ROOT tuple = (bf16[1,16384,6144], bf16[1,16384,384]) tuple(copy, dynamic-slice)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunCollectiveTransformationReorderer(module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(CollectiveTransformationReordererTest, AllReduceWithTwoUsersReshape) {\n absl::string_view hlo_string 
= R\"(\n HloModule module\n add {\n a = bf16[] parameter(0)\n b = bf16[] parameter(1)\n ROOT s = bf16[] add(a, b)\n }\n ENTRY entry {\n param = bf16[16384,6144] parameter(0)\n reshape = bf16[1,16384,6144] reshape(param)\n all-reduce = bf16[1,16384,6144] all-reduce(reshape), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=add\n constant = s32[] constant(0)\n dynamic-slice = bf16[1,16384,384] dynamic-slice(all-reduce, constant, constant, constant), dynamic_slice_sizes={1,16384,384}\n copy = bf16[1,16384,6144] copy(all-reduce)\n ROOT tuple = (bf16[1,16384,6144], bf16[1,16384,384]) tuple(copy, dynamic-slice)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunCollectiveTransformationReorderer(module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(CollectiveTransformationReordererTest, AllReduceConstrainLayout) {\n absl::string_view hlo_string = R\"(\n HloModule module\n add {\n a = bf16[] parameter(0)\n b = bf16[] parameter(1)\n ROOT s = bf16[] add(a, b)\n }\n ENTRY entry {\n param = bf16[16384,6144] parameter(0)\n reshape = bf16[1,16384,6144] reshape(param)\n all-reduce = bf16[1,16384,6144] all-reduce(reshape), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, constrain_layout=true, to_apply=add\n constant = s32[] constant(0)\n ROOT dynamic-slice = bf16[1,16384,384] dynamic-slice(all-reduce, constant, constant, constant), dynamic_slice_sizes={1,16384,384}\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunCollectiveTransformationReorderer(module.get()));\n EXPECT_FALSE(changed);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_transformation_reorderer.cc"},"Test Code 
Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_transformation_reorderer_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1125,"cells":{"ID":{"kind":"string","value":"df627059-2ef8-47c7-880a-5779a2e8d0a0"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"hlo_unstacker"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/hlo_unstacker.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/hlo_unstacker_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/hlo_unstacker.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/status.h\"\n#include \"absl/strings/match.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/map_util.h\"\n#include \"xla/service/hlo_creation_utils.h\"\n#include \"xla/service/pattern_matcher.h\"\n#include \"xla/service/tuple_util.h\"\n#include \"xla/service/while_loop_unroller.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nenum class PatternType {\n DSFusionNoBitcastPattern,\n DSFusionPattern,\n NestedDSFusionPattern,\n Other,\n};\nstatic std::string PatternTypeToString(PatternType pattern_type) {\n switch (pattern_type) {\n case 
PatternType::DSFusionNoBitcastPattern:\n return \"DSFusionNoBitcastPattern\";\n case PatternType::DSFusionPattern:\n return \"DSFusionPattern\";\n case PatternType::NestedDSFusionPattern:\n return \"NestedDSFusionPattern\";\n case PatternType::Other:\n return \"Other\";\n }\n}\nstruct PatternInfo {\n PatternType type;\n std::vector unstacked_instrs;\n const HloInstruction* instr;\n Shape unstacked_shape;\n HloComputation* unstacking_computation;\n std::string ToString() const {\n if (unstacking_computation == nullptr) {\n return absl::StrCat(\"type: \\n\\t\", PatternTypeToString(type), \"\\n\",\n \"instr: \\n\\t\", instr->name(), \"\\n\", \"shape: \\n\\t\",\n unstacked_shape.ToString(true));\n } else {\n return absl::StrCat(\"type: \\n\\t\", PatternTypeToString(type), \"\\n\",\n \"instr: \\n\\t\", instr->name(), \"\\n\", \"shape: \\n\\t\",\n unstacked_shape.ToString(true), \"\\n\", \"comp: \\n\",\n unstacking_computation->name());\n }\n }\n};\nstruct UnstackerMetadata {\n static absl::StatusOr Create(\n HloModule* module, std::function unfuse_slice) {\n UnstackerMetadata metadata;\n TF_ASSIGN_OR_RETURN(\n bool prepared,\n WhileLoopUnroller::PrepareModuleForUnrolling(module, {}));\n if (prepared) {\n VLOG(3) << \"Prepared module: \" << module->name() << \" for unstacking.\";\n }\n std::vector> loops =\n WhileLoopUnroller::GetUnrollableLoops(module, {},\n std::nullopt);\n for (const auto& [instr, while_loop_config] : loops) {\n metadata.unrollable_loop_bodies[instr->while_body()] = while_loop_config;\n metadata.bodies[instr->while_body()] = instr;\n }\n metadata.unfuse_slice = unfuse_slice;\n return metadata;\n }\n absl::flat_hash_map unrollable_loop_bodies;\n absl::flat_hash_map bodies;\n std::vector<\n std::pair(\n const UnstackerMetadata&, const HloInstruction*, int64_t)>,\n std::function>>\n custom_handlers;\n std::function unfuse_slice;\n};\nclass UnstackerTransformer {\n public:\n explicit UnstackerTransformer(const UnstackerMetadata& metadata)\n : 
metadata_(metadata) {}\n std::vector HandleInstruction(\n const HloInstruction* instr, int64_t changed_idx) {\n if (instr->opcode() != HloOpcode::kFusion) {\n return {};\n }\n VLOG(3) << \"HandleInstruction(\" << instr->shape().ToString()\n << instr->name() << \", \" << changed_idx << \")\";\n for (const auto& [custom_pattern, custom_handler] :\n metadata_.custom_handlers) {\n std::optional stacked_user =\n custom_pattern(metadata_, instr, changed_idx);\n if (!stacked_user.has_value()) {\n continue;\n }\n PatternInfo& pattern_info = stacked_user.value();\n pattern_type_ = pattern_info.type;\n VLOG(3) << \"PatternInfo:\" << \"\\n\" << pattern_info.ToString();\n if (pattern_info.unstacking_computation != nullptr &&\n unstacking_computation_ != nullptr) {\n if (!absl::EqualsIgnoreCase(\n pattern_info.unstacking_computation->ToString(\n HloPrintOptions::Fingerprint()),\n unstacking_computation_->ToString(\n HloPrintOptions::Fingerprint()))) {\n VLOG(3) << \"Seen multiple unstacking computations, cannot handle: \"\n << \"\\n previous computations: \\n\"\n << unstacking_computation_->ToString(\n HloPrintOptions::Fingerprint())\n << \"\\n current computations: \\n\"\n << pattern_info.unstacking_computation->ToString(\n HloPrintOptions::Fingerprint());\n return {};\n }\n }\n if (pattern_info.unstacking_computation != nullptr) {\n unstacking_computation_ = pattern_info.unstacking_computation;\n }\n unstacked_shape_ = std::make_unique(pattern_info.unstacked_shape);\n unstacked_instrs_.push_back(instr);\n std::function unstack_wrapper =\n [&custom_handler = custom_handler,\n pattern_info]() mutable -> absl::Status {\n HloInstruction* mutable_dynamic_slicing_fusion =\n const_cast(pattern_info.instr);\n return custom_handler(mutable_dynamic_slicing_fusion,\n pattern_info.unstacked_shape.tuple_shapes(0));\n };\n body_changes_.push_back(unstack_wrapper);\n return pattern_info.unstacked_instrs;\n }\n return {};\n }\n const UnstackerMetadata& GetMetadata() const { return metadata_; 
}\n std::vector& GetUnstackedInstructions() {\n return unstacked_instrs_;\n }\n const Shape* GetUnstackedShape() const { return unstacked_shape_.get(); }\n HloComputation* GetUnstackingComputation() const {\n return unstacking_computation_;\n }\n std::vector>&\n GetLoopChanges() {\n return loop_changes_;\n }\n std::vector>& GetBodyChanges() {\n return body_changes_;\n }\n absl::flat_hash_map>&\n GetOperandChanges() {\n return operand_changes_;\n }\n void AddOperandChange(HloInstruction* instr, int64_t index) {\n operand_changes_[instr].push_back(index);\n }\n void AddLoopChange(\n std::function loop_change) {\n loop_changes_.push_back(loop_change);\n }\n PatternType GetPatternType() const { return pattern_type_; }\n private:\n PatternType pattern_type_;\n const UnstackerMetadata& metadata_;\n std::unique_ptr unstacked_shape_ = nullptr;\n HloComputation* unstacking_computation_ = nullptr;\n std::vector> loop_changes_;\n std::vector> body_changes_;\n absl::flat_hash_map> operand_changes_;\n std::vector unstacked_instrs_;\n};\nbool CanUnstackWhileOperand(const HloInstruction* while_instr,\n UnstackerTransformer& unstacker, int64_t index);\nbool UnstackWhileOperandAtIndex(\n const UnstackerMetadata& metadata, HloInstruction* while_instr,\n int64_t index, std::vector& unstacked_instructions);\nbool PropagateGteShapeChange(HloInstruction* gte,\n UnstackerTransformer& unstacker) {\n VLOG(5) << \"PropagateGteShapeChange(\" << gte->name() << \")\";\n std::vector handled_instrs;\n absl::flat_hash_map visited;\n std::deque worklist;\n worklist.push_back(gte);\n visited.insert({gte, gte->tuple_index()});\n while (!worklist.empty()) {\n HloInstruction* changed_instr_to_propagate = worklist.front();\n int64_t changed_operand_index =\n FindOrDie(visited, changed_instr_to_propagate);\n worklist.pop_front();\n for (HloInstruction* user : changed_instr_to_propagate->users()) {\n if (ContainsKey(visited, user)) {\n continue;\n }\n if (user->opcode() == HloOpcode::kGetTupleElement) 
{\n if (user->tuple_index() != changed_operand_index) {\n continue;\n }\n visited.insert({user, changed_operand_index});\n worklist.push_back(user);\n } else if (user->opcode() == HloOpcode::kTuple) {\n int64_t use_index = user->operand_index(changed_instr_to_propagate);\n visited.insert({user, {use_index}});\n worklist.push_back(user);\n } else if (user->opcode() == HloOpcode::kWhile) {\n bool changed_nested_while =\n CanUnstackWhileOperand(user, unstacker, changed_operand_index);\n if (!changed_nested_while) {\n return false;\n }\n visited.insert({user, changed_operand_index});\n worklist.push_back(user);\n } else {\n if (absl::c_find(handled_instrs, user) != handled_instrs.end()) {\n continue;\n }\n if (user->IsCustomCall(\"DynamicGte\") ||\n user->IsCustomCall(\"DynamicTuple\")) {\n continue;\n }\n int64_t use_index = user->operand_index(changed_instr_to_propagate);\n std::vector curr_handled_instrs =\n unstacker.HandleInstruction(user, use_index);\n if (curr_handled_instrs.empty()) {\n VLOG(3) << \"Custom unstacker not found for \" << user->name();\n return false;\n }\n for (const HloInstruction* instr : curr_handled_instrs) {\n for (HloInstruction* handled_instr_user : instr->users()) {\n if (user->shape() == gte->shape()) {\n visited.insert({handled_instr_user, changed_operand_index});\n worklist.push_back(handled_instr_user);\n }\n }\n handled_instrs.push_back(instr);\n }\n }\n }\n }\n for (const auto& [instr, index] : visited) {\n unstacker.AddOperandChange(instr, index);\n }\n return true;\n}\nbool CanPropagateGteShapeChangesInComputation(\n const HloComputation* comp, const HloInstruction* operand,\n UnstackerTransformer& shape_transformer, int64_t idx) {\n VLOG(3) << \"Propagating shape change of index \" << idx\n << \" in : \" << comp->name();\n for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {\n if (instr->opcode() == HloOpcode::kGetTupleElement &&\n instr->tuple_index() == idx) {\n if (instr->operand(0) != operand) {\n continue;\n }\n 
bool can_propagate = PropagateGteShapeChange(instr, shape_transformer);\n if (!can_propagate) {\n VLOG(3) << \"Failed to propagate shape change for \" << instr->name();\n return false;\n }\n }\n }\n VLOG(3) << \"Finish propagating shape change of index \" << idx\n << \" in: \" << comp->name();\n return true;\n}\nvoid UnstackWhileInput(const UnstackerTransformer& unstacker,\n HloInstruction* while_instr, int64_t index) {\n VLOG(3) << \"Unstacking while input: \" << while_instr->name() << \" at \"\n << index;\n const Shape* new_shape = unstacker.GetUnstackedShape();\n HloComputation* unstacking_computation = unstacker.GetUnstackingComputation();\n const Shape& slice_shape = new_shape->tuple_shapes(0);\n HloInstruction* old_while_input =\n while_instr->while_init()->mutable_operand(index);\n if (old_while_input->shape().IsTuple()) {\n VLOG(3) << \"Input is already unstacked: \" << old_while_input->name();\n return;\n }\n std::vector slices;\n if (old_while_input->IsCustomCall(\"AllocateBuffer\")) {\n for (int64_t i = 0; i < new_shape->tuple_shapes_size(); ++i) {\n slices.push_back(while_instr->AddInstruction(\n HloInstruction::CreateCustomCall(slice_shape, {}, \"AllocateBuffer\")));\n }\n } else {\n for (int64_t i = 0; i < new_shape->tuple_shapes_size(); ++i) {\n HloInstruction* root_instr = unstacking_computation->root_instruction();\n HloInstruction* slice = nullptr;\n if (unstacker.GetPatternType() == PatternType::DSFusionPattern ||\n unstacker.GetPatternType() == PatternType::NestedDSFusionPattern ||\n unstacker.GetPatternType() == PatternType::DSFusionNoBitcastPattern) {\n HloInstruction* dynamic_slice = nullptr;\n if (unstacker.GetPatternType() == PatternType::DSFusionPattern ||\n unstacker.GetPatternType() == PatternType::NestedDSFusionPattern) {\n dynamic_slice = root_instr->mutable_operand(0);\n } else if (unstacker.GetPatternType() ==\n PatternType::DSFusionNoBitcastPattern) {\n dynamic_slice = root_instr;\n }\n std::vector new_start_indices;\n 
new_start_indices.reserve(dynamic_slice->shape().rank());\n std::vector new_limit_indices;\n new_limit_indices.reserve(dynamic_slice->shape().rank());\n std::vector new_strides;\n new_strides.reserve(dynamic_slice->shape().rank());\n new_start_indices.push_back(i);\n new_limit_indices.push_back(i + 1);\n new_strides.push_back(1);\n for (int64_t j = 1; j < dynamic_slice->shape().rank(); ++j) {\n new_start_indices.push_back(0);\n new_limit_indices.push_back(\n dynamic_slice->mutable_operand(0)->shape().dimensions(j));\n new_strides.push_back(1);\n }\n slice = while_instr->AddInstruction(HloInstruction::CreateSlice(\n dynamic_slice->shape(), old_while_input, new_start_indices,\n new_limit_indices, new_strides));\n }\n if (slice == nullptr || !unstacker.GetMetadata().unfuse_slice(slice)) {\n std::vector operands = {\n old_while_input,\n while_instr->AddInstruction(MakeScalarConstantWithShape(\n unstacking_computation->parameter_instruction(1)->shape(), i))};\n slice = while_instr->AddInstruction(HloInstruction::CreateFusion(\n slice_shape, HloInstruction::FusionKind::kLoop, operands,\n while_instr->GetModule()->AddEmbeddedComputation(\n unstacking_computation->Clone()),\n \"hoisted\"));\n }\n slices.push_back(slice);\n }\n }\n HloInstruction* new_operand_element =\n while_instr->AddInstruction(HloInstruction::CreateTuple(slices));\n HloInstruction* new_while_init =\n TupleUtil::ReplaceTupleWith(new_operand_element,\n while_instr->while_init(), {index}, false)\n .value();\n CHECK_OK(while_instr->ReplaceOperandWithDifferentShape(0, new_while_init));\n}\nbool CanUnstackWhileOperand(const HloInstruction* while_instr,\n UnstackerTransformer& unstacker, int64_t index) {\n VLOG(5) << \"ReplaceWhileOperandShape: \" << while_instr->name() << \" at \"\n << index;\n bool body_changes_collected = CanPropagateGteShapeChangesInComputation(\n while_instr->while_body(),\n while_instr->while_body()->parameter_instruction(0), unstacker, index);\n if (!body_changes_collected) {\n return 
false;\n }\n bool condition_changes_collected = CanPropagateGteShapeChangesInComputation(\n while_instr->while_condition(),\n while_instr->while_condition()->parameter_instruction(0), unstacker,\n index);\n if (!condition_changes_collected) {\n return false;\n }\n bool parent_changes_collected = CanPropagateGteShapeChangesInComputation(\n while_instr->parent(), while_instr, unstacker, index);\n if (!parent_changes_collected) {\n VLOG(3) << \"Failed: parent_changes_collected\";\n return false;\n }\n HloInstruction* root_operand =\n while_instr->while_body()->root_instruction()->mutable_operand(index);\n if (root_operand == nullptr) {\n return false;\n }\n HloInstruction* gte_operand = nullptr;\n if (Match(root_operand, match::GetTupleElement(match::Op(&gte_operand)))) {\n if (Match(gte_operand, match::While())) {\n VLOG(3) << \"Faced a gte originating from loop: \"\n << root_operand->ToString();\n bool loop_feeding_root_changes_collected = CanUnstackWhileOperand(\n root_operand->operand(0), unstacker, root_operand->tuple_index());\n if (!loop_feeding_root_changes_collected) {\n VLOG(3) << \"Failed: loop \" << root_operand->operand(0)->name()\n << \" output at \" << index << \" is not unstackable\";\n return false;\n }\n } else if (!Match(gte_operand, match::Parameter().WithParameterNum(0))) {\n VLOG(3) << \"Failed: root operand of while_body at \" << index\n << \" is not a parameter\";\n return false;\n }\n }\n auto loop_change = [=](const UnstackerTransformer& unstacker,\n HloInstruction* loop, int64_t idx) mutable {\n Shape old_shape = ShapeUtil::MakeStaticShape(\n loop->while_body()->parameter_instruction(0)->shape());\n ShapeUtil::UpdateTupleShape(*unstacker.GetUnstackedShape(), idx,\n &old_shape);\n loop->while_body()->ReplaceParameter(\n 0, HloInstruction::CreateParameter(0, old_shape, \"unstacked\"));\n loop->while_condition()->ReplaceParameter(\n 0, HloInstruction::CreateParameter(0, old_shape, \"unstacked\"));\n 
CHECK_NE(unstacker.GetUnstackingComputation(), nullptr);\n UnstackWhileInput(unstacker, loop, idx);\n *loop->mutable_shape() = old_shape;\n };\n auto loop_change_wrapper = [&loop_change, while_instr,\n index](const UnstackerTransformer& unstacker) {\n HloInstruction* mutable_loop = const_cast(while_instr);\n loop_change(unstacker, mutable_loop, index);\n };\n unstacker.AddLoopChange(loop_change_wrapper);\n return true;\n}\nbool UnstackWhileOperandAtIndex(\n const UnstackerMetadata& metadata, HloInstruction* while_instr,\n int64_t index, std::vector& unstacked_instructions) {\n UnstackerTransformer unstacker = UnstackerTransformer(metadata);\n bool can_unstack = CanUnstackWhileOperand(while_instr, unstacker, index);\n if (!can_unstack) {\n VLOG(3) << \"Unstacking failed for \" << while_instr->name() << \" at \"\n << index;\n return false;\n }\n if (unstacker.GetUnstackedShape() == nullptr) {\n VLOG(3) << \"Failed: unstacked shape is null\";\n return false;\n }\n if (unstacker.GetUnstackingComputation() == nullptr) {\n VLOG(3) << \"Failed: unstacking computation is null\";\n return false;\n }\n for (auto& [instr, indices] : unstacker.GetOperandChanges()) {\n switch (instr->opcode()) {\n case HloOpcode::kGetTupleElement:\n VLOG(3) << \"Changing shape of: \" << instr->name();\n *instr->mutable_shape() = *unstacker.GetUnstackedShape();\n break;\n case HloOpcode::kTuple: {\n for (int64_t index : indices) {\n VLOG(3) << \"Changing shape of: \" << instr->name() << \" at \" << index;\n *instr->mutable_shape()->mutable_tuple_shapes(index) =\n *unstacker.GetUnstackedShape();\n }\n break;\n }\n case HloOpcode::kWhile:\n for (int64_t index : indices) {\n VLOG(3) << \"Changing shape of: \" << instr->name() << \" at \" << index;\n ShapeUtil::UpdateTupleShape(*unstacker.GetUnstackedShape(), index,\n instr->mutable_shape());\n }\n break;\n default:\n LOG(FATAL) << \"Unsupported opcode: \" << instr->name();\n }\n }\n for (const auto& body_change : unstacker.GetBodyChanges()) {\n 
CHECK_OK(body_change());\n }\n for (auto& loop_change : unstacker.GetLoopChanges()) {\n loop_change(unstacker);\n }\n for (const HloInstruction* instr : unstacker.GetUnstackedInstructions()) {\n unstacked_instructions.push_back(instr);\n }\n return true;\n}\nShape MakeUnstackedShapeFromSlice(const Shape& slice_shape, int64_t layers) {\n std::vector shapes;\n shapes.reserve(layers);\n for (int64_t i = 0; i < layers; ++i) {\n shapes.push_back(slice_shape);\n }\n return ShapeUtil::MakeTupleShape(shapes);\n}\nstd::optional IsFusionInsideUnrollableLoopWithNumParameter(\n const UnstackerMetadata& metadata, const HloInstruction* instr,\n int64_t num_fusion_params) {\n if (instr->opcode() != HloOpcode::kFusion) {\n return std::nullopt;\n }\n if (instr->fused_parameters().size() != num_fusion_params) {\n VLOG(3) << \"Fusion has different number of parameters\";\n return std::nullopt;\n }\n if (!metadata.unrollable_loop_bodies.contains(instr->parent())) {\n VLOG(5) << \"Fusion not inside unrollable while body, \" << instr->name()\n << \" inside \" << instr->parent()->name();\n return std::nullopt;\n }\n return metadata.unrollable_loop_bodies.at(instr->parent());\n}\nHloInstruction* GetMostMajorEffectivelyStaticDynamicSliceInFusion(\n const UnstackerMetadata& metadata, const HloInstruction* instr,\n int64_t num_fusion_params, int64_t stacked_operand_idx) {\n std::optional while_instr_config =\n IsFusionInsideUnrollableLoopWithNumParameter(metadata, instr,\n num_fusion_params);\n if (!while_instr_config.has_value()) {\n return nullptr;\n }\n for (HloInstruction* fused_instr :\n instr->fused_instructions_computation()->MakeInstructionPostOrder()) {\n std::optional dynamic_index =\n MatchEffectivelyStaticDynamicSliceInsideLoop(\n fused_instr,\n instr->fused_instructions_computation()->parameter_instruction(\n stacked_operand_idx),\n while_instr_config.value());\n if (dynamic_index.has_value() && dynamic_index.value() == 0) {\n return fused_instr;\n }\n }\n return 
nullptr;\n}\nHloInstruction* GetMostMajorShapeCoveringDynamicIndexInFusion(\n const UnstackerMetadata& metadata, const HloInstruction* instr,\n HloOpcode opcode, int64_t num_fusion_params, int64_t stacked_operand_idx) {\n std::optional while_instr_config =\n IsFusionInsideUnrollableLoopWithNumParameter(metadata, instr,\n num_fusion_params);\n if (!while_instr_config.has_value()) {\n return nullptr;\n }\n for (HloInstruction* fused_instr :\n instr->fused_instructions_computation()->MakeInstructionPostOrder()) {\n if (fused_instr->opcode() != opcode) {\n continue;\n }\n std::optional dynamic_index =\n MatchShapeCoveringDynamicIndexInstruction(\n fused_instr,\n instr->fused_instructions_computation()->parameter_instruction(\n stacked_operand_idx),\n opcode, while_instr_config.value());\n if (dynamic_index.has_value() && dynamic_index.value() == 0) {\n return fused_instr;\n }\n }\n return nullptr;\n}\nstd::optional GetDSFusionPattern(const UnstackerMetadata& metadata,\n const HloInstruction* instr,\n int64_t stacked_operand_idx) {\n VLOG(3) << \"Checking DSFusion\";\n HloInstruction* shape_covering_instr =\n GetMostMajorEffectivelyStaticDynamicSliceInFusion(metadata, instr, 2,\n stacked_operand_idx);\n if (shape_covering_instr == nullptr) {\n return std::nullopt;\n }\n HloInstruction* bitcast_operand = nullptr;\n if (Match(instr->fused_instructions_computation()->root_instruction(),\n match::Bitcast(match::Op(&bitcast_operand)))) {\n if (bitcast_operand == shape_covering_instr) {\n PatternInfo pattern_info;\n pattern_info.type = PatternType::DSFusionPattern;\n pattern_info.instr = instr;\n const Shape& slice_shape = shape_covering_instr->shape();\n const int64_t num_layers = instr->operand(0)->shape().dimensions(0);\n pattern_info.unstacked_shape =\n MakeUnstackedShapeFromSlice(slice_shape, num_layers);\n pattern_info.unstacking_computation =\n instr->fused_instructions_computation();\n pattern_info.unstacked_instrs.push_back(instr);\n return pattern_info;\n }\n }\n 
return std::nullopt;\n}\nabsl::Status UnstackDSFusionPattern(\n HloInstruction* mutable_dynamic_slicing_fusion, const Shape& slice_shape) {\n HloComputation* parent_loop = mutable_dynamic_slicing_fusion->parent();\n HloInstruction* stacked = mutable_dynamic_slicing_fusion->mutable_operand(0);\n HloInstruction* offset = mutable_dynamic_slicing_fusion->mutable_operand(1);\n HloInstruction* new_operand =\n parent_loop->AddInstruction(HloInstruction::CreateCustomCall(\n slice_shape, {stacked, offset}, \"DynamicGte\"));\n HloInstruction* bitcast = mutable_dynamic_slicing_fusion->AddInstruction(\n HloInstruction::CreateBitcast(mutable_dynamic_slicing_fusion->shape(),\n new_operand));\n return mutable_dynamic_slicing_fusion->ReplaceAllUsesWithDifferentShape(\n bitcast);\n}\nstd::optional GetDSFusionNoBitcastPattern(\n const UnstackerMetadata& metadata, const HloInstruction* instr,\n int64_t stacked_operand_idx) {\n VLOG(3) << \"Checking DSFusionNoBitcast\";\n HloInstruction* shape_covering_instr =\n GetMostMajorEffectivelyStaticDynamicSliceInFusion(metadata, instr, 2,\n stacked_operand_idx);\n if (shape_covering_instr == nullptr) {\n return std::nullopt;\n }\n if (instr->fused_instructions_computation()->root_instruction() !=\n shape_covering_instr) {\n return std::nullopt;\n }\n PatternInfo pattern_info;\n pattern_info.type = PatternType::DSFusionNoBitcastPattern;\n pattern_info.instr = instr;\n const Shape& slice_shape = shape_covering_instr->shape();\n const int64_t num_layers = instr->operand(0)->shape().dimensions(0);\n pattern_info.unstacked_shape =\n MakeUnstackedShapeFromSlice(slice_shape, num_layers);\n pattern_info.unstacking_computation = instr->fused_instructions_computation();\n pattern_info.unstacked_instrs.push_back(instr);\n return pattern_info;\n}\nabsl::Status UnstackDSFusionNoBitcastPattern(\n HloInstruction* mutable_dynamic_slicing_fusion, const Shape& slice_shape) {\n HloComputation* parent_loop = mutable_dynamic_slicing_fusion->parent();\n 
HloInstruction* stacked = mutable_dynamic_slicing_fusion->mutable_operand(0);\n HloInstruction* offset = mutable_dynamic_slicing_fusion->mutable_operand(1);\n HloInstruction* new_operand =\n parent_loop->AddInstruction(HloInstruction::CreateCustomCall(\n slice_shape, {stacked, offset}, \"DynamicGte\"));\n return mutable_dynamic_slicing_fusion->ReplaceAllUsesWithDifferentShape(\n new_operand);\n}\nstd::optional GetDUSFusionPattern(\n const UnstackerMetadata& metadata, const HloInstruction* instr,\n int64_t stacked_operand_idx) {\n VLOG(3) << \"Checking DUSFusion\";\n HloInstruction* shape_covering_instr =\n GetMostMajorShapeCoveringDynamicIndexInFusion(\n metadata, instr, HloOpcode::kDynamicUpdateSlice, 3,\n stacked_operand_idx);\n if (shape_covering_instr == nullptr) {\n return std::nullopt;\n }\n if (Match(shape_covering_instr->operand(1),\n match::Bitcast(match::Parameter()))) {\n if (shape_covering_instr->parent()->root_instruction() ==\n shape_covering_instr) {\n PatternInfo pattern_info;\n pattern_info.type = PatternType::Other;\n pattern_info.instr = instr;\n pattern_info.unstacked_shape = MakeUnstackedShapeFromSlice(\n instr->operand(2)->shape(), instr->operand(0)->shape().dimensions(0));\n pattern_info.unstacking_computation = nullptr;\n pattern_info.unstacked_instrs.push_back(instr);\n return pattern_info;\n }\n }\n return std::nullopt;\n}\nabsl::Status UnstackDUSFusionPattern(\n HloInstruction* mutable_dynamic_update_slicing_fusion,\n const Shape& slice_shape) {\n HloComputation* parent_loop = mutable_dynamic_update_slicing_fusion->parent();\n HloInstruction* stacked =\n mutable_dynamic_update_slicing_fusion->mutable_operand(0);\n HloInstruction* offset =\n mutable_dynamic_update_slicing_fusion->mutable_operand(1);\n HloInstruction* update =\n mutable_dynamic_update_slicing_fusion->mutable_operand(2);\n HloInstruction* new_operand =\n parent_loop->AddInstruction(HloInstruction::CreateCustomCall(\n stacked->shape(), {stacked, update, offset}, 
\"DynamicTuple\"));\n for (HloInstruction* user : mutable_dynamic_update_slicing_fusion->users()) {\n TF_RETURN_IF_ERROR(\n mutable_dynamic_update_slicing_fusion->ReplaceUseWithDifferentShape(\n user, new_operand));\n }\n return absl::OkStatus();\n}\nstd::optional GetDUSFusionWithPadPattern(\n const UnstackerMetadata& metadata, const HloInstruction* instr,\n int64_t stacked_operand_idx) {\n VLOG(3) << \"Checking DUSFusionWithPad\";\n HloInstruction* shape_covering_instr =\n GetMostMajorShapeCoveringDynamicIndexInFusion(\n metadata, instr, HloOpcode::kDynamicUpdateSlice, 3,\n stacked_operand_idx);\n if (shape_covering_instr == nullptr) {\n return std::nullopt;\n }\n if (Match(\n shape_covering_instr->operand(1),\n match::Bitcast(match::Pad(match::Parameter(), match::Constant())))) {\n if (shape_covering_instr->parent()->root_instruction() ==\n shape_covering_instr) {\n const HloInstruction* pad_instr =\n shape_covering_instr->operand(1)->operand(0);\n PatternInfo pattern_info;\n pattern_info.type = PatternType::Other;\n pattern_info.instr = instr;\n pattern_info.unstacked_shape = MakeUnstackedShapeFromSlice(\n pad_instr->shape(),\n shape_covering_instr->operand(0)->shape().dimensions(0));\n pattern_info.unstacking_computation = nullptr;\n pattern_info.unstacked_instrs.push_back(instr);\n return pattern_info;\n }\n }\n return std::nullopt;\n}\nabsl::Status UnstackDUSFusionWithPadPattern(\n HloInstruction* mutable_dynamic_update_slicing_fusion,\n const Shape& slice_shape) {\n HloComputation* parent_loop = mutable_dynamic_update_slicing_fusion->parent();\n HloComputation* fused_computation =\n mutable_dynamic_update_slicing_fusion->fused_instructions_computation();\n HloInstruction* stacked =\n mutable_dynamic_update_slicing_fusion->mutable_operand(\n fused_computation->root_instruction()\n ->mutable_operand(0)\n ->parameter_number());\n HloInstruction* offset =\n mutable_dynamic_update_slicing_fusion->mutable_operand(\n fused_computation->root_instruction()\n 
->mutable_operand(2)\n ->parameter_number());\n HloInstruction* pad_instr = fused_computation->root_instruction()\n ->mutable_operand(1)\n ->mutable_operand(0);\n fused_computation->set_root_instruction(pad_instr, true);\n *mutable_dynamic_update_slicing_fusion->mutable_shape() = pad_instr->shape();\n HloInstruction* new_operand =\n parent_loop->AddInstruction(HloInstruction::CreateCustomCall(\n stacked->shape(),\n {stacked, mutable_dynamic_update_slicing_fusion, offset},\n \"DynamicTuple\"));\n for (HloInstruction* user : mutable_dynamic_update_slicing_fusion->users()) {\n if (user != new_operand) {\n TF_RETURN_IF_ERROR(\n mutable_dynamic_update_slicing_fusion->ReplaceUseWithDifferentShape(\n user, new_operand));\n }\n }\n return absl::OkStatus();\n}\nstd::optional GetDSFusionWithAddPattern(\n const UnstackerMetadata& metadata, const HloInstruction* instr,\n int64_t stacked_operand_idx) {\n VLOG(3) << \"Checking DSFusionWithAdd\";\n HloInstruction* shape_covering_instr =\n GetMostMajorShapeCoveringDynamicIndexInFusion(\n metadata, instr, HloOpcode::kDynamicSlice, 2, stacked_operand_idx);\n if (shape_covering_instr == nullptr) {\n return std::nullopt;\n }\n HloComputation* fused_computation = instr->fused_instructions_computation();\n HloInstruction* fusion_root = fused_computation->root_instruction();\n HloInstruction* add_operand;\n if (Match(fusion_root,\n match::Reduce(match::Add(match::Op(&add_operand),\n match::Broadcast(match::Constant())),\n match::Constant()))) {\n if (add_operand == shape_covering_instr) {\n const int64_t num_layers = instr->operand(0)->shape().dimensions(0);\n PatternInfo pattern_info;\n pattern_info.type = PatternType::Other;\n pattern_info.instr = instr;\n pattern_info.unstacked_shape =\n MakeUnstackedShapeFromSlice(instr->shape(), num_layers);\n HloComputation::Builder builder(\"unstack_add\");\n HloInstruction* p0 =\n builder.AddInstruction(HloInstruction::CreateParameter(\n 0, fused_computation->parameter_instruction(0)->shape(), 
\"p0\"));\n HloInstruction* p1 =\n builder.AddInstruction(HloInstruction::CreateParameter(\n 1, fused_computation->parameter_instruction(1)->shape(), \"p1\"));\n HloInstruction* zero =\n builder.AddInstruction(MakeScalarConstantWithShape(p1->shape(), 0));\n std::vector slice_starts;\n slice_starts.reserve(shape_covering_instr->shape().rank());\n slice_starts.push_back(p1);\n for (int64_t i = 0; i < shape_covering_instr->shape().rank() - 1; i++) {\n slice_starts.push_back(zero);\n }\n HloInstruction* slice =\n builder.AddInstruction(HloInstruction::CreateDynamicSlice(\n shape_covering_instr->shape(), p0, slice_starts,\n shape_covering_instr->dynamic_slice_sizes()));\n HloInstruction* zero_reduce =\n builder.AddInstruction(MakeScalarConstantWithShape(\n ShapeUtil::MakeScalarShape(slice->shape().element_type()), 0));\n HloInstruction* reduce =\n builder.AddInstruction(HloInstruction::CreateReduce(\n instr->shape(), slice, zero_reduce, fusion_root->dimensions(),\n fused_computation->root_instruction()->to_apply()));\n HloComputation* unstack_add =\n instr->GetModule()->AddEmbeddedComputation(builder.Build());\n unstack_add->set_root_instruction(reduce);\n pattern_info.unstacking_computation = unstack_add;\n pattern_info.unstacked_instrs.push_back(instr);\n return pattern_info;\n }\n }\n return std::nullopt;\n}\nabsl::Status UnstackDSFusionWithAddPattern(\n HloInstruction* mutable_dynamic_slice_with_add_fusion,\n const Shape& slice_shape) {\n HloComputation* parent_loop = mutable_dynamic_slice_with_add_fusion->parent();\n HloInstruction* stacked =\n mutable_dynamic_slice_with_add_fusion->mutable_operand(0);\n HloInstruction* offset =\n mutable_dynamic_slice_with_add_fusion->mutable_operand(1);\n HloInstruction* new_operand =\n parent_loop->AddInstruction(HloInstruction::CreateCustomCall(\n slice_shape, {stacked, offset}, \"DynamicGte\"));\n HloInstruction* one = parent_loop->AddInstruction(MakeScalarConstantWithShape(\n 
ShapeUtil::MakeScalarShape(slice_shape.element_type()), 1));\n HloInstruction* broadcast = parent_loop->AddInstruction(\n HloInstruction::CreateBroadcast(slice_shape, one, {}));\n HloInstruction* add = mutable_dynamic_slice_with_add_fusion->AddInstruction(\n HloInstruction::CreateBinary(new_operand->shape(), HloOpcode::kAdd,\n new_operand, broadcast));\n TF_RETURN_IF_ERROR(\n mutable_dynamic_slice_with_add_fusion->ReplaceAllUsesWith(add));\n return absl::OkStatus();\n}\nstd::optional GetNestedDSFusionPattern(\n const UnstackerMetadata& metadata, const HloInstruction* instr,\n int64_t stacked_operand_idx) {\n if (instr->opcode() != HloOpcode::kFusion) {\n return std::nullopt;\n }\n if (!metadata.unrollable_loop_bodies.contains(instr->parent())) {\n VLOG(5) << \"Instruction not inside unrollable while body, \" << instr->name()\n << \" inside \" << instr->parent()->name();\n return std::nullopt;\n }\n WhileLoopConfig while_instr_config =\n metadata.unrollable_loop_bodies.at(instr->parent());\n VLOG(3) << \"Checking NestedDSFusionPattern\";\n HloInstruction* inner_fusion_user = nullptr;\n for (HloInstruction* fused_instr :\n instr->fused_instructions_computation()->MakeInstructionPostOrder()) {\n if (Match(fused_instr, match::Parameter(stacked_operand_idx))) {\n if (fused_instr->user_count() != 1) {\n return std::nullopt;\n }\n if (Match(fused_instr->users()[0],\n match::Fusion(match::Op(), match::Op()))) {\n inner_fusion_user = fused_instr->users()[0];\n break;\n }\n }\n }\n if (inner_fusion_user == nullptr) {\n return std::nullopt;\n }\n for (HloInstruction* inner_fusion_instr :\n inner_fusion_user->fused_instructions_computation()\n ->MakeInstructionPostOrder()) {\n if (!Match(inner_fusion_instr, match::DynamicSlice())) {\n continue;\n }\n std::optional dynamic_index =\n MatchEffectivelyStaticDynamicSliceInsideLoop(\n inner_fusion_instr,\n inner_fusion_user->fused_instructions_computation()\n ->parameter_instruction(0),\n while_instr_config);\n if 
(dynamic_index.has_value() && dynamic_index.value() == 0) {\n const int64_t num_layers =\n inner_fusion_user->operand(0)->shape().dimensions(0);\n PatternInfo pattern_info;\n pattern_info.type = PatternType::NestedDSFusionPattern;\n pattern_info.instr = inner_fusion_user;\n pattern_info.unstacked_shape =\n MakeUnstackedShapeFromSlice(inner_fusion_instr->shape(), num_layers);\n pattern_info.unstacking_computation =\n inner_fusion_user->fused_instructions_computation();\n pattern_info.unstacked_instrs.push_back(inner_fusion_user);\n return pattern_info;\n }\n }\n return std::nullopt;\n}\nabsl::Status UnstackNestedDSFusionPattern(\n HloInstruction* mutable_dynamic_slicing_fusion, const Shape& slice_shape) {\n HloInstruction* parent_fusion =\n mutable_dynamic_slicing_fusion->parent()->FusionInstruction();\n HloInstruction* stacked_in_ds_fusion =\n mutable_dynamic_slicing_fusion->mutable_operand(0);\n CHECK_EQ(stacked_in_ds_fusion->opcode(), HloOpcode::kParameter);\n int64_t stacked_param_number = stacked_in_ds_fusion->parameter_number();\n HloInstruction* stacked =\n parent_fusion->mutable_operand(stacked_param_number);\n HloInstruction* offset_in_ds_fusion =\n mutable_dynamic_slicing_fusion->mutable_operand(1);\n CHECK_EQ(offset_in_ds_fusion->opcode(), HloOpcode::kParameter);\n HloInstruction* offset =\n parent_fusion->mutable_operand(offset_in_ds_fusion->parameter_number());\n HloInstruction* sliced_param =\n parent_fusion->fused_instructions_computation()->ReplaceParameter(\n stacked_param_number,\n HloInstruction::CreateParameter(stacked_param_number, slice_shape,\n \"sliced\"));\n HloInstruction* bitcast = mutable_dynamic_slicing_fusion->AddInstruction(\n HloInstruction::CreateBitcast(mutable_dynamic_slicing_fusion->shape(),\n sliced_param));\n HloInstruction* bitcast_fusion =\n mutable_dynamic_slicing_fusion->AddInstruction(\n HloInstruction::CreateFusion(mutable_dynamic_slicing_fusion->shape(),\n HloInstruction::FusionKind::kLoop,\n bitcast));\n 
TF_RETURN_IF_ERROR(\n mutable_dynamic_slicing_fusion->ReplaceAllUsesWith(bitcast_fusion));\n HloInstruction* new_operand =\n parent_fusion->AddInstruction(HloInstruction::CreateCustomCall(\n slice_shape, {stacked, offset}, \"DynamicGte\"));\n return parent_fusion->ReplaceOperandWithDifferentShape(\n sliced_param->parameter_number(), new_operand);\n}\nstd::optional GetDSAndDUSPattern(const UnstackerMetadata& metadata,\n const HloInstruction* instr,\n int64_t stacked_operand_idx) {\n VLOG(3) << \"Checking DSAndDUSPattern\";\n if (instr->opcode() != HloOpcode::kFusion) {\n return std::nullopt;\n }\n const HloInstruction* stacked = instr->operand(stacked_operand_idx);\n if (stacked->user_count() != 2) {\n return std::nullopt;\n }\n HloInstruction* shape_covering_ds_instr =\n GetMostMajorShapeCoveringDynamicIndexInFusion(\n metadata, instr, HloOpcode::kDynamicSlice, 2, stacked_operand_idx);\n if (shape_covering_ds_instr == nullptr) {\n return std::nullopt;\n }\n HloInstruction* bitcast_operand = nullptr;\n if (!Match(instr->fused_instructions_computation()->root_instruction(),\n match::Bitcast(match::Op(&bitcast_operand)))) {\n return std::nullopt;\n }\n if (bitcast_operand != shape_covering_ds_instr) {\n return std::nullopt;\n }\n if (!GetDUSFusionPattern(metadata, stacked->users()[1],\n stacked->users()[1]->operand_index(stacked))) {\n return std::nullopt;\n }\n PatternInfo pattern_info;\n pattern_info.type = PatternType::Other;\n pattern_info.instr = instr;\n const Shape& slice_shape = instr->shape();\n const int64_t num_layers = instr->operand(0)->shape().dimensions(0);\n pattern_info.unstacked_shape =\n MakeUnstackedShapeFromSlice(slice_shape, num_layers);\n pattern_info.unstacking_computation = instr->fused_instructions_computation();\n pattern_info.unstacked_instrs.push_back(instr);\n pattern_info.unstacked_instrs.push_back(stacked->users()[1]);\n return pattern_info;\n}\nabsl::Status UnstackDSAndDUSPattern(HloInstruction* mutable_dynamic_slice,\n const Shape& 
slice_shape) {\n HloInstruction* stacked_gte = mutable_dynamic_slice->mutable_operand(0);\n int64_t stacked_gte_index = stacked_gte->tuple_index();\n HloComputation* parent = stacked_gte->parent();\n ShapeUtil::UpdateTupleShape(stacked_gte->shape(), stacked_gte_index,\n parent->root_instruction()->mutable_shape());\n HloComputation* parent_loop = mutable_dynamic_slice->parent();\n HloInstruction* stacked = mutable_dynamic_slice->mutable_operand(0);\n HloInstruction* offset = mutable_dynamic_slice->mutable_operand(1);\n HloInstruction* new_operand =\n parent_loop->AddInstruction(HloInstruction::CreateCustomCall(\n slice_shape, {stacked, offset}, \"DynamicGte\"));\n TF_RETURN_IF_ERROR(\n mutable_dynamic_slice->ReplaceAllUsesWithDifferentShape(new_operand));\n HloInstruction* mutable_dynamic_update_slice = stacked_gte->users()[1];\n TF_RETURN_IF_ERROR(\n UnstackDUSFusionPattern(mutable_dynamic_update_slice, slice_shape));\n return absl::OkStatus();\n}\nstd::optional GetReduceFusionPattern(\n const UnstackerMetadata& metadata, const HloInstruction* instr,\n int64_t stacked_operand_idx) {\n VLOG(3) << \"Checking ReduceFusion\";\n HloInstruction* shape_covering_instr =\n GetMostMajorShapeCoveringDynamicIndexInFusion(\n metadata, instr, HloOpcode::kDynamicSlice, 2, stacked_operand_idx);\n if (shape_covering_instr == nullptr) {\n return std::nullopt;\n }\n HloInstruction* reduce_operand = nullptr;\n HloInstruction* fusion_root =\n instr->fused_instructions_computation()->root_instruction();\n if (Match(fusion_root, match::Reduce(match::Op(&reduce_operand),\n match::ConstantScalar())) &&\n Match(fusion_root->to_apply()->root_instruction(),\n match::Add(match::Parameter(), match::Parameter()))) {\n if (reduce_operand == shape_covering_instr) {\n PatternInfo pattern_info;\n pattern_info.type = PatternType::Other;\n pattern_info.instr = instr;\n const Shape& slice_shape = instr->shape();\n const int64_t num_layers = instr->operand(0)->shape().dimensions(0);\n 
pattern_info.unstacked_shape =\n MakeUnstackedShapeFromSlice(slice_shape, num_layers);\n pattern_info.unstacking_computation =\n instr->fused_instructions_computation();\n pattern_info.unstacked_instrs.push_back(instr);\n return pattern_info;\n }\n }\n return std::nullopt;\n}\nabsl::Status UnstackReduceFusionPattern(HloInstruction* mutable_reduce_fusion,\n const Shape& slice_shape) {\n HloComputation* parent_loop = mutable_reduce_fusion->parent();\n HloInstruction* stacked = mutable_reduce_fusion->mutable_operand(0);\n HloInstruction* offset = mutable_reduce_fusion->mutable_operand(1);\n HloInstruction* new_operand =\n parent_loop->AddInstruction(HloInstruction::CreateCustomCall(\n slice_shape, {stacked, offset}, \"DynamicGte\"));\n return mutable_reduce_fusion->ReplaceAllUsesWithDifferentShape(new_operand);\n}\n}; \nabsl::StatusOr HloUnstacker::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n TF_ASSIGN_OR_RETURN(auto metadata,\n UnstackerMetadata::Create(module, unfuse_slice_));\n metadata.custom_handlers.push_back(\n std::make_pair(GetDSAndDUSPattern, UnstackDSAndDUSPattern));\n metadata.custom_handlers.push_back(\n std::make_pair(GetDSFusionPattern, UnstackDSFusionPattern));\n metadata.custom_handlers.push_back(\n std::make_pair(GetDUSFusionPattern, UnstackDUSFusionPattern));\n metadata.custom_handlers.push_back(std::make_pair(\n GetDUSFusionWithPadPattern, UnstackDUSFusionWithPadPattern));\n metadata.custom_handlers.push_back(\n std::make_pair(GetDSFusionWithAddPattern, UnstackDSFusionWithAddPattern));\n metadata.custom_handlers.push_back(\n std::make_pair(GetReduceFusionPattern, UnstackReduceFusionPattern));\n metadata.custom_handlers.push_back(\n std::make_pair(GetNestedDSFusionPattern, UnstackNestedDSFusionPattern));\n metadata.custom_handlers.push_back(std::make_pair(\n GetDSFusionNoBitcastPattern, UnstackDSFusionNoBitcastPattern));\n std::vector entry_loops;\n for (HloInstruction* instr :\n 
module->entry_computation()->MakeInstructionPostOrder()) {\n if (Match(instr, match::While(match::Tuple())) &&\n Match(instr->while_body()->root_instruction(), match::Tuple())) {\n entry_loops.push_back(instr);\n }\n }\n bool unstacked = false;\n std::vector unstacked_instructions;\n for (HloInstruction* loop : entry_loops) {\n for (int64_t i = 0; i < loop->shape().tuple_shapes_size(); ++i) {\n if (loop->while_init()->operand(i)->shape().IsTuple()) {\n continue;\n }\n VLOG(3) << \"Attempting to unstack \" << loop->name() << \" at \" << i\n << \" = \" << loop->while_init()->operand(i)->shape().ToString(true)\n << loop->while_init()->operand(i)->ToShortString();\n unstacked |=\n UnstackWhileOperandAtIndex(metadata, loop, i, unstacked_instructions);\n VLOG(3) << \"###################\";\n }\n }\n if (!unstacked) {\n return false;\n }\n TF_RETURN_IF_ERROR(module->RemoveUnusedComputations());\n std::vector loops_to_unroll;\n for (const HloInstruction* instr : unstacked_instructions) {\n HloInstruction* loop = metadata.bodies[instr->parent()];\n if (std::find(loops_to_unroll.begin(), loops_to_unroll.end(), loop) ==\n loops_to_unroll.end()) {\n loops_to_unroll.push_back(loop);\n }\n }\n for (int64_t i = loops_to_unroll.size() - 1; i >= 0; --i) {\n HloInstruction* loop = loops_to_unroll[i];\n TF_ASSIGN_OR_RETURN(UnrollResult unroll_result,\n WhileLoopUnroller::UnrollAndReturnReplacement(\n loop, -1,\n false,\n true, false));\n bool unrolled = unroll_result.unrolled;\n CHECK(unrolled);\n }\n VLOG(3) << \"after unstacking \\n\" << module->ToString();\n return true;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/hlo_unstacker.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include 
\"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nusing UnstackerTest = HloTestBase;\nint64_t GetInstrCountWithOpcodeInEntry(HloModule* module, HloOpcode opcode) {\n int64_t instr_with_opcode_count = 0;\n for (HloInstruction* instr :\n module->entry_computation()->MakeInstructionPostOrder()) {\n if (instr->opcode() == opcode) {\n instr_with_opcode_count++;\n }\n }\n return instr_with_opcode_count;\n}\nTEST_F(UnstackerTest, UnstackDSFusionPattern) {\n std::string hlo_string = R\"(\n HloModule SimpleLoop\n %fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {\n %param_0.51117 = s8[3,128,128] parameter(0)\n p1 = s32[] parameter(1)\n %constant.85694 = s32[] constant(0)\n %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128} \n ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)\n }\n %while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {\n wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n p0 = bf16[8,128] get-tuple-element(wide_p), index=1\n p1 = s8[3,128,128] get-tuple-element(wide_p), index=2\n one = s32[] constant(1)\n inc = s32[] add(i, one)\n %fusion.67830 = s8[128,128] fusion(s8[3,128,128] p1, i), kind=kLoop, calls=%fused_computation.slice \n conv = bf16[8,128] convolution(bf16[8,128] p0, s8[128,128] %fusion.67830), dim_labels=bf_io->bf \n ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, conv, p1)\n }\n %while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {\n wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n %constant.12857 = s32[] constant(3)\n ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT\n }\n ENTRY main {\n p0 = s8[3,128,128] parameter(0)\n p1 = bf16[8,128] 
parameter(1)\n init = s32[] constant(0)\n while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)\n while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body \n while_use = s8[3,128,128] get-tuple-element(while.out), index=2\n ROOT out = bf16[8,128] get-tuple-element(while.out), index=1\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto original = module->Clone();\n TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));\n EXPECT_TRUE(unstacked);\n EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 3);\n EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kFusion),\n 0);\n EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),\n std::nullopt, false));\n}\nTEST_F(UnstackerTest, NotUnstackDSFusionPattern) {\n std::string hlo_string = R\"(\n HloModule SimpleLoop\n %fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {\n %param_0.51117 = s8[3,128,128] parameter(0)\n p1 = s32[] parameter(1)\n %constant.85694 = s32[] constant(0)\n %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128} \n ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)\n }\n %fused_computation.tuple {\n %param_0.51117 = s8[3,128,128] parameter(0)\n mult = multiply(param_0.51117, param_0.51117)\n ROOT out = tuple(param_0.51117, mult)\n }\n %while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {\n wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n p0 = bf16[8,128] get-tuple-element(wide_p), index=1\n p1 = s8[3,128,128] get-tuple-element(wide_p), index=2\n one = s32[] constant(1)\n inc = s32[] add(i, one)\n %fusion.67830 = s8[128,128] 
fusion(s8[3,128,128] p1, i), kind=kLoop, calls=%fused_computation.slice\n conv = bf16[8,128] convolution(bf16[8,128] p0, s8[128,128] %fusion.67830), dim_labels=bf_io->bf\n fusion_mult = (s8[3,128,128], s8[3,128,128]) fusion(s8[3,128,128] p1), kind=kLoop, calls=%fused_computation.tuple\n mult = s8[3,128,128] get-tuple-element(fusion_mult), index=1\n ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, conv, mult)\n }\n %while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {\n wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n %constant.12857 = s32[] constant(3)\n ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT\n }\n ENTRY main {\n p0 = s8[3,128,128] parameter(0)\n p1 = bf16[8,128] parameter(1)\n init = s32[] constant(0)\n while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)\n while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body \n while_use = s8[3,128,128] get-tuple-element(while.out), index=2\n ROOT out = bf16[8,128] get-tuple-element(while.out), index=1\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto original = module->Clone();\n TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));\n EXPECT_FALSE(unstacked);\n}\nTEST_F(UnstackerTest, UnstackReduceFusionPattern) {\n std::string hlo_string = R\"(\n HloModule SimpleLoop\n dynamic-slice.609.reduce_sub_computation {\n lhs.53 = s8[] parameter(0)\n rhs.53 = s8[] parameter(1)\n ROOT add.3090 = s8[] add(lhs.53, rhs.53)\n }\n fused_computation.1096.clone {\n param_0.5572 = s8[3,128,128] parameter(0)\n param_1.6711 = s32[]{:T(128)} parameter(1)\n constant.12008 = s32[]{:T(128)} constant(0)\n dynamic-slice.1545 = s8[1,128,128] dynamic-slice(param_0.5572, param_1.6711, constant.12008, constant.12008), dynamic_slice_sizes={1,128, 128}\n constant.12009 
= s8[] constant(-0)\n ROOT reduce.919 = s8[128,128] reduce(dynamic-slice.1545, constant.12009), dimensions={0}, to_apply=dynamic-slice.609.reduce_sub_computation\n } \n %while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {\n wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n p0 = bf16[8,128] get-tuple-element(wide_p), index=1\n p1 = s8[3,128,128] get-tuple-element(wide_p), index=2\n one = s32[] constant(1)\n inc = s32[] add(i, one)\n %fusion.67830 = s8[128,128] fusion(s8[3,128,128] p1, i), kind=kLoop, calls=%fused_computation.1096.clone \n conv = bf16[8,128] convolution(bf16[8,128] p0, s8[128,128] %fusion.67830), dim_labels=bf_io->bf \n ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, conv, p1)\n }\n %while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {\n wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n %constant.12857 = s32[] constant(3)\n ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT\n }\n ENTRY main {\n p0 = s8[3,128,128] parameter(0)\n p1 = bf16[8,128] parameter(1)\n init = s32[] constant(0)\n while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)\n while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body \n while_use = s8[3,128,128] get-tuple-element(while.out), index=2 \n ROOT out = bf16[8,128] get-tuple-element(while.out), index=1\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto original = module->Clone();\n TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));\n EXPECT_TRUE(unstacked);\n EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),\n std::nullopt, false));\n}\nTEST_F(UnstackerTest, UnstackDSFusionPatternNoBitcast) {\n std::string hlo_string = 
R\"(\n HloModule SimpleLoop\n %fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[1,128,128] {\n %param_0.51117 = s8[3,128,128] parameter(0)\n p1 = s32[] parameter(1)\n %constant.85694 = s32[] constant(0)\n ROOT %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128} \n }\n %while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {\n wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n p0 = bf16[8,128] get-tuple-element(wide_p), index=1\n p1 = s8[3,128,128] get-tuple-element(wide_p), index=2\n one = s32[] constant(1)\n inc = s32[] add(i, one)\n %fusion.67830 = s8[1,128,128] fusion(s8[3,128,128] p1, i), kind=kLoop, calls=%fused_computation.slice\n bitcast.102 = s8[128,128] bitcast(s8[1,128,128] %fusion.67830)\n conv = bf16[8,128] convolution(bf16[8,128] p0, s8[128,128] bitcast.102), dim_labels=bf_io->bf \n ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, conv, p1)\n }\n %while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {\n wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n %constant.12857 = s32[] constant(3)\n ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT\n }\n ENTRY main {\n p0 = s8[3,128,128] parameter(0)\n p1 = bf16[8,128] parameter(1)\n init = s32[] constant(0)\n while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)\n while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body \n while_use = s8[3,128,128] get-tuple-element(while.out), index=2 \n ROOT out = bf16[8,128] get-tuple-element(while.out), index=1\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto original = module->Clone();\n 
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));\n EXPECT_TRUE(unstacked);\n EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 3);\n EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kFusion),\n 0);\n EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),\n std::nullopt, false));\n}\nTEST_F(UnstackerTest, UnstackDSFusionPatternNoBitcastKeepFused) {\n std::string hlo_string = R\"(\n HloModule SimpleLoop\n %fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[1,128,128] {\n %param_0.51117 = s8[3,128,128] parameter(0)\n p1 = s32[] parameter(1)\n %constant.85694 = s32[] constant(0)\n ROOT %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128} \n }\n %while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {\n wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n p0 = bf16[8,128] get-tuple-element(wide_p), index=1\n p1 = s8[3,128,128] get-tuple-element(wide_p), index=2\n one = s32[] constant(1)\n inc = s32[] add(i, one)\n %fusion.67830 = s8[1,128,128] fusion(s8[3,128,128] p1, i), kind=kLoop, calls=%fused_computation.slice\n bitcast.102 = s8[128,128] bitcast(s8[1,128,128] %fusion.67830)\n conv = bf16[8,128] convolution(bf16[8,128] p0, s8[128,128] bitcast.102), dim_labels=bf_io->bf \n ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, conv, p1)\n }\n %while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {\n wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n %constant.12857 = s32[] constant(3)\n ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT\n }\n ENTRY main {\n p0 = s8[3,128,128] parameter(0)\n p1 = bf16[8,128] parameter(1)\n init = s32[] 
constant(0)\n while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)\n while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body \n while_use = s8[3,128,128] get-tuple-element(while.out), index=2 \n ROOT out = bf16[8,128] get-tuple-element(while.out), index=1\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto original = module->Clone();\n auto unfuse = [](HloInstruction* instruction) { return false; };\n TF_ASSERT_OK_AND_ASSIGN(bool unstacked,\n HloUnstacker(unfuse).Run(module.get()));\n EXPECT_TRUE(unstacked);\n EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 0);\n EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kFusion),\n 3);\n EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),\n std::nullopt, false));\n}\nTEST_F(UnstackerTest, UnstackDSFusionPatternWithDifferentLayout) {\n std::string hlo_string = R\"(\n HloModule SimpleLoop\n %fused_computation.30.clone (param_0.153: bf16[32,4,64,64,3], param_1.123: s32[]) -> bf16[64,4,64,3] {\n %param_0.153 = bf16[32,4,64,64,3]{2,1,4,3,0} parameter(0)\n %param_1.123 = s32[]{:T(128)} parameter(1)\n %constant.227 = s32[]{:T(128)} constant(0)\n %dynamic-slice.5 = bf16[1,4,64,64,3]{2,1,4,3,0} dynamic-slice(bf16[32,4,64,64,3]{2,1,4,3,0} %param_0.153, s32[]{:T(128)} %param_1.123, s32[]{:T(128)} %constant.227, s32[]{:T(128)} %constant.227, s32[]{:T(128)} %constant.227, s32[]{:T(128)} %constant.227), dynamic_slice_sizes={1,4,64,64,3}\n ROOT %bitcast.102 = bf16[64,4,64,3]{0,1,3,2} bitcast(bf16[1,4,64,64,3]{2,1,4,3,0} %dynamic-slice.5)\n }\n %while.body (wide_param: (s32[], bf16[8,128], bf16[32,4,64,64,3])) -> (s32[], bf16[8,128], bf16[32,4,64,64,3]) {\n wide_p = (s32[], bf16[8,128], bf16[32,4,64,64,3]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n p0 = bf16[8,128] get-tuple-element(wide_p), index=1\n p1 = 
bf16[32,4,64,64,3]{2,1,4,3,0} get-tuple-element(wide_p), index=2\n one = s32[] constant(1)\n inc = s32[] add(i, one)\n %fusion.67830 = bf16[64,4,64,3]{0,1,3,2} fusion(p1, i), kind=kLoop, calls=%fused_computation.30.clone\n ROOT out = (s32[], bf16[8,128], bf16[32,4,64,64,3]) tuple(inc, p0, p1)\n }\n %while.cond (wide_param: (s32[], bf16[8,128], bf16[32,4,64,64,3])) -> pred[] {\n wide_p = (s32[], bf16[8,128], bf16[32,4,64,64,3]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n %constant.12857 = s32[] constant(32)\n ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT\n }\n ENTRY main {\n p0 = bf16[32,4,64,64,3] parameter(0)\n p1 = bf16[8,128] parameter(1)\n init = s32[] constant(0)\n while.input = (s32[], bf16[8,128], bf16[32,4,64,64,3]) tuple(init, p1, p0)\n while.out = (s32[], bf16[8,128], bf16[32,4,64,64,3]) while(while.input), condition=%while.cond , body=%while.body\n while_use = bf16[32,4,64,64,3] get-tuple-element(while.out), index=2\n ROOT out = bf16[8,128] get-tuple-element(while.out), index=1\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto original = module->Clone();\n TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));\n EXPECT_TRUE(unstacked);\n EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice),\n 32);\n EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kFusion),\n 0);\n EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),\n std::nullopt));\n}\nTEST_F(UnstackerTest, UnstackNestedDSFusionPattern) {\n std::string hlo_string = R\"(\n HloModule SimpleLoop\n %fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {\n %param_0.51117 = s8[3,128,128] parameter(0)\n p1 = s32[] parameter(1)\n %constant.85694 = s32[] constant(0)\n %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] 
%constant.85694), dynamic_slice_sizes={1,128,128}\n ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)\n }\n %fused_computation.inner (param_0.34523: bf16[8,128], param_1.30691: s8[3,128,128], p2: s32[]) -> bf16[8,128] {\n %param_0.34523 = bf16[8,128] parameter(0)\n %param_1.30691 = s8[3,128,128] parameter(1)\n p2 = s32[] parameter(2)\n %fusion.67830 = s8[128,128] fusion(s8[3,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice\n ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf\n }\n %while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {\n wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n p0 = bf16[8,128] get-tuple-element(wide_p), index=1\n p1 = s8[3,128,128] get-tuple-element(wide_p), index=2\n one = s32[] constant(1)\n inc = s32[] add(i, one)\n fusion.conv = bf16[8,128] fusion(p0, p1, i), kind=kOutput, calls=%fused_computation.inner\n ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, fusion.conv, p1)\n }\n %while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {\n wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n %constant.12857 = s32[] constant(3)\n ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT\n }\n ENTRY main {\n p0 = s8[3,128,128] parameter(0)\n p1 = bf16[8,128] parameter(1)\n init = s32[] constant(0)\n while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)\n while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body\n while_use = s8[3,128,128] get-tuple-element(while.out), index=2\n ROOT out = bf16[8,128] get-tuple-element(while.out), index=1\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n 
ParseAndReturnVerifiedModule(hlo_string));\n auto original = module->Clone();\n TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));\n EXPECT_TRUE(unstacked);\n EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 3);\n EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),\n std::nullopt, false));\n}\nTEST_F(UnstackerTest, UnstackNestedDSFusionPatternWithDynamicIndex) {\n std::string hlo_string = R\"(\n HloModule SimpleLoop\n %fused_computation.slice (param_0.51117: s8[6,128,128], p1: s32[]) -> s8[128,128] {\n %param_0.51117 = s8[6,128,128] parameter(0)\n p1 = s32[] parameter(1)\n %constant.85694 = s32[] constant(0)\n %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[6,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}\n ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)\n }\n %fused_computation.inner (param_0.34523: bf16[8,128], param_1.30691: s8[6,128,128], p2: s32[]) -> bf16[8,128] {\n %param_0.34523 = bf16[8,128] parameter(0)\n %param_1.30691 = s8[6,128,128] parameter(1)\n p2 = s32[] parameter(2)\n %fusion.67830 = s8[128,128] fusion(s8[6,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice\n ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf\n }\n %while.body (wide_param: (s32[], bf16[8,128], s8[6,128,128])) -> (s32[], bf16[8,128], s8[6,128,128]) {\n wide_p = (s32[], bf16[8,128], s8[6,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n p0 = bf16[8,128] get-tuple-element(wide_p), index=1\n p1 = s8[6,128,128] get-tuple-element(wide_p), index=2\n one = s32[] constant(1)\n inc = s32[] add(i, one)\n two = s32[] constant(2)\n mult = s32[] multiply(i, two)\n fusion.conv = bf16[8,128] fusion(p0, p1, mult), kind=kOutput, calls=%fused_computation.inner\n ROOT out = (s32[], bf16[8,128], s8[6,128,128]) 
tuple(inc, fusion.conv, p1)\n }\n %while.cond (wide_param: (s32[], bf16[8,128], s8[6,128,128])) -> pred[] {\n wide_p = (s32[], bf16[8,128], s8[6,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n %constant.12857 = s32[] constant(3)\n ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT\n }\n ENTRY main {\n p0 = s8[6,128,128] parameter(0)\n p1 = bf16[8,128] parameter(1)\n init = s32[] constant(0)\n while.input = (s32[], bf16[8,128], s8[6,128,128]) tuple(init, p1, p0)\n while.out = (s32[], bf16[8,128], s8[6,128,128]) while(while.input), condition=%while.cond , body=%while.body\n while_use = s8[6,128,128] get-tuple-element(while.out), index=2\n ROOT out = bf16[8,128] get-tuple-element(while.out), index=1\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto original = module->Clone();\n TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));\n EXPECT_TRUE(unstacked);\n EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),\n std::nullopt, false));\n}\nTEST_F(UnstackerTest, UnstackNestedDSFusionPatternWithMultipleIndex) {\n std::string hlo_string = R\"(\n HloModule SimpleLoop\n %fused_computation.slice.1 (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] {\n %param_0.51117 = s8[4,128,128] parameter(0)\n p1 = s32[] parameter(1)\n %constant.85694 = s32[] constant(0)\n %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}\n ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)\n }\n %fused_computation.slice.2 (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] {\n %param_0.51117 = s8[4,128,128] parameter(0)\n p1 = s32[] parameter(1)\n %constant.85694 = s32[] constant(0)\n %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] 
%constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}\n ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)\n }\n %fused_computation.inner.1 (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] {\n %param_0.34523 = bf16[8,128] parameter(0)\n %param_1.30691 = s8[4,128,128] parameter(1)\n p2 = s32[] parameter(2)\n %fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice.1\n ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf\n }\n %fused_computation.inner.2 (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] {\n %param_0.34523 = bf16[8,128] parameter(0)\n %param_1.30691 = s8[4,128,128] parameter(1)\n p2 = s32[] parameter(2)\n %fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice.2\n ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf\n }\n %while.body (wide_param: (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) {\n wide_p = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n p0 = bf16[8,128] get-tuple-element(wide_p), index=1\n p1 = s8[4,128,128] get-tuple-element(wide_p), index=2\n p2 = s8[4,128,128] get-tuple-element(wide_p), index=3\n one = s32[] constant(1)\n inc = s32[] add(i, one)\n fusion.conv.1 = bf16[8,128] fusion(p0, p1, i), kind=kOutput, calls=%fused_computation.inner.1\n fusion.conv.2 = bf16[8,128] fusion(p0, p2, i), kind=kOutput, calls=%fused_computation.inner.2\n plus = bf16[8,128] add(fusion.conv.1, fusion.conv.2)\n ROOT out = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) tuple(inc, plus, p1, p2)\n }\n %while.cond (wide_param: (s32[], bf16[8,128], s8[4,128,128], 
s8[4,128,128])) -> pred[] {\n wide_p = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n %constant.12857 = s32[] constant(4)\n ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT\n }\n ENTRY main {\n p0 = s8[4,128,128] parameter(0)\n p1 = s8[4,128,128] parameter(1)\n p2 = bf16[8,128] parameter(2)\n init = s32[] constant(0)\n while.input = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) tuple(init, p2, p0, p1)\n while.out = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) while(while.input), condition=%while.cond , body=%while.body\n ROOT out = bf16[8,128] get-tuple-element(while.out), index=1\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto original = module->Clone();\n TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));\n EXPECT_TRUE(unstacked);\n EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 8);\n EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),\n std::nullopt, false));\n}\nTEST_F(UnstackerTest, UnstackNestedDSFusionPatternWithDiffereOperandsOrder) {\n std::string hlo_string = R\"(\n HloModule SimpleLoop\n %fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {\n %param_0.51117 = s8[3,128,128] parameter(0)\n p1 = s32[] parameter(1)\n %constant.85694 = s32[] constant(0)\n %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}\n ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)\n }\n %fused_computation.inner (param_1.30691: s8[3,128,128], p2: s32[], param_0.34523: bf16[8,128]) -> bf16[8,128] {\n %param_0.34523 = bf16[8,128] parameter(2)\n %param_1.30691 = s8[3,128,128] parameter(0)\n p2 = s32[] parameter(1)\n %fusion.67830 = s8[128,128] fusion(s8[3,128,128] 
%param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice\n ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf\n }\n %while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {\n wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n p0 = bf16[8,128] get-tuple-element(wide_p), index=1\n p1 = s8[3,128,128] get-tuple-element(wide_p), index=2\n one = s32[] constant(1)\n inc = s32[] add(i, one)\n fusion.conv = bf16[8,128] fusion(p1, i, p0), kind=kOutput, calls=%fused_computation.inner\n ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, fusion.conv, p1)\n }\n %while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {\n wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n %constant.12857 = s32[] constant(3)\n ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT\n }\n ENTRY main {\n p0 = s8[3,128,128] parameter(0)\n p1 = bf16[8,128] parameter(1)\n init = s32[] constant(0)\n while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)\n while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body\n while_use = s8[3,128,128] get-tuple-element(while.out), index=2\n ROOT out = bf16[8,128] get-tuple-element(while.out), index=1\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto original = module->Clone();\n TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));\n EXPECT_TRUE(unstacked);\n EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 3);\n EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),\n std::nullopt, false));\n}\nTEST_F(UnstackerTest, UnstackNestedDSFusionPatternWithSameUnstackingComps) {\n 
std::string hlo_string = R\"(\n HloModule SimpleLoop\n %fused_computation.slice.1 (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {\n %param_0.51117 = s8[3,128,128] parameter(0)\n p1 = s32[] parameter(1)\n %constant.85694 = s32[] constant(0)\n %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}\n ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)\n }\n %fused_computation.slice.2 (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {\n %param_0.51117 = s8[3,128,128] parameter(0)\n p1 = s32[] parameter(1)\n %constant.85694 = s32[] constant(0)\n %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}\n ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)\n }\n %fused_computation.inner.1 (param_0.34523: bf16[8,128], param_1.30691: s8[3,128,128], p2: s32[]) -> bf16[8,128] {\n %param_0.34523 = bf16[8,128] parameter(0)\n %param_1.30691 = s8[3,128,128] parameter(1)\n p2 = s32[] parameter(2)\n %fusion.67830 = s8[128,128] fusion(s8[3,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice.1\n ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf\n }\n %fused_computation.inner.2 (param_0.34523: bf16[8,128], param_1.30691: s8[3,128,128], p2: s32[]) -> bf16[8,128] {\n %param_0.34523 = bf16[8,128] parameter(0)\n %param_1.30691 = s8[3,128,128] parameter(1)\n p2 = s32[] parameter(2)\n %fusion.67830 = s8[128,128] fusion(s8[3,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice.2\n ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf\n }\n %while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], 
s8[3,128,128]) {\n wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n p0 = bf16[8,128] get-tuple-element(wide_p), index=1\n p1 = s8[3,128,128] get-tuple-element(wide_p), index=2\n one = s32[] constant(1)\n inc = s32[] add(i, one)\n fusion.conv1 = bf16[8,128] fusion(p0, p1, i), kind=kOutput, calls=%fused_computation.inner.1\n fusion.conv2 = bf16[8,128] fusion(p0, p1, i), kind=kOutput, calls=%fused_computation.inner.2\n add = bf16[8,128] add(fusion.conv1, fusion.conv2)\n ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, add, p1)\n }\n %while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {\n wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n %constant.12857 = s32[] constant(3)\n ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT\n }\n ENTRY main {\n p0 = s8[3,128,128] parameter(0)\n p1 = bf16[8,128] parameter(1)\n init = s32[] constant(0)\n while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)\n while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body\n while_use = s8[3,128,128] get-tuple-element(while.out), index=2\n ROOT out = bf16[8,128] get-tuple-element(while.out), index=1\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto original = module->Clone();\n TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));\n EXPECT_TRUE(unstacked);\n EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 3);\n EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),\n std::nullopt, false));\n}\nTEST_F(UnstackerTest, NotUnstackNestedDSFusionPatternWithSameUnstackingComps) {\n std::string hlo_string = R\"(\n HloModule SimpleLoop\n %fused_computation.slice.1 (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[1,128,128] {\n 
%param_0.51117 = s8[3,128,128] parameter(0)\n p1 = s32[] parameter(1)\n %constant.85694 = s32[] constant(0)\n ROOT %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}\n }\n %fused_computation.slice.2 (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {\n %param_0.51117 = s8[3,128,128] parameter(0)\n p1 = s32[] parameter(1)\n %constant.85694 = s32[] constant(0)\n %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}\n ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)\n }\n %while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {\n wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n p0 = bf16[8,128] get-tuple-element(wide_p), index=1\n p1 = s8[3,128,128] get-tuple-element(wide_p), index=2\n one = s32[] constant(1)\n inc = s32[] add(i, one)\n %fusion.67831 = s8[128,128] fusion(p1, i), kind=kLoop, calls=%fused_computation.slice.2\n %fusion.67830 = s8[1,128,128] fusion(p1, i), kind=kLoop, calls=%fused_computation.slice.1\n %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %fusion.67830)\n ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, p0, p1)\n }\n %while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {\n wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n %constant.12857 = s32[] constant(3)\n ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT\n }\n ENTRY main {\n p0 = s8[3,128,128] parameter(0)\n p1 = bf16[8,128] parameter(1)\n init = s32[] constant(0)\n while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)\n while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), 
condition=%while.cond , body=%while.body\n while_use = s8[3,128,128] get-tuple-element(while.out), index=2\n ROOT out = bf16[8,128] get-tuple-element(while.out), index=1\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));\n EXPECT_FALSE(unstacked);\n}\nTEST_F(UnstackerTest, UnstackNestedDSFusionPatternSingleNestedLoop) {\n std::string hlo_string = R\"(\n HloModule SimpleLoop\n %fused_computation.slice (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] {\n %param_0.51117 = s8[4,128,128] parameter(0)\n p1 = s32[] parameter(1)\n %constant.85694 = s32[] constant(0)\n %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}\n ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)\n }\n %fused_computation.inner (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] {\n %param_0.34523 = bf16[8,128] parameter(0)\n %param_1.30691 = s8[4,128,128] parameter(1)\n p2 = s32[] parameter(2)\n %fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice\n ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf\n }\n %while.body.inner (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {\n wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n inner_param_0 = bf16[8,128] get-tuple-element(wide_p), index=1\n inner_param_1 = s8[4,128,128] get-tuple-element(wide_p), index=2\n one = s32[] constant(1)\n inc = s32[] add(i, one)\n fusion.conv = bf16[8,128] fusion(inner_param_0, inner_param_1, i), kind=kOutput, calls=%fused_computation.inner\n ROOT out = (s32[], bf16[8,128], s8[4,128,128]) 
tuple(inc, fusion.conv, inner_param_1)\n }\n %while.cond.inner (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] {\n wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n %constant.12857 = s32[] constant(4)\n ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT\n }\n %while.body (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {\n wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n param0 = bf16[8,128] get-tuple-element(wide_p), index=1\n param1 = s8[4,128,128] get-tuple-element(wide_p), index=2\n one = s32[] constant(2)\n zero = s32[] constant(0)\n mult = s32[] multiply(i, one)\n inner.in = (s32[], bf16[8,128], s8[4,128,128]) tuple(zero, param0, param1)\n inner.out = (s32[], bf16[8,128], s8[4,128,128]) while(inner.in), condition=%while.cond.inner, body=%while.body.inner\n fusion.conv.inner = bf16[8,128] get-tuple-element(inner.out), index=1\n ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(mult, fusion.conv.inner, param1)\n }\n %while.cond (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] {\n wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n %constant.12857 = s32[] constant(20)\n add = s32[] add(%constant.12857, %constant.12857)\n ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, add), direction=LT\n }\n ENTRY main {\n weight = s8[4,128,128] parameter(0)\n p1 = bf16[8,128] parameter(1)\n init = s32[] constant(1)\n while.input = (s32[], bf16[8,128], s8[4,128,128]) tuple(init, p1, weight)\n while.out = (s32[], bf16[8,128], s8[4,128,128]) while(while.input), condition=%while.cond , body=%while.body\n ROOT out = bf16[8,128] get-tuple-element(while.out), index=1\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto original = module->Clone();\n 
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));\n EXPECT_TRUE(unstacked);\n EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 4);\n EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),\n std::nullopt, false));\n}\nTEST_F(UnstackerTest, UnstackNestedDSFusionPatternTwoNestedLoops) {\n std::string hlo_string = R\"(\n HloModule SimpleLoop\n %fused_computation.slice1 (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] {\n %param_0.51117 = s8[4,128,128] parameter(0)\n p1 = s32[] parameter(1)\n %constant.85694 = s32[] constant(0)\n %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}\n ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)\n }\n %fused_computation.inner1 (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] {\n %param_0.34523 = bf16[8,128] parameter(0)\n %param_1.30691 = s8[4,128,128] parameter(1)\n p2 = s32[] parameter(2)\n %fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice1\n ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf\n }\n %while.body.inner1 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {\n wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n inner_param_0 = bf16[8,128] get-tuple-element(wide_p), index=1\n inner_param_1 = s8[4,128,128] get-tuple-element(wide_p), index=2\n one = s32[] constant(1)\n inc = s32[] add(i, one)\n fusion.conv = bf16[8,128] fusion(inner_param_0, inner_param_1, i), kind=kOutput, calls=%fused_computation.inner1\n ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(inc, fusion.conv, inner_param_1)\n }\n %while.cond.inner1 (wide_param: (s32[], 
bf16[8,128], s8[4,128,128])) -> pred[] {\n wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n %constant.12857 = s32[] constant(4)\n ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT\n }\n %while.body1 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {\n wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n param0 = bf16[8,128] get-tuple-element(wide_p), index=1\n param1 = s8[4,128,128] get-tuple-element(wide_p), index=2\n one = s32[] constant(2)\n zero = s32[] constant(0)\n mult = s32[] multiply(i, one)\n inner.in.1 = (s32[], bf16[8,128], s8[4,128,128]) tuple(zero, param0, param1)\n inner.out.1 = (s32[], bf16[8,128], s8[4,128,128]) while(inner.in.1), condition=%while.cond.inner1, body=%while.body.inner1\n fusion.conv.inner = bf16[8,128] get-tuple-element(inner.out.1), index=1\n ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(mult, fusion.conv.inner, param1)\n }\n %while.cond1 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] {\n wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n %constant.12857 = s32[] constant(20)\n add = s32[] add(%constant.12857, %constant.12857)\n ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, add), direction=LT\n }\n %fused_computation.slice2 (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] {\n %param_0.51117 = s8[4,128,128] parameter(0)\n p1 = s32[] parameter(1)\n %constant.85694 = s32[] constant(0)\n %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}\n ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)\n }\n %fused_computation.inner2 (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] {\n 
%param_0.34523 = bf16[8,128] parameter(0)\n %param_1.30691 = s8[4,128,128] parameter(1)\n p2 = s32[] parameter(2)\n %fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice2\n ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf\n }\n %while.body.inner2 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {\n wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n inner_param_0 = bf16[8,128] get-tuple-element(wide_p), index=1\n inner_param_1 = s8[4,128,128] get-tuple-element(wide_p), index=2\n one = s32[] constant(1)\n inc = s32[] add(i, one)\n fusion.conv = bf16[8,128] fusion(inner_param_0, inner_param_1, i), kind=kOutput, calls=%fused_computation.inner2\n ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(inc, fusion.conv, inner_param_1)\n }\n %while.cond.inner2 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] {\n wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n %constant.12857 = s32[] constant(4)\n ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT\n }\n %while.body2 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {\n wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n param0 = bf16[8,128] get-tuple-element(wide_p), index=1\n param1 = s8[4,128,128] get-tuple-element(wide_p), index=2\n one = s32[] constant(2)\n zero = s32[] constant(0)\n mult = s32[] multiply(i, one)\n inner.in.2 = (s32[], bf16[8,128], s8[4,128,128]) tuple(zero, param0, param1)\n inner.out.2 = (s32[], bf16[8,128], s8[4,128,128]) while(inner.in.2), condition=%while.cond.inner2, body=%while.body.inner2\n fusion.conv.inner = bf16[8,128] get-tuple-element(inner.out.2), index=1\n ROOT 
out = (s32[], bf16[8,128], s8[4,128,128]) tuple(mult, fusion.conv.inner, param1)\n }\n %while.cond2 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] {\n wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)\n i = s32[] get-tuple-element(wide_p), index=0\n %constant.12857 = s32[] constant(20)\n add = s32[] add(%constant.12857, %constant.12857)\n ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, add), direction=LT\n }\n ENTRY main {\n weight = s8[4,128,128] parameter(0)\n p1 = bf16[8,128] parameter(1)\n init = s32[] constant(1)\n while.input = (s32[], bf16[8,128], s8[4,128,128]) tuple(init, p1, weight)\n while.out = (s32[], bf16[8,128], s8[4,128,128]) while(while.input), condition=%while.cond1 , body=%while.body1\n init2 = s32[] get-tuple-element(while.out), index=0\n second.while.input = (s32[], bf16[8,128], s8[4,128,128]) tuple(init2, p1, weight)\n second.while.out = (s32[], bf16[8,128], s8[4,128,128]) while(second.while.input), condition=%while.cond2 , body=%while.body2\n out = bf16[8,128] get-tuple-element(while.out), index=1\n second.out = bf16[8,128] get-tuple-element(second.while.out), index=1\n ROOT result = bf16[8,128] add(out, second.out)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto original = module->Clone();\n TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));\n EXPECT_TRUE(unstacked);\n EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 8);\n EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),\n std::nullopt, false));\n}\nTEST_F(UnstackerTest, UnstackDSAndDUSPattern) {\n std::string hlo_string = R\"(\n HloModule SimpleLoop\n %fused_computation.slice (param_0.51117: s32[4,3], offset: s32[]) -> s32[3] {\n %param_0.51117 = s32[4,3] parameter(0)\n offset = s32[] parameter(1)\n zero = s32[] constant(0)\n %dynamic-slice.22040 = s32[1,3] dynamic-slice(s32[4,3] %param_0.51117, offset, zero), 
dynamic_slice_sizes={1,3}\n ROOT %bitcast.31250 = s32[3] bitcast(s32[1,3] %dynamic-slice.22040)\n }\n %fused_computation.update.slice (param_0.51117: s32[4,3], p1: s32[], p2: s32[3]) -> s32[4,3] {\n %param_0.51117 = s32[4,3] parameter(0)\n %p1 = s32[] parameter(1)\n %p2 = s32[3] parameter(2)\n %zero = s32[] constant(0)\n %bitcast.31250 = s32[1,3] bitcast(%p2)\n ROOT output_dus = s32[4,3]{1,0} dynamic-update-slice(%param_0.51117, %bitcast.31250, %p1, zero)\n }\n SimpleLoop.body {\n loop_var.1 = (s32[], s32[4,3]) parameter(0)\n get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0\n get-tuple-element.2 = s32[4,3] get-tuple-element(loop_var.1), index=1\n zero = s32[] constant(0)\n some_const = s32[3] constant({0,1,2})\n constant.1 = s32[] constant(1)\n idx = s32[] add(get-tuple-element.1, constant.1)\n ds = s32[3]{0} fusion(get-tuple-element.2, get-tuple-element.1), kind=kLoop, calls=%fused_computation.slice\n update = s32[3] add(ds, ds)\n dus = s32[3] dynamic-update-slice(ds, update, zero)\n output = s32[4,3] fusion(get-tuple-element.2, get-tuple-element.1, dus), kind=kLoop, calls=%fused_computation.update.slice\n ROOT tuple = (s32[], s32[4,3]) tuple(idx, output)\n }\n SimpleLoop.condition {\n loop_var.1 = (s32[], s32[4,3]) parameter(0)\n get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0\n constant.2 = s32[] constant(4)\n ROOT less-than = pred[] compare(get-tuple-element.1, constant.2), direction=LT\n }\n ENTRY SimpleLoop {\n reference = s32[4,3] parameter(0)\n zero = s32[] constant(0)\n zero1 = s32[] constant(0)\n one = s32[] constant(1)\n tuple.1 = (s32[], s32[4,3]) tuple(zero, reference)\n while = (s32[], s32[4,3]) while(tuple.1), condition=SimpleLoop.condition, body=SimpleLoop.body\n ROOT out = s32[] get-tuple-element(while), index=0\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto original = module->Clone();\n TF_ASSERT_OK_AND_ASSIGN(bool unstacked, 
HloUnstacker().Run(module.get()));\n EXPECT_TRUE(unstacked);\n EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),\n std::nullopt, false));\n}\nTEST_F(UnstackerTest, UnstackDSAndDUSPatternNestedLoop) {\n std::string hlo_string = R\"(\n HloModule SimpleLoop\n %fused_computation.slice (param_0.51117: bf16[4,1,8,257,128], offset: s32[]) -> bf16[1,8,257,128] {\n %param_0.51117 = bf16[4,1,8,257,128] parameter(0)\n offset = s32[] parameter(1)\n zero = s32[] constant(0)\n %dynamic-slice.22040 = bf16[1,1,8,257,128]\n dynamic-slice(bf16[4,1,8,257,128] %param_0.51117, offset, zero, zero, zero, zero), dynamic_slice_sizes={1,1,8,257,128} \n ROOT %bitcast.31250 = bf16[1,8,257,128] bitcast(%dynamic-slice.22040)\n }\n %fused_computation.slice.2 (param_0.51117: bf16[4,1,8,257,128], offset: s32[]) -> bf16[1,8,257,128] {\n %param_0.51117 = bf16[4,1,8,257,128] parameter(0)\n offset = s32[] parameter(1)\n zero = s32[] constant(0)\n %dynamic-slice.22040 = bf16[1,1,8,257,128] dynamic-slice(bf16[4,1,8,257,128] %param_0.51117, offset, zero, zero, zero, zero), dynamic_slice_sizes={1,1,8,257,128} \n ROOT %bitcast.31250 = bf16[1,8,257,128] bitcast(%dynamic-slice.22040)\n }\n inner.body {\n loop_var.1 = (s32[], bf16[4,1,8,257,128], bf16[4,1,8,257,128]) parameter(0) \n get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0 \n get-tuple-element.2 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), index=1 \n get-tuple-element.3 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), index=2 \n sliced = bf16[1,8,257,128] fusion(get-tuple-element.2, get-tuple-element.1), kind=kLoop, calls=%fused_computation.slice \n sliced.2 = bf16[1,8,257,128] fusion(get-tuple-element.3, get-tuple-element.1), kind=kLoop,calls=%fused_computation.slice.2 \n temp = bf16[1,8,257,128] add(sliced, sliced.2) \n one = s32[] constant(1) idx = s32[] add(get-tuple-element.1, one) \n ROOT out = tuple(idx, get-tuple-element.2, get-tuple-element.3)\n }\n inner.condition {\n loop_var.1 = 
(s32[], bf16[4,1,8,257,128], bf16[4,1,8,257,128])\n parameter(0) get-tuple-element.1 = s32[] get-tuple-element(loop_var.1),\n index=0 constant.2 = s32[] constant(4) ROOT less-than = pred[]\n compare(get-tuple-element.1, constant.2), direction=LT\n }\n outer.body {\n loop_var.1 = (s32[], bf16[4,1,8,257,128], bf16[4,1,8,257,128]) parameter(0) \n get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0\n get-tuple-element.2 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), index=1\n get-tuple-element.3 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), index=2\n zero = s32[] constant(0)\n buffer = bf16[4,1,8,257,128] custom-call(), custom_call_target=\"AllocateBuffer\"\n inner.input = tuple(zero, buffer, get-tuple-element.2)\n inner = while(inner.input), condition=inner.condition, body=inner.body\n out1 = bf16[4,1,8,257,128] get-tuple-element(inner), index=1\n one = s32[] constant(1)\n idx = s32[] add(get-tuple-element.1, one)\n ROOT tuple = (s32[], bf16[4,1,8,257,128], bf16[4,1,8,257,128]) tuple(idx, out1, get-tuple-element.3)\n }\n outer.condition {\n loop_var.1 = (s32[], bf16[4,1,8,257,128], bf16[4,1,8,257,128])\n parameter(0) get-tuple-element.1 = s32[] get-tuple-element(loop_var.1),\n index=0 constant.2 = s32[] constant(4) mul = s32[]\n multiply(get-tuple-element.1, constant.2) ROOT less-than = pred[]\n compare(get-tuple-element.1, mul), direction=LT\n }\n ENTRY SimpleLoop {\n param1 = bf16[4,1,8,257,128] parameter(0)\n param2 = bf16[4,1,8,257,128] parameter(1)\n zero = s32[] constant(0)\n zero1 = s32[] constant(0)\n one = s32[] constant(1)\n tuple.1 = tuple(zero, param1, param2)\n while = while(tuple.1), condition=outer.condition, body=outer.body\n ROOT out = s32[] get-tuple-element(while), index=0\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto original = module->Clone();\n TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));\n EXPECT_TRUE(unstacked);\n 
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),\n std::nullopt, false));\n}\nTEST_F(UnstackerTest, UnstackDSAndDUSPatternLoopFeedingLoop) {\n std::string hlo_string = R\"(\n HloModule SimpleLoop\n %fused_computation.update.slice (param_0.51117: bf16[4,1,8,257,128], p1: s32[], param_0.51118: bf16[1,8,257,128]) -> bf16[4,1,8,257,128] {\n %param_0.51117 = bf16[4,1,8,257,128] parameter(0)\n p1 = s32[] parameter(1)\n %param_0.51118 = bf16[1,8,257,128] parameter(2)\n bitcast = bf16[1,1,8,257,128] bitcast(param_0.51118)\n %constant.85694 = s32[] constant(0)\n ROOT %dynamic-update-slice.22040 = bf16[4,1,8,257,128] dynamic-update-slice(bf16[4,1,8,257,128] %param_0.51117, bitcast, p1, s32[] %constant.85694, s32[] %constant.85694, s32[] %constant.85694, s32[] %constant.85694)\n }\n %fused_computation.slice (param_0.51117: bf16[4,1,8,257,128], offset:s32[]) -> bf16[1,8,257,128] {\n %param_0.51117 = bf16[4,1,8,257,128] parameter(0)\n offset = s32[] parameter(1)\n zero = s32[] constant(0)\n %dynamic-slice.22040 = bf16[1,1,8,257,128] dynamic-slice(bf16[4,1,8,257,128] %param_0.51117, offset, zero, zero, zero, zero), dynamic_slice_sizes={1,1,8,257,128} \n ROOT %bitcast.31250 = bf16[1,8,257,128] bitcast(%dynamic-slice.22040)\n }\n first.body {\n loop_var.1 = (s32[], bf16[4,1,8,257,128]) parameter(0) \n get-tuple-element.1 = s32[] get-tuple-element(loop_var.1),index=0\n get-tuple-element.2 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), index=1\n constant = bf16[1,8,257,128] constant({...})\n sliced = bf16[1,8,257,128] fusion(get-tuple-element.2, get-tuple-element.1), kind=kLoop, calls=%fused_computation.slice\n tmp = bf16[1,8,257,128] add(sliced, sliced)\n one = s32[] constant(1)\n idx = s32[] add(get-tuple-element.1, one)\n ROOT out = tuple(idx, get-tuple-element.2)\n }\n first.condition {\n loop_var.1 = (s32[], bf16[4,1,8,257,128]) parameter(0) \n get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0\n constant.2 = s32[] constant(4) \n 
ROOT less-than = pred[] compare(get-tuple-element.1, constant.2), direction=LT\n }\n next.body {\n loop_var.1 = (s32[], bf16[4,1,8,257,128]) parameter(0) \n get-tuple-element.1 = s32[] get-tuple-element(loop_var.1),index=0\n get-tuple-element.2 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), index=1\n constant = bf16[1,8,257,128] constant({...})\n update.sliced = bf16[4,1,8,257,128] fusion(get-tuple-element.2, get-tuple-element.1, constant), kind=kLoop, calls=%fused_computation.update.slice\n one = s32[] constant(1)\n idx = s32[] add(get-tuple-element.1, one)\n ROOT out = tuple(idx, update.sliced)\n }\n next.condition {\n loop_var.1 = (s32[], bf16[4,1,8,257,128]) parameter(0)\n get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0\n constant.2 = s32[] constant(4) \n ROOT less-than = pred[] compare(get-tuple-element.1, constant.2), direction=LT\n }\n ENTRY SimpleLoop {\n param1 = bf16[4,1,8,257,128] parameter(0)\n param2 = bf16[4,1,8,257,128] parameter(1)\n zero = s32[] constant(0)\n zero1 = s32[] constant(0)\n one = s32[] constant(1)\n tuple.1 = tuple(zero, param1)\n while = while(tuple.1), condition=first.condition, body=first.body\n while.out = bf16[4,1,8,257,128] get-tuple-element(while), index=1\n next.input = tuple(zero, while.out)\n next = while(next.input), condition=next.condition, body=next.body\n ROOT out = s32[] get-tuple-element(next), index=0\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));\n EXPECT_TRUE(unstacked);\n}\nTEST_F(UnstackerTest, UnstackDUSFusionWithPadPatternLoopFeedingLoop) {\n std::string hlo_string = R\"(\n HloModule SimpleLoop\n fused_computation.75.clone {\n param_0.5713 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} parameter(0)\n param_2.4396 = bf16[1,8,257,128]{3,2,1,0:T(8,128)(2,1)} parameter(2)\n constant.12166 = bf16[]{:T(256)} constant(0)\n pad.496 = 
bf16[1,8,513,128]{3,2,1,0:T(8,128)(2,1)} pad(param_2.4396, constant.12166), padding=0_0x0_0x0_256x0_0\n bitcast.1262 = bf16[1,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} bitcast(pad.496)\n param_1.6823 = s32[]{:T(128)} parameter(1)\n constant.12165 = s32[]{:T(128)} constant(0)\n ROOT dynamic-update-slice.193 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} dynamic-update-slice(param_0.5713, bitcast.1262, param_1.6823, constant.12165, constant.12165, constant.12165, constant.12165)\n } \n fused_computation.1 {\n param_0.5712 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}parameter(0)\n param_1.6822 = s32[]{:T(128)} parameter(1)\n constant.12164 = s32[]{:T(128)} constant(0)\n dynamic-slice.1597 = bf16[1,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} dynamic-slice(param_0.5712, param_1.6822, constant.12164, constant.12164, constant.12164, constant.12164), dynamic_slice_sizes={1,1,8,513,128}\n ROOT bitcast.1261 = bf16[1,8,513,128]{3,2,1,0:T(8,128)(2,1)} bitcast(dynamic-slice.1597)\n }\n first.body {\n wide.param.29 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) parameter(0)\n get-tuple-element.12177 = s32[]{:T(128)} get-tuple-element(wide.param.29), index=0\n constant.12144..sunk.2 = s32[]{:T(128)} constant(1)\n add.4517 = s32[]{:T(128)} add(get-tuple-element.12177, constant.12144..sunk.2)\n get-tuple-element.12178 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} get-tuple-element(wide.param.29), index=1\n fusion.2381 = bf16[1,8,513,128]{3,2,1,0:T(8,128)(2,1)} fusion(get-tuple-element.12178, get-tuple-element.12177), kind=kLoop, calls=fused_computation.1\n tmp = bf16[1,8,513,128]{3,2,1,0:T(8,128)(2,1)} add(fusion.2381, fusion.2381)\n ROOT tuple.949 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) tuple(add.4517, get-tuple-element.12178)\n } \n first.cond {\n wide.param.28 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) parameter(0)\n get-tuple-element.12167 = s32[]{:T(128)} get-tuple-element(wide.param.28), index=0\n constant.12162 = 
s32[]{:T(128)} constant(2)\n ROOT compare.1815 = pred[]{:T(512)} compare(get-tuple-element.12167, constant.12162), direction=LT\n }\n wide.region_54.2652.clone_spmd {\n wide.param.29 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) parameter(0)\n get-tuple-element.12177 = s32[]{:T(128)} get-tuple-element(wide.param.29), index=0\n constant.12144..sunk.2 = s32[]{:T(128)} constant(1)\n add.4517 = s32[]{:T(128)} add(get-tuple-element.12177, constant.12144..sunk.2)\n get-tuple-element.12178 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} get-tuple-element(wide.param.29), index=1\n update = bf16[1,8,257,128]{3,2,1,0:T(8,128)(2,1)} constant({...})\n fusion.2382 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} fusion(get-tuple-element.12178, get-tuple-element.12177, update), kind=kLoop, calls=fused_computation.75.clone\n ROOT tuple.949 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) tuple(add.4517, fusion.2382)\n } \n wide.region_55.2732.clone_spmd {\n wide.param.28 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) parameter(0)\n get-tuple-element.12167 = s32[]{:T(128)} get-tuple-element(wide.param.28), index=0\n constant.12162 = s32[]{:T(128)} constant(2)\n ROOT compare.1815 = pred[]{:T(512)} compare(get-tuple-element.12167, constant.12162), direction=LT\n }\n ENTRY main {\n p0 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} parameter(0)\n init = s32[]{:T(128)} constant(0)\n first.input = tuple(init, p0)\n first.out = while(first.input), condition=first.cond , body=first.body\n o1 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} get-tuple-element(first.out), index=1\n input = tuple(init, o1)\n out = while(input), condition=wide.region_55.2732.clone_spmd , body=wide.region_54.2652.clone_spmd\n ROOT res = s32[]{:T(128)} get-tuple-element(out), index=0\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));\n 
EXPECT_TRUE(unstacked);\n}\nTEST_F(UnstackerTest, UnstackDUSFusionWithAddPattern) {\n std::string hlo_string = R\"(\n HloModule SimpleLoop\n add.2771.reduce_sub_computation {\n lhs.44 = bf16[] parameter(0)\n rhs.44 = bf16[] parameter(1)\n ROOT add.3079 = bf16[] add(lhs.44, rhs.44)\n }\n fused_computation.75.clone {\n param_0.31658 = bf16[2,4096]{1,0:T(8,128)(2,1)} parameter(0)\n param_1.26202 = s32[]{:T(128)} parameter(1)\n constant.47557 = s32[]{:T(128)} constant(0)\n dynamic-slice.12289 = bf16[1,4096]{1,0:T(2,128)(2,1)} dynamic-slice(param_0.31658, param_1.26202, constant.47557), dynamic_slice_sizes={1,4096}\n constant.47559 = bf16[]{:T(256)} constant(1)\n broadcast.39214 = bf16[1,4096]{1,0:T(2,128)(2,1)} broadcast(constant.47559), dimensions={}\n add.13176 = bf16[1,4096]{1,0:T(2,128)(2,1)} add(dynamic-slice.12289, broadcast.39214)\n constant.47558 = bf16[] constant(-0)\n ROOT reduce.8210 = bf16[4096]{0:T(1024)(128)(2,1)} reduce(add.13176, constant.47558), dimensions={0}, to_apply=add.2771.reduce_sub_computation\n } \n first.body {\n wide.param.29 = (s32[]{:T(128)}, bf16[2,4096]{1,0:T(8,128)(2,1)}) parameter(0)\n get-tuple-element.12177 = s32[]{:T(128)} get-tuple-element(wide.param.29), index=0\n constant.12144..sunk.2 = s32[]{:T(128)} constant(1)\n add.4517 = s32[]{:T(128)} add(get-tuple-element.12177, constant.12144..sunk.2)\n get-tuple-element.12178 = bf16[2,4096]{1,0:T(8,128)(2,1)} get-tuple-element(wide.param.29), index=1\n fusion.2381 = bf16[4096]{0:T(1024)(128)(2,1)} fusion(get-tuple-element.12178, get-tuple-element.12177), kind=kLoop, calls=fused_computation.75.clone\n tmp = bf16[4096]{0:T(1024)(128)(2,1)} add(fusion.2381, fusion.2381)\n ROOT tuple.949 = (s32[]{:T(128)}, bf16[2,4096]{1,0:T(8,128)(2,1)}) tuple(add.4517, get-tuple-element.12178)\n } \n first.cond {\n wide.param.28 = (s32[]{:T(128)}, bf16[2,4096]{1,0:T(8,128)(2,1)}) parameter(0)\n get-tuple-element.12167 = s32[]{:T(128)} get-tuple-element(wide.param.28), index=0\n constant.12162 = 
s32[]{:T(128)} constant(2)\n ROOT compare.1815 = pred[]{:T(512)} compare(get-tuple-element.12167, constant.12162), direction=LT\n }\n ENTRY main {\n p0 = bf16[2,4096]{1,0:T(8,128)(2,1)} parameter(0)\n init = s32[]{:T(128)} constant(0)\n first.input = tuple(init, p0)\n first.out = while(first.input), condition=first.cond , body=first.body\n ROOT o1 = s32[]{:T(128)} get-tuple-element(first.out), index=0\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto original = module->Clone();\n TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));\n EXPECT_TRUE(unstacked);\n EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),\n std::nullopt, false));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_unstacker.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_unstacker_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1126,"cells":{"ID":{"kind":"string","value":"9bda38a9-493d-4316-85e9-9599ccb99c9a"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"dynamic_padder"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/dynamic_padder.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/dynamic_padder_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/dynamic_padder.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/functional/function_ref.h\"\n#include \"absl/log/check.h\"\n#include 
\"absl/log/log.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_format.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/span.h\"\n#include \"xla/comparison_util.h\"\n#include \"xla/hlo/builder/xla_builder.h\"\n#include \"xla/hlo/ir/dfs_hlo_visitor_with_default.h\"\n#include \"xla/hlo/ir/dynamic_parameter_binding.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/service/call_graph.h\"\n#include \"xla/service/dynamic_dimension_inference.h\"\n#include \"xla/service/dynamic_window_utils.h\"\n#include \"xla/service/hlo_creation_utils.h\"\n#include \"xla/service/hlo_dce.h\"\n#include \"xla/service/pattern_matcher.h\"\n#include \"xla/service/shape_inference.h\"\n#include \"xla/service/tuple_util.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/status_macros.h\"\n#include \"xla/tsl/lib/monitoring/gauge.h\"\n#include \"xla/util.h\"\n#include \"xla/window_util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nauto* dynamic_padding_gauge = tsl::monitoring::Gauge::New(\n \"/tensorflow/core/use_dynamic_padding_gauge\",\n \"Tracks if dynamic padder is used.\");\nabsl::StatusOr ChooseIdentityValue(HloInstruction* inst,\n int64_t operand_number) {\n if (inst->IsElementwise()) {\n return nullptr;\n }\n if (inst->opcode() == HloOpcode::kSelectAndScatter ||\n inst->IsCustomCall(\"DynamicSelectAndScatterSamePadding\")) {\n if (operand_number == 1) {\n return inst->mutable_operand(2);\n }\n TF_RET_CHECK(operand_number == 0);\n HloComputation* select = inst->called_computations()[0];\n if 
(Match(select->root_instruction(),\n match::Compare(match::Parameter(), match::Parameter())\n .WithComparisonDirection(ComparisonDirection::kGe))) {\n return inst->AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::MinValue(inst->operand(0)->shape().element_type())));\n } else {\n return Unimplemented(\n \"Only select and scatter with `max` as select function is \"\n \"supported, got %s\",\n select->ToString());\n }\n }\n switch (inst->opcode()) {\n case HloOpcode::kReduce: {\n auto* reduce = Cast(inst);\n TF_RET_CHECK(operand_number < reduce->input_count())\n << \"Only data operand with dynamic dimension is valid.\";\n int64_t init_value_index = reduce->input_count() + operand_number;\n return inst->mutable_operand(init_value_index);\n }\n case HloOpcode::kReduceWindow: {\n auto* reduce_window = Cast(inst);\n TF_RET_CHECK(operand_number < reduce_window->input_count())\n << \"Only data operand with dynamic dimension is valid.\";\n int64_t init_value_index = reduce_window->input_count() + operand_number;\n return inst->mutable_operand(init_value_index);\n }\n case HloOpcode::kConvolution:\n case HloOpcode::kDot: {\n PrimitiveType ptype = inst->operand(0)->shape().element_type();\n return inst->AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::Zero(ptype)));\n }\n case HloOpcode::kPad:\n return inst->mutable_operand(1);\n case HloOpcode::kScatter: {\n if (operand_number != 1) {\n return nullptr;\n }\n PrimitiveType indices_ptype =\n inst->operand(operand_number)->shape().element_type();\n return inst->AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::MaxValue(indices_ptype)));\n }\n case HloOpcode::kParameter:\n case HloOpcode::kGather:\n case HloOpcode::kDynamicSlice:\n case HloOpcode::kDynamicUpdateSlice:\n case HloOpcode::kGetDimensionSize:\n case HloOpcode::kSetDimensionSize:\n case HloOpcode::kConcatenate:\n case HloOpcode::kReshape:\n case HloOpcode::kReverse:\n case HloOpcode::kTuple:\n case HloOpcode::kAllReduce:\n case 
HloOpcode::kReduceScatter:\n case HloOpcode::kBroadcast:\n case HloOpcode::kTranspose:\n case HloOpcode::kSort:\n case HloOpcode::kSlice:\n case HloOpcode::kDomain:\n return nullptr;\n case HloOpcode::kCustomCall:\n return nullptr;\n default:\n return UnimplementedStrCat(\"Unimplemented padding for instruction: \",\n inst->ToString());\n }\n}\nabsl::StatusOr ReplaceGetSize(\n HloInstruction* instr,\n DynamicDimensionInference* dynamic_dimension_inference) {\n if (instr->opcode() != HloOpcode::kGetDimensionSize) {\n return false;\n }\n HloComputation* computation = instr->parent();\n TF_ASSIGN_OR_RETURN(auto legal_shape,\n ShapeInference::InferGetDimensionSizeShape(\n instr->operand(0)->shape(), instr->dimension()));\n TF_RET_CHECK(ShapeUtil::Equal(instr->shape(), legal_shape))\n << \"instr->shape() \" << instr->shape().ToString() << \" , \"\n << \"legal_shape \" << legal_shape.ToString();\n TF_RET_CHECK(ShapeUtil::HasPrimitiveType(instr->shape(), S32));\n HloInstruction* operand = instr->mutable_operand(0);\n int64_t dim = instr->dimension();\n HloInstruction* dynamic_size =\n dynamic_dimension_inference->GetDynamicSize(operand, {}, dim);\n if (dynamic_size != nullptr) {\n TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(dynamic_size));\n dynamic_dimension_inference->ReplaceAllDynamicDimensionUsesWith(\n instr, dynamic_size);\n } else {\n int32_t size = instr->operand(0)->shape().dimensions(dim);\n HloInstruction* new_instr = computation->AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(size)));\n TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(new_instr));\n dynamic_dimension_inference->ReplaceAllDynamicDimensionUsesWith(instr,\n new_instr);\n }\n return true;\n}\nabsl::StatusOr ReplaceSetSize(HloInstruction* instr) {\n if (instr->opcode() != HloOpcode::kSetDimensionSize) {\n return false;\n }\n TF_RET_CHECK(Shape::Equal().IgnoreDynamicDimension()(\n instr->shape(), instr->operand(0)->shape()))\n << \"instr->shape() \" << instr->shape().ToString() 
<< \" , \"\n << \"instruction operand shape \" << instr->operand(0)->shape();\n HloInstruction* operand = instr->mutable_operand(0);\n TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(operand));\n return true;\n}\nabsl::StatusOr ReplaceSetBound(HloInstruction* instr) {\n if (instr->opcode() != HloOpcode::kCustomCall ||\n instr->custom_call_target() != \"SetBound\") {\n return false;\n }\n TF_RET_CHECK(Shape::Equal().IgnoreDynamicDimension()(\n instr->shape(), instr->operand(0)->shape()))\n << \"instr->shape() \" << instr->shape().ToString() << \" , \"\n << \"instruction operand shape \" << instr->operand(0)->shape();\n HloInstruction* operand = instr->mutable_operand(0);\n TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(operand));\n return true;\n}\nbool ShouldSkipPadOnOperand(\n const HloInstruction* inst, int64_t operand_num, int64_t dimension,\n const absl::flat_hash_set& execution_threads) {\n switch (inst->opcode()) {\n case HloOpcode::kConvolution: {\n if (operand_num == 0) {\n if (dimension ==\n inst->convolution_dimension_numbers().input_batch_dimension()) {\n return true;\n }\n const auto& spatial_dims =\n inst->convolution_dimension_numbers().input_spatial_dimensions();\n for (int64_t spatial_dim = 0; spatial_dim < spatial_dims.size();\n ++spatial_dim) {\n if (spatial_dims[spatial_dim] == dimension &&\n inst->window().dimensions(spatial_dim).size() == 1) {\n return true;\n }\n }\n }\n return operand_num == 1 &&\n (dimension == inst->convolution_dimension_numbers()\n .kernel_output_feature_dimension());\n }\n case HloOpcode::kDot: {\n if (operand_num == 0) {\n return !absl::c_linear_search(\n inst->dot_dimension_numbers().lhs_contracting_dimensions(),\n dimension);\n }\n return !absl::c_linear_search(\n inst->dot_dimension_numbers().rhs_contracting_dimensions(),\n dimension);\n }\n case HloOpcode::kReduce:\n return !absl::c_linear_search(inst->dimensions(), dimension);\n case HloOpcode::kSelectAndScatter:\n case HloOpcode::kReduceWindow:\n return 
inst->window().dimensions(dimension).size() == 1;\n case HloOpcode::kAsyncStart:\n if (!HloInstruction::IsThreadIncluded(inst->async_execution_thread(),\n execution_threads)) {\n return true;\n }\n return false;\n default:\n return false;\n }\n}\nHloInstruction* PadWithScalar(HloInstruction* inst, int64_t dim,\n HloInstruction* dynamic_size,\n HloInstruction* padding_scalar) {\n CHECK(inst != nullptr && dynamic_size != nullptr &&\n padding_scalar != nullptr);\n const Shape mask_shape =\n ShapeUtil::MakeShape(xla::S32, inst->shape().dimensions());\n const Shape pred_shape =\n ShapeUtil::MakeShape(xla::PRED, inst->shape().dimensions());\n HloInstruction* iota =\n inst->AddInstruction(HloInstruction::CreateIota(mask_shape, dim));\n HloInstruction* broadcasted_effective_size = inst->AddInstruction(\n HloInstruction::CreateBroadcast(mask_shape, dynamic_size, {}));\n HloInstruction* pred = inst->AddInstruction(HloInstruction::CreateCompare(\n pred_shape, iota, broadcasted_effective_size, ComparisonDirection::kLt));\n HloInstruction* broadcasted_identity_value =\n inst->AddInstruction(HloInstruction::CreateBroadcast(\n ShapeUtil::MakeStaticShape(inst->shape()), padding_scalar, {}));\n HloInstruction* padded = inst->AddInstruction(HloInstruction::CreateTernary(\n ShapeUtil::MakeStaticShape(inst->shape()), HloOpcode::kSelect, pred, inst,\n broadcasted_identity_value));\n return padded;\n}\nHloInstruction* GenerateBinaryMask(\n HloInstruction* reshape, int64_t input_dim,\n absl::Span output_dims,\n absl::Span output_dynamic_dims, HloInstruction* one,\n HloInstruction* zero, bool split_input) {\n Shape input_shape =\n split_input ? reshape->operand(0)->shape() : reshape->shape();\n Shape output_shape =\n split_input ? 
reshape->shape() : reshape->operand(0)->shape();\n const Shape mask_input_shape =\n ShapeUtil::MakeShape(xla::S32, {input_shape.dimensions(input_dim)});\n const Shape pred_input_shape =\n ShapeUtil::MakeShape(xla::PRED, {input_shape.dimensions(input_dim)});\n HloInstruction* pred_true = reshape->AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(true)));\n HloInstruction* input_shape_pred_mask = reshape->AddInstruction(\n HloInstruction::CreateBroadcast(pred_input_shape, pred_true, {}));\n bool need_rewrite = false;\n HloInstruction* iota =\n reshape->AddInstruction(HloInstruction::CreateIota(mask_input_shape, 0));\n for (int64_t i = 1; i < output_dims.size(); ++i) {\n if (output_dynamic_dims[output_dims[i]] != nullptr) {\n need_rewrite = true;\n break;\n }\n }\n if (!need_rewrite) {\n return nullptr;\n }\n for (int64_t i = output_dims.size() - 1; i > 0; i--) {\n const int64_t output_dim = output_dims[i];\n HloInstruction* dynamic_size = output_dynamic_dims[output_dim];\n HloInstruction* static_output_dim_size = reshape->AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(\n output_shape.dimensions(output_dim))));\n HloInstruction* broadcasted_static_output_dim_size =\n reshape->AddInstruction(HloInstruction::CreateBroadcast(\n mask_input_shape, static_output_dim_size, {}));\n if (dynamic_size != nullptr) {\n HloInstruction* dim_index =\n reshape->AddInstruction(HloInstruction::CreateBinary(\n mask_input_shape, HloOpcode::kRemainder, iota,\n broadcasted_static_output_dim_size));\n HloInstruction* broadcasted_effective_size = reshape->AddInstruction(\n HloInstruction::CreateBroadcast(mask_input_shape, dynamic_size, {}));\n HloInstruction* selected =\n reshape->AddInstruction(HloInstruction::CreateCompare(\n pred_input_shape, dim_index, broadcasted_effective_size,\n ComparisonDirection::kLt));\n input_shape_pred_mask = reshape->AddInstruction(\n HloInstruction::CreateBinary(pred_input_shape, HloOpcode::kAnd,\n 
input_shape_pred_mask, selected));\n }\n iota = reshape->AddInstruction(\n HloInstruction::CreateBinary(mask_input_shape, HloOpcode::kDivide, iota,\n broadcasted_static_output_dim_size));\n }\n HloInstruction* broadcasted_one = reshape->AddInstruction(\n HloInstruction::CreateBroadcast(mask_input_shape, one, {}));\n HloInstruction* broadcasted_zero = reshape->AddInstruction(\n HloInstruction::CreateBroadcast(mask_input_shape, zero, {}));\n return reshape->AddInstruction(HloInstruction::CreateTernary(\n mask_input_shape, HloOpcode::kSelect, input_shape_pred_mask,\n broadcasted_one, broadcasted_zero));\n}\nabsl::StatusOr RewriteDynamicReshapeSplitInput(\n HloInstruction* reshape, int64_t input_dim,\n absl::Span output_dims,\n absl::Span output_dynamic_dims,\n DynamicDimensionInference* dynamic_dimension_inference) {\n VLOG(2) << \"Reshaping input dim \" << input_dim << \" to \"\n << VectorString(output_dims);\n const Shape operand_shape = reshape->operand(0)->shape();\n TF_RET_CHECK(output_dims.size() > 1);\n const Shape mask_input_shape =\n ShapeUtil::MakeShape(xla::S32, {operand_shape.dimensions(input_dim)});\n const Shape pred_input_shape =\n ShapeUtil::MakeShape(xla::PRED, {operand_shape.dimensions(input_dim)});\n HloInstruction* zero = reshape->AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));\n HloInstruction* one = reshape->AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::One(S32)));\n HloInstruction* input_shape_binary_mask =\n GenerateBinaryMask(reshape, input_dim, output_dims, output_dynamic_dims,\n one, zero, true);\n if (input_shape_binary_mask == nullptr) {\n VLOG(2) << \"No need to rewrite\";\n return false;\n }\n auto embedded_builder = HloComputation::Builder(\"add\");\n {\n auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeShape(S32, {}), \"lhs\"));\n auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(\n 1, ShapeUtil::MakeShape(S32, {}), 
\"rhs\"));\n embedded_builder.AddInstruction(\n HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));\n }\n HloComputation* add =\n reshape->GetModule()->AddEmbeddedComputation(embedded_builder.Build());\n Window cumsum_window;\n WindowDimension* dim = cumsum_window.add_dimensions();\n dim->set_size(operand_shape.dimensions(input_dim));\n dim->set_stride(1);\n dim->set_padding_low(operand_shape.dimensions(input_dim) - 1);\n dim->set_padding_high(0);\n dim->set_window_dilation(1);\n dim->set_base_dilation(1);\n HloInstruction* cumsum =\n reshape->AddInstruction(HloInstruction::CreateReduceWindow(\n mask_input_shape, input_shape_binary_mask, zero, cumsum_window, add));\n HloInstruction* broadcast_ones = reshape->AddInstruction(\n HloInstruction::CreateBroadcast(mask_input_shape, one, {}));\n cumsum = reshape->AddInstruction(HloInstruction::CreateBinary(\n mask_input_shape, HloOpcode::kSubtract, cumsum, broadcast_ones));\n GatherDimensionNumbers gather_dim_numbers;\n for (int64_t i = 0; i < operand_shape.dimensions_size(); ++i) {\n if (i != input_dim) {\n gather_dim_numbers.add_offset_dims(i);\n }\n }\n gather_dim_numbers.add_start_index_map(input_dim);\n gather_dim_numbers.set_index_vector_dim(1);\n gather_dim_numbers.add_collapsed_slice_dims(input_dim);\n HloInstruction* operand_static_dim_size =\n reshape->AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR0(operand_shape.dimensions(input_dim))));\n HloInstruction* operand_static =\n reshape->AddInstruction(HloInstruction::CreateSetDimensionSize(\n operand_shape, reshape->mutable_operand(0), operand_static_dim_size,\n input_dim));\n std::vector slice_sizes(operand_shape.dimensions().begin(),\n operand_shape.dimensions().end());\n slice_sizes[input_dim] = 1;\n HloInstruction* gather = reshape->AddInstruction(HloInstruction::CreateGather(\n ShapeUtil::MakeShape(operand_shape.element_type(),\n operand_shape.dimensions()),\n operand_static, cumsum, gather_dim_numbers, slice_sizes, 
true));\n TF_RETURN_IF_ERROR(reshape->ReplaceOperandWith(0, gather));\n HloInstruction* reshape_dynamic = reshape;\n auto users = reshape->users();\n for (int64_t output_dim : output_dims) {\n HloInstruction* output_dynamic_size =\n dynamic_dimension_inference->GetDynamicSize(reshape, {}, output_dim);\n if (output_dynamic_size != nullptr) {\n reshape_dynamic =\n reshape->AddInstruction(HloInstruction::CreateSetDimensionSize(\n reshape->shape(), reshape_dynamic, output_dynamic_size,\n output_dim));\n }\n }\n for (auto* user : users) {\n TF_RETURN_IF_ERROR(reshape->ReplaceUseWith(user, reshape_dynamic));\n }\n TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(\n reshape, reshape_dynamic, {}));\n return true;\n}\nabsl::StatusOr RewriteDynamicReshapeCombineInput(\n HloInstruction* reshape, absl::Span input_dims,\n int64_t output_dim, absl::Span input_dynamic_dims,\n DynamicDimensionInference* dynamic_dimension_inference) {\n HloInstruction* zero = reshape->AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));\n HloInstruction* one = reshape->AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::One(S32)));\n const Shape output_shape = reshape->shape();\n const Shape input_shape = reshape->operand(0)->shape();\n const Shape mask_output_shape =\n ShapeUtil::MakeShape(xla::S32, {output_shape.dimensions(output_dim)});\n HloInstruction* output_shape_binary_mask =\n GenerateBinaryMask(reshape, output_dim, input_dims, input_dynamic_dims,\n one, zero, false);\n if (output_shape_binary_mask == nullptr) {\n VLOG(2) << \"No need to rewrite\";\n return false;\n }\n HloInstruction* iota =\n reshape->AddInstruction(HloInstruction::CreateIota(mask_output_shape, 0));\n HloComputation::Builder comp_builder(\"compare\");\n HloInstruction* lhs_key =\n comp_builder.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeScalarShape(S32), \"lhs_key\"));\n HloInstruction* rhs_key =\n 
comp_builder.AddInstruction(HloInstruction::CreateParameter(\n 1, ShapeUtil::MakeScalarShape(S32), \"rhs_key\"));\n comp_builder.AddInstruction(HloInstruction::CreateParameter(\n 2, ShapeUtil::MakeScalarShape(S32), \"lhs_value\"));\n comp_builder.AddInstruction(HloInstruction::CreateParameter(\n 3, ShapeUtil::MakeScalarShape(S32), \"rhs_value\"));\n comp_builder.AddInstruction(\n HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), lhs_key,\n rhs_key, ComparisonDirection::kGt));\n HloComputation* compare =\n reshape->GetModule()->AddEmbeddedComputation(comp_builder.Build());\n HloInstruction* sort = reshape->AddInstruction(HloInstruction::CreateSort(\n ShapeUtil::MakeTupleShape({mask_output_shape, mask_output_shape}), 0,\n {output_shape_binary_mask, iota}, compare,\n true));\n HloInstruction* gather_indices = reshape->AddInstruction(\n HloInstruction::CreateGetTupleElement(mask_output_shape, sort, 1));\n GatherDimensionNumbers gather_dim_numbers;\n for (int64_t i = 0; i < output_shape.dimensions_size(); ++i) {\n if (i != output_dim) {\n gather_dim_numbers.add_offset_dims(i);\n }\n }\n gather_dim_numbers.add_start_index_map(output_dim);\n gather_dim_numbers.set_index_vector_dim(1);\n gather_dim_numbers.add_collapsed_slice_dims(output_dim);\n HloInstruction* static_dim_size = reshape->AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(\n reshape->shape().dimensions(output_dim))));\n Shape reshape_static_shape = reshape->shape();\n reshape_static_shape.set_dynamic_dimension(output_dim, false);\n HloInstruction* reshape_static =\n reshape->AddInstruction(HloInstruction::CreateSetDimensionSize(\n reshape_static_shape, reshape, static_dim_size, output_dim));\n std::vector gather_slice_sizes(output_shape.dimensions().begin(),\n output_shape.dimensions().end());\n gather_slice_sizes[output_dim] = 1;\n HloInstruction* gather = reshape->AddInstruction(HloInstruction::CreateGather(\n output_shape, reshape_static, gather_indices, 
gather_dim_numbers,\n gather_slice_sizes, true));\n HloInstruction* output_dynamic_size =\n dynamic_dimension_inference->GetDynamicSize(reshape, {}, output_dim);\n TF_RET_CHECK(output_dynamic_size != nullptr);\n gather = reshape->AddInstruction(HloInstruction::CreateSetDimensionSize(\n gather->shape(), gather, output_dynamic_size, output_dim));\n auto users = reshape->users();\n for (auto* user : users) {\n if (user != reshape_static && user != output_dynamic_size) {\n TF_RETURN_IF_ERROR(reshape->ReplaceUseWith(user, gather));\n }\n }\n if (reshape == reshape->parent()->root_instruction()) {\n reshape->parent()->set_root_instruction(gather);\n }\n TF_RETURN_IF_ERROR(\n dynamic_dimension_inference->ForwardDynamicSize(reshape, gather, {}));\n return true;\n}\nabsl::StatusOr RewriteDynamicReshapeSingleGroup(\n HloInstruction* reshape, absl::Span input_dims,\n absl::Span output_dims,\n absl::Span input_dynamic_dims,\n absl::Span output_dynamic_dims,\n DynamicDimensionInference* dynamic_dimension_inference) {\n VLOG(2) << \"Rewriting dynamic reshape \" << reshape->ToString()\n << \" input dims: \" << VectorString(input_dims)\n << \" output dims: \" << VectorString(output_dims);\n const Shape operand_shape = reshape->operand(0)->shape();\n const Shape output_shape = reshape->shape();\n if (input_dims.size() == 1) {\n int64_t input_dim = input_dims[0];\n if (operand_shape.dimensions()[input_dim] == 1) {\n return false;\n }\n return RewriteDynamicReshapeSplitInput(reshape, input_dim, output_dims,\n output_dynamic_dims,\n dynamic_dimension_inference);\n }\n if (output_dims.size() == 1) {\n int64_t output_dim = output_dims[0];\n if (output_shape.dimensions()[output_dim] == 1) {\n return false;\n }\n return RewriteDynamicReshapeCombineInput(reshape, input_dims, output_dim,\n input_dynamic_dims,\n dynamic_dimension_inference);\n }\n TF_RET_CHECK(false);\n return false;\n}\nabsl::StatusOr RewriteReverse(\n HloInstruction* reverse,\n DynamicDimensionInference* 
dynamic_dimension_inference) {\n auto reverse_dims = reverse->dimensions();\n const Shape& reverse_shape = reverse->shape();\n std::set dynamic_reverse_dims;\n for (int64_t reverse_dim : reverse_dims) {\n HloInstruction* dynamic_size =\n dynamic_dimension_inference->GetDynamicSize(reverse, {}, reverse_dim);\n if (dynamic_size == nullptr) {\n continue;\n }\n dynamic_reverse_dims.insert(reverse_dim);\n }\n if (dynamic_reverse_dims.empty()) {\n return false;\n }\n PaddingConfig padding;\n Shape pad_shape = reverse_shape;\n for (int i = 0; i < reverse_shape.rank(); ++i) {\n auto dimension = padding.add_dimensions();\n if (dynamic_reverse_dims.count(i) > 0) {\n dimension->set_edge_padding_low(0);\n dimension->set_edge_padding_high(reverse_shape.dimensions(i));\n dimension->set_interior_padding(0);\n pad_shape.set_dimensions(i, 2 * pad_shape.dimensions(i));\n }\n }\n HloInstruction* cloned_reverse = reverse->AddInstruction(reverse->Clone());\n HloInstruction* zero = reverse->AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::Zero(pad_shape.element_type())));\n HloInstruction* pad = reverse->AddInstruction(\n HloInstruction::CreatePad(pad_shape, cloned_reverse, zero, padding));\n std::vector start_indices;\n start_indices.reserve(reverse_shape.rank());\n for (int i = 0; i < reverse_shape.rank(); ++i) {\n if (dynamic_reverse_dims.count(i) > 0) {\n HloInstruction* bound_size =\n reverse->AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR0(reverse_shape.dimensions(i))));\n HloInstruction* dynamic_size =\n dynamic_dimension_inference->GetDynamicSize(reverse, {}, i);\n HloInstruction* start_offset =\n reverse->AddInstruction(HloInstruction::CreateBinary(\n ShapeUtil::MakeScalarShape(S32), HloOpcode::kSubtract, bound_size,\n dynamic_size));\n start_indices.push_back(start_offset);\n } else {\n HloInstruction* zero = reverse->AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));\n start_indices.push_back(zero);\n }\n }\n 
HloInstruction* dynamic_reverse =\n reverse->AddInstruction(HloInstruction::CreateDynamicSlice(\n reverse_shape, pad, start_indices, reverse_shape.dimensions()));\n TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(\n reverse, dynamic_reverse, {}));\n TF_RETURN_IF_ERROR(reverse->ReplaceAllUsesWith(dynamic_reverse));\n return true;\n}\nHloInstruction* RewriteInputWithDynamicPadding(\n HloInstruction* conv, HloInstruction* input, HloInstruction* padding_value,\n absl::Span padding_before, Window* input_window,\n absl::FunctionRef window_dim_to_shape_dim) {\n HloInstruction* zero_s32 = conv->AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));\n Shape padded_shape = input->shape();\n PaddingConfig padding_configs;\n for (int64_t i = 0; i < input->shape().rank(); ++i) {\n PaddingConfig::PaddingConfigDimension padding_dim;\n *padding_configs.add_dimensions() = padding_dim;\n }\n std::vector start_indices(input->shape().rank(), zero_s32);\n for (int64_t dim_index = 0; dim_index < input_window->dimensions_size();\n ++dim_index) {\n if (padding_before[dim_index] == nullptr) {\n continue;\n }\n int64_t shape_dim = window_dim_to_shape_dim(dim_index);\n WindowDimension* window_dim = input_window->mutable_dimensions(dim_index);\n auto* padding_dim = padding_configs.mutable_dimensions(shape_dim);\n const int64_t dilated_window_size = window_util::DilatedBound(\n window_dim->size(), window_dim->window_dilation());\n padding_dim->set_edge_padding_low(dilated_window_size);\n padding_dim->set_edge_padding_high(window_dim->padding_high() +\n window_dim->padding_low());\n padding_dim->set_interior_padding(window_dim->base_dilation() - 1);\n HloInstruction* slicing_start =\n conv->AddInstruction(HloInstruction::CreateBinary(\n ShapeUtil::MakeScalarShape(S32), HloOpcode::kSubtract,\n conv->AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(\n padding_dim->edge_padding_low()))),\n padding_before[dim_index]));\n 
start_indices[shape_dim] = slicing_start;\n padded_shape.mutable_dimensions()[shape_dim] =\n window_dim->padding_low() +\n window_util::DilatedBound(padded_shape.dimensions(shape_dim),\n window_dim->base_dilation()) +\n window_dim->padding_high();\n window_dim->clear_padding_high();\n window_dim->clear_padding_low();\n window_dim->set_base_dilation(1);\n input->mutable_shape()->set_dynamic_dimension(shape_dim, false);\n }\n HloInstruction* pad =\n MakePadHlo(input, padding_value, padding_configs).value();\n input = conv->AddInstruction(HloInstruction::CreateDynamicSlice(\n padded_shape, pad, start_indices, padded_shape.dimensions()));\n return input;\n}\nabsl::StatusOr RewriteDynamicConvolutionInputGrad(\n HloInstruction* custom_call_conv,\n DynamicDimensionInference* dynamic_dimension_inference) {\n HloInstruction* grad = custom_call_conv->mutable_operand(1);\n HloInstruction* kernel = custom_call_conv->mutable_operand(2);\n TF_RET_CHECK(kernel->shape().is_static());\n auto dnums = custom_call_conv->convolution_dimension_numbers();\n Window window = custom_call_conv->window();\n HloInstruction* zero =\n custom_call_conv->AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::Zero(custom_call_conv->shape().element_type())));\n std::vector padding_before(\n dnums.input_spatial_dimensions_size(), nullptr);\n for (int64_t spatial_dim_index = 0;\n spatial_dim_index < dnums.input_spatial_dimensions_size();\n ++spatial_dim_index) {\n int64_t input_spatial_dim =\n dnums.input_spatial_dimensions(spatial_dim_index);\n HloInstruction* operand_dynamic_size =\n dynamic_dimension_inference->GetDynamicSize(\n custom_call_conv->mutable_operand(1), {}, input_spatial_dim);\n if (operand_dynamic_size == nullptr) {\n continue;\n }\n grad = PadWithScalar(grad, input_spatial_dim, operand_dynamic_size, zero);\n HloInstruction* slice =\n custom_call_conv->AddInstruction(HloInstruction::CreateSlice(\n ShapeUtil::MakeShape(S32, {1}),\n custom_call_conv->mutable_operand(0), 
{input_spatial_dim},\n {input_spatial_dim + 1}, {1}));\n HloInstruction* dynamic_input_size = custom_call_conv->AddInstruction(\n HloInstruction::CreateReshape(ShapeUtil::MakeScalarShape(S32), slice));\n const WindowDimension& window_dim = window.dimensions(spatial_dim_index);\n DynamicWindowDims dynamic_window_dims = GetWindowedInputGradSize(\n dynamic_input_size, window_dim.size(),\n window_dim.window_dilation(),\n window_dim.base_dilation(),\n custom_call_conv->padding_type());\n padding_before[spatial_dim_index] = dynamic_window_dims.padding_before;\n }\n if (custom_call_conv->padding_type() == PaddingType::PADDING_SAME) {\n grad = RewriteInputWithDynamicPadding(\n custom_call_conv, grad, zero, absl::MakeSpan(padding_before), &window,\n [&](int64_t dim) { return dnums.input_spatial_dimensions(dim); });\n }\n PrecisionConfig precision_config;\n if (custom_call_conv->precision_config().operand_precision_size() == 3) {\n *precision_config.mutable_operand_precision() = {\n custom_call_conv->precision_config().operand_precision().begin() + 1,\n custom_call_conv->precision_config().operand_precision().end()};\n }\n HloInstruction* static_conv =\n custom_call_conv->AddInstruction(HloInstruction::CreateConvolve(\n custom_call_conv->shape(), grad, kernel,\n custom_call_conv->feature_group_count(),\n custom_call_conv->batch_group_count(), window,\n custom_call_conv->convolution_dimension_numbers(),\n custom_call_conv->precision_config()));\n TF_RETURN_IF_ERROR(custom_call_conv->ReplaceAllUsesWith(static_conv));\n TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(\n custom_call_conv, static_conv, {}));\n return true;\n}\nabsl::StatusOr RewriteDynamicConvolutionForward(\n HloInstruction* custom_call_conv,\n DynamicDimensionInference* dynamic_dimension_inference) {\n HloInstruction* input = custom_call_conv->mutable_operand(0);\n HloInstruction* kernel = custom_call_conv->mutable_operand(1);\n Window window = custom_call_conv->window();\n auto dnums = 
custom_call_conv->convolution_dimension_numbers();\n HloInstruction* zero =\n custom_call_conv->AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::Zero(custom_call_conv->shape().element_type())));\n std::vector padding_before(\n dnums.input_spatial_dimensions_size(), nullptr);\n for (int64_t spatial_dim_index = 0;\n spatial_dim_index < dnums.input_spatial_dimensions_size();\n ++spatial_dim_index) {\n int64_t input_spatial_dim =\n dnums.input_spatial_dimensions(spatial_dim_index);\n HloInstruction* operand_dynamic_size =\n dynamic_dimension_inference->GetDynamicSize(\n custom_call_conv->mutable_operand(0), {}, input_spatial_dim);\n if (operand_dynamic_size == nullptr) {\n continue;\n }\n input = PadWithScalar(input, input_spatial_dim, operand_dynamic_size, zero);\n const WindowDimension& window_dim = window.dimensions(spatial_dim_index);\n DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(\n operand_dynamic_size, window_dim.size(), window_dim.window_dilation(),\n window_dim.stride(), custom_call_conv->padding_type());\n padding_before[spatial_dim_index] = dynamic_window_dims.padding_before;\n }\n const int64_t input_feature_dim = dnums.input_feature_dimension();\n if (HloInstruction* input_feature_dynamic_size =\n dynamic_dimension_inference->GetDynamicSize(\n custom_call_conv->mutable_operand(0), {}, input_feature_dim)) {\n input = PadWithScalar(input, input_feature_dim, input_feature_dynamic_size,\n zero);\n }\n if (custom_call_conv->padding_type() == PaddingType::PADDING_SAME) {\n input = RewriteInputWithDynamicPadding(\n custom_call_conv, input, zero, absl::MakeSpan(padding_before), &window,\n [&](int64_t dim) { return dnums.input_spatial_dimensions(dim); });\n }\n HloInstruction* static_conv =\n custom_call_conv->AddInstruction(HloInstruction::CreateConvolve(\n custom_call_conv->shape(), input, kernel,\n custom_call_conv->feature_group_count(),\n custom_call_conv->batch_group_count(), window,\n 
custom_call_conv->convolution_dimension_numbers(),\n custom_call_conv->precision_config()));\n TF_RETURN_IF_ERROR(custom_call_conv->ReplaceAllUsesWith(static_conv));\n TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(\n custom_call_conv, static_conv, {}));\n return true;\n}\nabsl::StatusOr RewriteDynamicConvolutionKernelGrad(\n HloInstruction* custom_call_conv,\n DynamicDimensionInference* dynamic_dimension_inference) {\n HloInstruction* activations = custom_call_conv->mutable_operand(0);\n HloInstruction* gradients = custom_call_conv->mutable_operand(1);\n TF_RET_CHECK(dynamic_dimension_inference->HasDynamicDimension(activations));\n TF_RET_CHECK(dynamic_dimension_inference->HasDynamicDimension(gradients));\n Window window = custom_call_conv->window();\n auto dnums = custom_call_conv->convolution_dimension_numbers();\n HloInstruction* zero =\n custom_call_conv->AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::Zero(custom_call_conv->shape().element_type())));\n std::vector padding_before(\n dnums.input_spatial_dimensions_size(), nullptr);\n for (int64_t spatial_dim_index = 0;\n spatial_dim_index < dnums.input_spatial_dimensions_size();\n ++spatial_dim_index) {\n int64_t input_spatial_dim =\n dnums.input_spatial_dimensions(spatial_dim_index);\n int64_t kernel_spatial_dim =\n dnums.kernel_spatial_dimensions(spatial_dim_index);\n HloInstruction* activations_dynamic_size =\n dynamic_dimension_inference->GetDynamicSize(\n custom_call_conv->mutable_operand(0), {}, input_spatial_dim);\n if (activations_dynamic_size != nullptr) {\n activations = PadWithScalar(activations, input_spatial_dim,\n activations_dynamic_size, zero);\n }\n HloInstruction* gradients_dynamic_size =\n dynamic_dimension_inference->GetDynamicSize(\n custom_call_conv->mutable_operand(1), {}, kernel_spatial_dim);\n if (gradients_dynamic_size != nullptr) {\n gradients = PadWithScalar(gradients, kernel_spatial_dim,\n gradients_dynamic_size, zero);\n }\n if 
(activations_dynamic_size == nullptr ||\n gradients_dynamic_size == nullptr) {\n TF_RET_CHECK(activations_dynamic_size == nullptr &&\n gradients_dynamic_size == nullptr);\n continue;\n }\n int64_t output_spatial_dim =\n dnums.output_spatial_dimensions(spatial_dim_index);\n const WindowDimension& window_dim = window.dimensions(spatial_dim_index);\n DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(\n activations_dynamic_size, \n custom_call_conv->shape().dimensions(output_spatial_dim),\n window_dim.stride(),\n window_dim.window_dilation(),\n custom_call_conv->padding_type());\n padding_before[spatial_dim_index] = dynamic_window_dims.padding_before;\n }\n const int64_t input_feature_dim = dnums.input_feature_dimension();\n if (HloInstruction* input_feature_dynamic_size =\n dynamic_dimension_inference->GetDynamicSize(\n custom_call_conv->mutable_operand(0), {}, input_feature_dim)) {\n activations = PadWithScalar(activations, input_feature_dim,\n input_feature_dynamic_size, zero);\n }\n if (custom_call_conv->padding_type() == PaddingType::PADDING_SAME) {\n activations = RewriteInputWithDynamicPadding(\n custom_call_conv, activations, zero, absl::MakeSpan(padding_before),\n &window,\n [&](int64_t dim) { return dnums.input_spatial_dimensions(dim); });\n }\n HloInstruction* static_conv =\n custom_call_conv->AddInstruction(HloInstruction::CreateConvolve(\n custom_call_conv->shape(), activations, gradients,\n custom_call_conv->feature_group_count(),\n custom_call_conv->batch_group_count(), window,\n custom_call_conv->convolution_dimension_numbers(),\n custom_call_conv->precision_config()));\n TF_RETURN_IF_ERROR(custom_call_conv->ReplaceAllUsesWith(static_conv));\n TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(\n custom_call_conv, static_conv, {}));\n return true;\n}\nabsl::StatusOr RewriteDynamicReduceWindowSamePadding(\n HloInstruction* hlo,\n DynamicDimensionInference* dynamic_dimension_inference) {\n if (hlo->shape().IsTuple()) {\n return 
Unimplemented(\"DynamicReduceWindowSamePadding not yet supported.\");\n }\n HloInstruction* input = hlo->mutable_operand(0);\n HloInstruction* init = hlo->mutable_operand(1);\n int64_t rank = hlo->shape().rank();\n Window window = hlo->window();\n std::vector padding_before(hlo->shape().rank(), nullptr);\n for (int64_t dim_index = 0; dim_index < rank; ++dim_index) {\n HloInstruction* operand_dynamic_size =\n dynamic_dimension_inference->GetDynamicSize(hlo->mutable_operand(0), {},\n dim_index);\n if (operand_dynamic_size == nullptr) {\n continue;\n }\n const WindowDimension& window_dim = window.dimensions(dim_index);\n if (window_util::IsTrivialWindowDimension(window_dim)) {\n continue;\n }\n input = PadWithScalar(input, dim_index, operand_dynamic_size, init);\n DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(\n operand_dynamic_size, window_dim.size(), window_dim.window_dilation(),\n window_dim.stride(), PaddingType::PADDING_SAME);\n padding_before[dim_index] = dynamic_window_dims.padding_before;\n }\n input = RewriteInputWithDynamicPadding(\n hlo, input, init, absl::MakeSpan(padding_before), &window,\n [](int64_t dim) { return dim; });\n HloInstruction* rewritten =\n hlo->AddInstruction(HloInstruction::CreateReduceWindow(\n hlo->shape(), input, init, window, hlo->called_computations()[0]));\n TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(rewritten));\n TF_RETURN_IF_ERROR(\n dynamic_dimension_inference->ForwardDynamicSize(hlo, rewritten, {}));\n return true;\n}\nabsl::StatusOr RewriteDynamicSelectAndScatterSamePadding(\n HloInstruction* hlo,\n DynamicDimensionInference* dynamic_dimension_inference) {\n HloInstruction* input = hlo->mutable_operand(0);\n HloInstruction* source = hlo->mutable_operand(1);\n HloInstruction* init = hlo->mutable_operand(2);\n TF_ASSIGN_OR_RETURN(HloInstruction * input_padding_value,\n ChooseIdentityValue(hlo, 0));\n int64_t rank = hlo->shape().rank();\n Window window = hlo->window();\n std::vector 
padding_before(hlo->shape().rank(), nullptr);\n for (int64_t dim_index = 0; dim_index < rank; ++dim_index) {\n const WindowDimension& window_dim = window.dimensions(dim_index);\n if (window_util::IsTrivialWindowDimension(window_dim)) {\n continue;\n }\n HloInstruction* operand_dynamic_size =\n dynamic_dimension_inference->GetDynamicSize(hlo->mutable_operand(0), {},\n dim_index);\n if (operand_dynamic_size == nullptr) {\n continue;\n }\n input = PadWithScalar(input, dim_index, operand_dynamic_size,\n input_padding_value);\n HloInstruction* source_dynamic_size =\n dynamic_dimension_inference->GetDynamicSize(hlo->mutable_operand(1), {},\n dim_index);\n if (source_dynamic_size == nullptr) {\n continue;\n }\n source = PadWithScalar(source, dim_index, source_dynamic_size, init);\n DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(\n operand_dynamic_size, window_dim.size(), window_dim.window_dilation(),\n window_dim.stride(), PaddingType::PADDING_SAME);\n padding_before[dim_index] = dynamic_window_dims.padding_before;\n }\n input = RewriteInputWithDynamicPadding(\n hlo, input, input_padding_value, absl::MakeSpan(padding_before), &window,\n [](int64_t dim) { return dim; });\n HloInstruction* rewritten =\n hlo->AddInstruction(HloInstruction::CreateSelectAndScatter(\n input->shape(), input, hlo->called_computations()[0], window, source,\n init, hlo->called_computations()[1]));\n std::vector start_indices(\n input->shape().rank(), hlo->AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::Zero(S32))));\n PaddingConfig padding_configs;\n for (int64_t dim_index = 0; dim_index < rank; ++dim_index) {\n PaddingConfig::PaddingConfigDimension padding_dim;\n if (padding_before[dim_index] != nullptr) {\n const WindowDimension& window_dim = window.dimensions(dim_index);\n const int64_t dilated_window_size = window_util::DilatedBound(\n window_dim.size(), window_dim.window_dilation());\n padding_dim.set_edge_padding_high(dilated_window_size);\n 
start_indices[dim_index] = padding_before[dim_index];\n }\n *padding_configs.add_dimensions() = padding_dim;\n }\n HloInstruction* padded = MakePadHlo(rewritten, init, padding_configs).value();\n rewritten = hlo->AddInstruction(HloInstruction::CreateDynamicSlice(\n hlo->shape(), padded, start_indices, hlo->shape().dimensions()));\n TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(rewritten));\n TF_RETURN_IF_ERROR(\n dynamic_dimension_inference->ForwardDynamicSize(hlo, rewritten, {}));\n return true;\n}\nabsl::StatusOr RewriteDynamicConcat(\n HloInstruction* concat,\n DynamicDimensionInference* dynamic_dimension_inference) {\n const int64_t concat_dim = concat->concatenate_dimension();\n if (dynamic_dimension_inference->GetDynamicSize(concat, {}, concat_dim) ==\n nullptr) {\n return false;\n }\n std::vector offsets;\n offsets.reserve(concat->shape().dimensions_size());\n for (int64_t i = 0; i < concat->shape().dimensions_size(); ++i) {\n offsets.push_back(concat->AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(0))));\n }\n HloInstruction* rewritten_concat = concat;\n auto prev_users = concat->users();\n for (int64_t i = 0; i < concat->operand_count(); ++i) {\n HloInstruction* operand = concat->mutable_operand(i);\n rewritten_concat =\n concat->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(\n rewritten_concat->shape(), rewritten_concat, operand, offsets));\n HloInstruction* dynamic_size =\n dynamic_dimension_inference->GetDynamicSize(operand, {}, concat_dim);\n if (dynamic_size == nullptr) {\n HloInstruction* static_size = concat->AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(\n operand->shape().dimensions(concat_dim))));\n offsets[concat_dim] = concat->AddInstruction(HloInstruction::CreateBinary(\n ShapeUtil::MakeScalarShape(S32), HloOpcode::kAdd, offsets[concat_dim],\n static_size));\n } else {\n offsets[concat_dim] = concat->AddInstruction(HloInstruction::CreateBinary(\n ShapeUtil::MakeScalarShape(S32), 
HloOpcode::kAdd, offsets[concat_dim],\n dynamic_size));\n }\n }\n TF_RETURN_IF_ERROR(concat->ReplaceUsesWith(prev_users, rewritten_concat));\n TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(\n concat, rewritten_concat, {}));\n return true;\n}\nabsl::StatusOr RewriteDynamicSort(\n HloInstruction* hlo,\n DynamicDimensionInference* dynamic_dimension_inference) {\n HloInstruction* dynamic_size = nullptr;\n HloSortInstruction* sort = Cast(hlo);\n int64_t sort_dim = sort->sort_dimension();\n for (auto* operand : sort->operands()) {\n if (dynamic_size == nullptr) {\n dynamic_size =\n dynamic_dimension_inference->GetDynamicSize(operand, {}, sort_dim);\n }\n }\n if (dynamic_size == nullptr) {\n return false;\n }\n Shape operand_shape =\n ShapeUtil::ChangeElementType(sort->operand(0)->shape(), S32);\n Shape broadcast_shape = ShapeUtil::MakeStaticShape(operand_shape);\n HloInstruction* iota = hlo->AddInstruction(\n HloInstruction::CreateIota(broadcast_shape, sort_dim));\n HloInstruction* dynamic_size_broadcasted = hlo->AddInstruction(\n HloInstruction::CreateBroadcast(broadcast_shape, dynamic_size, {}));\n HloInstruction* lt = hlo->AddInstruction(HloInstruction::CreateCompare(\n ShapeUtil::ChangeElementType(broadcast_shape, PRED), iota,\n dynamic_size_broadcasted, ComparisonDirection::kLt));\n sort->AppendOperand(lt);\n const int64_t param_number_before_rewritten =\n sort->called_computations()[0]->num_parameters();\n auto new_param_0 = HloInstruction::CreateParameter(\n param_number_before_rewritten, ShapeUtil::MakeScalarShape(PRED),\n \"inbound_lhs\");\n auto new_param_1 = HloInstruction::CreateParameter(\n param_number_before_rewritten + 1, ShapeUtil::MakeScalarShape(PRED),\n \"inbound_rhs\");\n std::vector extra_parameters{new_param_0.get(),\n new_param_1.get()};\n HloComputation* sort_comp = sort->GetModule()->AddEmbeddedComputation(\n sort->called_computations()[0]->CloneWithReplacements(\n nullptr, extra_parameters));\n auto inbound_lhs =\n 
sort_comp->parameter_instruction(param_number_before_rewritten);\n auto inbound_rhs =\n sort_comp->parameter_instruction(param_number_before_rewritten + 1);\n sort->ReplaceCalledComputations(\n [&](HloComputation* comp) { return sort_comp; });\n auto out_of_bound_rhs = sort_comp->AddInstruction(HloInstruction::CreateUnary(\n ShapeUtil::MakeScalarShape(PRED), HloOpcode::kNot, inbound_rhs));\n auto sort_comp_or_out_of_bound_rhs =\n sort_comp->AddInstruction(HloInstruction::CreateBinary(\n ShapeUtil::MakeScalarShape(PRED), HloOpcode::kOr,\n sort_comp->root_instruction(), out_of_bound_rhs));\n auto new_root = sort_comp->AddInstruction(HloInstruction::CreateBinary(\n ShapeUtil::MakeScalarShape(PRED), HloOpcode::kAnd, inbound_lhs,\n sort_comp_or_out_of_bound_rhs));\n sort_comp->set_root_instruction(new_root);\n if (sort->shape().IsTuple()) {\n *sort->mutable_shape()->add_tuple_shapes() =\n ShapeUtil::ChangeElementType(operand_shape, PRED);\n } else {\n auto sort_users = sort->users();\n auto sort_clone = hlo->AddInstruction(sort->Clone());\n *sort_clone->mutable_shape() = ShapeUtil::MakeTupleShape(\n {sort->shape(), ShapeUtil::ChangeElementType(operand_shape, PRED)});\n auto rewritten_sort = hlo->AddInstruction(\n HloInstruction::CreateGetTupleElement(sort->shape(), sort_clone, 0));\n for (HloInstruction* user : sort_users) {\n TF_RETURN_IF_ERROR(sort->ReplaceUseWith(user, rewritten_sort));\n }\n TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(\n sort, rewritten_sort, {}));\n if (hlo->parent()->root_instruction() == sort) {\n hlo->parent()->set_root_instruction(rewritten_sort);\n }\n }\n return true;\n}\nabsl::StatusOr RewriteDynamicBinaryOp(\n HloInstruction* binary,\n DynamicDimensionInference* dynamic_dimension_inference) {\n HloInstruction* operand_0 = binary->mutable_operand(0);\n HloInstruction* operand_1 = binary->mutable_operand(1);\n TF_RET_CHECK(operand_0->shape().rank() == operand_1->shape().rank());\n auto dims_0 = 
dynamic_dimension_inference->GetDynamicSizes(operand_0, {});\n auto dims_1 = dynamic_dimension_inference->GetDynamicSizes(operand_1, {});\n bool changed = false;\n for (int64_t i = 0; i < dims_0.size(); ++i) {\n HloInstruction* dim_0 = dims_0[i];\n HloInstruction* dim_1 = dims_1[i];\n if (dims_0[i] != dims_1[i] && dims_0[i] != nullptr &&\n dims_1[i] != nullptr) {\n changed = true;\n auto rewrite_operand = [&](HloInstruction* pred,\n HloInstruction* operand) -> HloInstruction* {\n Shape static_shape = ShapeUtil::MakeStaticShape(operand->shape());\n pred = binary->AddInstruction(HloInstruction::CreateBroadcast(\n ShapeUtil::ChangeElementType(static_shape, PRED), pred, {}));\n Shape slice_shape = static_shape;\n slice_shape.set_dimensions(i, 1);\n std::vector start_indices(slice_shape.rank(), 0);\n std::vector strides(slice_shape.rank(), 1);\n HloInstruction* slice = binary->AddInstruction(\n HloInstruction::CreateSlice(slice_shape, operand, start_indices,\n slice_shape.dimensions(), strides));\n Shape reshape_shape = ShapeUtil::DeleteDimension(i, slice_shape);\n HloInstruction* reshape = binary->AddInstruction(\n HloInstruction::CreateReshape(reshape_shape, slice));\n std::vector broadcast_dims;\n broadcast_dims.reserve(static_shape.rank() - 1);\n for (int64_t j = 0; j < static_shape.rank(); ++j) {\n if (j != i) {\n broadcast_dims.push_back(j);\n }\n }\n HloInstruction* broadcast = binary->parent()->AddInstruction(\n HloInstruction::CreateBroadcast(static_shape, reshape,\n broadcast_dims),\n \"implicit_broadcast\");\n HloInstruction* select =\n binary->AddInstruction(HloInstruction::CreateTernary(\n static_shape, HloOpcode::kSelect, pred, broadcast, operand));\n return select;\n };\n HloInstruction* one = binary->AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::One(S32)));\n auto operand_0_needs_broadcast = binary->parent()->AddInstruction(\n HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), dim_0,\n dim_1, ComparisonDirection::kLt),\n 
\"lhs_less_than_rhs\");\n auto is_one = binary->parent()->AddInstruction(\n HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), dim_0,\n one, ComparisonDirection::kEq),\n \"lhs_is_one\");\n operand_0_needs_broadcast = binary->parent()->AddInstruction(\n HloInstruction::CreateBinary(ShapeUtil::MakeShape(PRED, {}),\n HloOpcode::kAnd, is_one,\n operand_0_needs_broadcast),\n \"lhs_needs_implicit_broadcast\");\n operand_0 = rewrite_operand(operand_0_needs_broadcast, operand_0);\n auto operand_1_needs_broadcast = binary->parent()->AddInstruction(\n HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), dim_1,\n dim_0, ComparisonDirection::kLt),\n \"rhs_less_than_lhs\");\n is_one = binary->parent()->AddInstruction(\n HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), dim_1,\n one, ComparisonDirection::kEq),\n \"rhs_is_one\");\n operand_1_needs_broadcast = binary->parent()->AddInstruction(\n HloInstruction::CreateBinary(ShapeUtil::MakeShape(PRED, {}),\n HloOpcode::kAnd, is_one,\n operand_1_needs_broadcast),\n \"lhs_needs_implicit_broadcast\");\n operand_1 = rewrite_operand(operand_1_needs_broadcast, operand_1);\n }\n }\n if (changed) {\n TF_RETURN_IF_ERROR(binary->ReplaceOperandWith(0, operand_0));\n TF_RETURN_IF_ERROR(binary->ReplaceOperandWith(1, operand_1));\n }\n return changed;\n}\nabsl::StatusOr RewriteDynamicUpdateSlice(\n HloInstruction* hlo,\n DynamicDimensionInference* dynamic_dimension_inference) {\n HloDynamicUpdateSliceInstruction* dus =\n Cast(hlo);\n HloInstruction* update = dus->mutable_operand(1);\n HloInstruction* base = dus->mutable_operand(0);\n std::vector dynamic_dims_in_partial_update(\n update->shape().rank(), nullptr);\n bool needs_rewrite = false;\n for (int64_t i = 0; i < update->shape().rank(); ++i) {\n if (update->shape().dimensions(i) < base->shape().dimensions(i)) {\n HloInstruction* dynamic_dim =\n dynamic_dimension_inference->GetDynamicSize(update, {}, i);\n if (dynamic_dim != nullptr) {\n 
dynamic_dims_in_partial_update[i] = dynamic_dim;\n needs_rewrite = true;\n }\n }\n }\n if (!needs_rewrite) {\n return false;\n }\n std::vector indices;\n indices.reserve(dus->operand_count() - 2);\n for (int64_t i = 2; i < dus->operand_count(); ++i) {\n indices.push_back(dus->mutable_operand(i));\n }\n HloInstruction* base_slice =\n dus->AddInstruction(HloInstruction::CreateDynamicSlice(\n update->shape(), base, indices, update->shape().dimensions()));\n for (int64_t i = 0; i < dynamic_dims_in_partial_update.size(); ++i) {\n HloInstruction* dynamic_dim = dynamic_dims_in_partial_update[i];\n if (dynamic_dim != nullptr) {\n Shape mask_shape_int = ShapeUtil::ChangeElementType(update->shape(), S32);\n Shape mask_shape_pred =\n ShapeUtil::ChangeElementType(update->shape(), PRED);\n HloInstruction* iota =\n dus->AddInstruction(HloInstruction::CreateIota(mask_shape_int, i));\n HloInstruction* broadcast_dim = dus->AddInstruction(\n HloInstruction::CreateBroadcast(mask_shape_int, dynamic_dim, {}));\n HloInstruction* pred = dus->AddInstruction(HloInstruction::CreateCompare(\n mask_shape_pred, iota, broadcast_dim, ComparisonDirection::kLt));\n update = dus->AddInstruction(HloInstruction::CreateTernary(\n update->shape(), HloOpcode::kSelect, pred, update, base_slice));\n }\n }\n TF_RETURN_IF_ERROR(dus->ReplaceOperandWith(1, update));\n return true;\n}\nabsl::StatusOr RewriteDynamicReshape(\n HloInstruction* reshape,\n DynamicDimensionInference* dynamic_dimension_inference) {\n bool changed = false;\n HloInstruction* operand = reshape->mutable_operand(0);\n std::vector input_dynamic_dims;\n input_dynamic_dims.reserve(operand->shape().dimensions_size());\n for (int64_t dim = 0; dim < operand->shape().dimensions_size(); ++dim) {\n input_dynamic_dims.push_back(\n dynamic_dimension_inference->GetDynamicSize(operand, {}, dim));\n }\n std::vector output_dynamic_dims;\n output_dynamic_dims.reserve(reshape->shape().dimensions_size());\n for (int64_t dim = 0; dim < 
reshape->shape().dimensions_size(); ++dim) {\n output_dynamic_dims.push_back(\n dynamic_dimension_inference->GetDynamicSize(reshape, {}, dim));\n }\n auto common_factors = CommonFactors(operand->shape().dimensions(),\n reshape->shape().dimensions());\n bool need_flatten_unflatten = false;\n auto is_dynamic_dimension = [&](int64_t dim) {\n HloInstruction* operand_dynamic_size =\n dynamic_dimension_inference->GetDynamicSize(reshape, {}, dim);\n return operand_dynamic_size != nullptr ||\n reshape->shape().is_dynamic_dimension(dim);\n };\n auto should_skip_common_factor_group = [&](DimensionVector input_dims,\n DimensionVector output_dims) {\n if (input_dims.empty() || output_dims.empty()) {\n return true;\n }\n if (absl::c_none_of(output_dims, is_dynamic_dimension)) {\n VLOG(2) << \"All dimensions are static in this common factor group\";\n return true;\n }\n if (input_dims.size() == 1 && output_dims.size() == 1) {\n return true;\n }\n return false;\n };\n for (int64_t i = 0; i < common_factors.size() - 1; ++i) {\n auto start = common_factors[i];\n auto end = common_factors[i + 1];\n DimensionVector input_dims;\n DimensionVector output_dims;\n for (int64_t dim = start.first; dim < end.first; ++dim) {\n input_dims.push_back(dim);\n }\n for (int64_t dim = start.second; dim < end.second; ++dim) {\n output_dims.push_back(dim);\n }\n if (should_skip_common_factor_group(input_dims, output_dims)) {\n continue;\n }\n if (input_dims.size() > 1 && output_dims.size() > 1) {\n need_flatten_unflatten = true;\n break;\n }\n }\n if (need_flatten_unflatten) {\n VLOG(2) << \"Rewrite dynamic reshape to flatten-unflatten pair. 
\"\n << reshape->ToString();\n int64_t num_elements = ShapeUtil::ElementsIn(operand->shape());\n Shape flattened_shape =\n ShapeUtil::MakeShape(operand->shape().element_type(), {num_elements});\n HloInstruction* flatten = operand->parent()->AddInstruction(\n HloInstruction::CreateReshape(flattened_shape, operand),\n absl::StrCat(reshape->name(), \".flatten\"));\n HloInstruction* dynamic_size =\n operand->AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR0(num_elements)));\n for (int64_t i = 0; i < operand->shape().rank(); i++) {\n HloInstruction* dynamic_dim_size =\n dynamic_dimension_inference->GetDynamicSize(operand, {}, i);\n if (dynamic_dim_size != nullptr) {\n HloInstruction* static_dim_size = operand->AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(\n operand->shape().dimensions(i))));\n dynamic_size = operand->AddInstruction(HloInstruction::CreateBinary(\n dynamic_size->shape(), HloOpcode::kDivide, dynamic_size,\n static_dim_size));\n dynamic_size = operand->AddInstruction(HloInstruction::CreateBinary(\n dynamic_size->shape(), HloOpcode::kMultiply, dynamic_size,\n dynamic_dim_size));\n }\n }\n dynamic_dimension_inference->SetDynamicSize(flatten, {}, 0, dynamic_size);\n Shape unflattened_shape = ShapeUtil::MakeStaticShape(reshape->shape());\n HloInstruction* unflatten = reshape->parent()->AddInstruction(\n HloInstruction::CreateReshape(unflattened_shape, flatten),\n absl::StrCat(reshape->name(), \".unflatten\"));\n TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(\n reshape, unflatten, {}));\n TF_ASSIGN_OR_RETURN(\n bool changed_unused,\n RewriteDynamicReshape(flatten, dynamic_dimension_inference));\n TF_ASSIGN_OR_RETURN(\n changed_unused,\n RewriteDynamicReshape(unflatten, dynamic_dimension_inference));\n TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(\n reshape, unflatten, {}));\n TF_RETURN_IF_ERROR(reshape->ReplaceAllUsesWith(unflatten));\n return true;\n }\n for (int64_t i = 
0; i < common_factors.size() - 1; ++i) {\n auto start = common_factors[i];\n auto end = common_factors[i + 1];\n DimensionVector input_dims;\n DimensionVector output_dims;\n for (int64_t dim = start.first; dim < end.first; ++dim) {\n input_dims.push_back(dim);\n }\n for (int64_t dim = start.second; dim < end.second; ++dim) {\n output_dims.push_back(dim);\n }\n VLOG(2) << \"input_dims: \" << VectorString(input_dims);\n VLOG(2) << \"output_dims: \" << VectorString(output_dims);\n if (should_skip_common_factor_group(input_dims, output_dims)) {\n continue;\n }\n if (input_dims.size() > 1 && output_dims.size() > 1) {\n return Internal(\n \"Should be handled by decomposing reshape into \"\n \"flatten-unflatten pair. %s\",\n reshape->ToString());\n }\n TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicReshapeSingleGroup(\n reshape, input_dims, output_dims,\n absl::MakeSpan(input_dynamic_dims),\n absl::MakeSpan(output_dynamic_dims),\n dynamic_dimension_inference));\n changed |= c;\n }\n if (reshape->opcode() == HloOpcode::kDynamicReshape) {\n auto* static_reshape =\n reshape->AddInstruction(HloInstruction::CreateReshape(\n reshape->shape(), reshape->mutable_operand(0)));\n TF_RETURN_IF_ERROR(reshape->ReplaceAllUsesWith(static_reshape));\n TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(\n reshape, static_reshape, {}));\n changed = true;\n }\n return changed;\n}\nclass DynamicShapeRemovingVisitor : public DfsHloRewriteVisitor {\n public:\n explicit DynamicShapeRemovingVisitor(\n const OpSupportsDynamismHandler& op_supports_dynamism_handler,\n DynamicDimensionInference* dynamic_dimension_inference,\n const absl::flat_hash_set& execution_threads)\n : op_supports_dynamism_handler_(op_supports_dynamism_handler),\n dynamic_dimension_inference_(dynamic_dimension_inference),\n execution_threads_(execution_threads) {}\n absl::Status DefaultAction(HloInstruction* hlo) override;\n absl::Status HandleCustomCall(HloInstruction* hlo) override;\n absl::Status 
HandleTuple(HloInstruction* hlo) override;\n absl::Status HandleGetTupleElement(HloInstruction* hlo) override;\n absl::Status HandleParameter(HloInstruction* hlo) override;\n absl::Status HandleInfeed(HloInstruction* hlo) override;\n absl::Status HandleAsyncStart(HloInstruction* hlo) override;\n absl::Status HandleAsyncUpdate(HloInstruction* hlo) override;\n absl::Status HandleAsyncDone(HloInstruction* hlo) override;\n absl::Status HandleWhile(HloInstruction* hlo) override;\n absl::Status HandleConditional(HloInstruction* hlo) override;\n absl::Status HandleGetDimensionSize(HloInstruction* hlo) override;\n absl::Status HandleSetDimensionSize(HloInstruction* hlo) override;\n static absl::StatusOr Run(\n HloComputation* computation,\n const OpSupportsDynamismHandler& op_supports_dynamism_handler,\n DynamicDimensionInference* dynamic_shape_inference,\n const absl::flat_hash_set& execution_threads,\n bool require_dynamic_output) {\n DynamicShapeRemovingVisitor visitor(op_supports_dynamism_handler,\n dynamic_shape_inference,\n execution_threads);\n TF_RETURN_IF_ERROR(computation->Accept(&visitor));\n if (require_dynamic_output) {\n HloInstruction* root = computation->root_instruction();\n if (dynamic_shape_inference->HasDynamicDimension(root)) {\n TF_ASSIGN_OR_RETURN(HloInstruction * new_root,\n visitor.ConvertToDynamic(root));\n computation->set_root_instruction(new_root);\n }\n }\n return visitor.changed();\n }\n private:\n absl::StatusOr ConvertToDynamic(HloInstruction* inst);\n absl::Status ConvertOperandsToDynamic(HloInstruction* inst);\n const OpSupportsDynamismHandler& op_supports_dynamism_handler_;\n DynamicDimensionInference* dynamic_dimension_inference_;\n absl::flat_hash_set execution_threads_;\n};\nabsl::StatusOr DynamicShapeRemovingVisitor::ConvertToDynamic(\n HloInstruction* inst) {\n if (!dynamic_dimension_inference_->HasDynamicDimension(inst)) {\n return absl::OkStatus();\n }\n MarkAsChanged();\n Shape shape = 
dynamic_dimension_inference_->GetDynamicShape(inst);\n auto gtes = TupleUtil::DisassembleTupleInstruction(inst);\n gtes.ForEachMutableElement([&](const ShapeIndex& index,\n HloInstruction** element) {\n const Shape& subshape = ShapeUtil::GetSubshape(shape, index);\n if (!subshape.IsArray()) {\n return;\n }\n if (!dynamic_dimension_inference_->HasDynamicDimension(inst, index)) {\n return;\n }\n std::vector slice_operand;\n slice_operand.push_back(*element);\n for (int64_t i = 0; i < subshape.dimensions_size(); ++i) {\n auto dimension_size =\n dynamic_dimension_inference_->GetDynamicSize(inst, index, i);\n if (dimension_size == nullptr) {\n dimension_size = inst->AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR0(subshape.dimensions(i))));\n }\n slice_operand.push_back(dimension_size);\n }\n *element = inst->AddInstruction(HloInstruction::CreateCustomCall(\n subshape, slice_operand, \"SliceToDynamic\"));\n });\n return TupleUtil::AssembleTupleInstruction(inst->parent(), std::move(gtes));\n}\nabsl::Status DynamicShapeRemovingVisitor::ConvertOperandsToDynamic(\n HloInstruction* inst) {\n for (int64_t i = 0; i < inst->operand_count(); ++i) {\n auto operand = inst->mutable_operand(i);\n if (dynamic_dimension_inference_->HasDynamicDimension(operand)) {\n TF_ASSIGN_OR_RETURN(auto dynamic_operand,\n ConvertToDynamic(inst->mutable_operand(i)));\n TF_RETURN_IF_ERROR(inst->ReplaceOperandWith(i, dynamic_operand));\n MarkAsChanged();\n }\n }\n return absl::OkStatus();\n}\nabsl::Status DynamicShapeRemovingVisitor::DefaultAction(HloInstruction* hlo) {\n OpDynamismSupport op_support = OpDynamismSupport::kNoSupport;\n if (op_supports_dynamism_handler_) {\n op_support = op_supports_dynamism_handler_(hlo);\n }\n if (op_support == OpDynamismSupport::kRequired) {\n VLOG(1) << \"op doesn't support static tensor: \" << hlo->ToString();\n return ConvertOperandsToDynamic(hlo);\n }\n const bool input_is_dynamic = absl::c_any_of(\n hlo->operands(),\n [](const 
HloInstruction* hlo) { return hlo->shape().is_dynamic(); });\n if (!input_is_dynamic) {\n return absl::OkStatus();\n }\n TF_RET_CHECK(op_support != OpDynamismSupport::kNoSupport)\n << \"Dynamic input unexpectedly found for unsupported instruction: \"\n << hlo->ToString();\n return absl::OkStatus();\n}\nabsl::Status DynamicShapeRemovingVisitor::HandleGetTupleElement(\n HloInstruction* hlo) {\n return absl::OkStatus();\n}\nabsl::Status DynamicShapeRemovingVisitor::HandleTuple(HloInstruction* hlo) {\n return absl::OkStatus();\n}\nabsl::Status DynamicShapeRemovingVisitor::HandleInfeed(HloInstruction* hlo) {\n return absl::OkStatus();\n}\nabsl::Status DynamicShapeRemovingVisitor::HandleParameter(HloInstruction* hlo) {\n return absl::OkStatus();\n}\nabsl::Status DynamicShapeRemovingVisitor::HandleCustomCall(\n HloInstruction* hlo) {\n if (hlo->custom_call_target() == \"SliceToDynamic\" ||\n hlo->custom_call_target() == \"PadToStatic\") {\n return absl::OkStatus();\n }\n return DefaultAction(hlo);\n}\nabsl::Status DynamicShapeRemovingVisitor::HandleAsyncStart(\n HloInstruction* hlo) {\n if (HloInstruction::IsThreadIncluded(hlo->async_execution_thread(),\n execution_threads_)) {\n return absl::OkStatus();\n }\n return ConvertOperandsToDynamic(hlo);\n}\nabsl::Status DynamicShapeRemovingVisitor::HandleAsyncUpdate(\n HloInstruction* hlo) {\n return absl::OkStatus();\n}\nabsl::Status DynamicShapeRemovingVisitor::HandleAsyncDone(HloInstruction* hlo) {\n return absl::OkStatus();\n}\nabsl::Status DynamicShapeRemovingVisitor::HandleWhile(HloInstruction* hlo) {\n return absl::OkStatus();\n}\nabsl::Status DynamicShapeRemovingVisitor::HandleConditional(\n HloInstruction* hlo) {\n return absl::OkStatus();\n}\nabsl::Status DynamicShapeRemovingVisitor::HandleGetDimensionSize(\n HloInstruction* hlo) {\n return absl::OkStatus();\n}\nabsl::Status DynamicShapeRemovingVisitor::HandleSetDimensionSize(\n HloInstruction* hlo) {\n *hlo->mutable_shape() = hlo->operand(0)->shape();\n 
hlo->mutable_shape()->set_dynamic_dimension(hlo->dimension(), false);\n return absl::OkStatus();\n}\n} \nabsl::StatusOr DynamicPadder::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n VLOG(2) << \"Pre DynamicPadder HLO:\";\n XLA_VLOG_LINES(2, module->ToString());\n HloDCE dce;\n TF_ASSIGN_OR_RETURN(bool changed, dce.Run(module, execution_threads));\n TF_ASSIGN_OR_RETURN(\n DynamicDimensionInference dynamic_dimension_inference,\n DynamicDimensionInference::Run(\n module, options_.op_supports_dynamism_handler,\n options_.custom_call_handler, options_.shape_check_mode,\n options_.assertion_generator, execution_threads));\n changed |= dynamic_dimension_inference.changed();\n std::vector computations =\n module->MakeComputationPostOrder(execution_threads);\n for (HloComputation* computation : computations) {\n for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {\n OpDynamismSupport has_dynamism_support = OpDynamismSupport::kNoSupport;\n if (options_.op_supports_dynamism_handler != nullptr) {\n has_dynamism_support = options_.op_supports_dynamism_handler(inst);\n }\n if (has_dynamism_support != OpDynamismSupport::kNoSupport) {\n continue;\n }\n if (inst->opcode() == HloOpcode::kConcatenate) {\n TF_ASSIGN_OR_RETURN(\n bool c, RewriteDynamicConcat(inst, &dynamic_dimension_inference));\n changed |= c;\n continue;\n }\n if (inst->opcode() == HloOpcode::kReverse) {\n TF_ASSIGN_OR_RETURN(bool c,\n RewriteReverse(inst, &dynamic_dimension_inference));\n changed |= c;\n continue;\n }\n if (inst->opcode() == HloOpcode::kSort) {\n TF_ASSIGN_OR_RETURN(\n bool c, RewriteDynamicSort(inst, &dynamic_dimension_inference));\n changed |= c;\n continue;\n }\n if (inst->opcode() == HloOpcode::kReshape ||\n inst->opcode() == HloOpcode::kDynamicReshape) {\n TF_ASSIGN_OR_RETURN(\n bool c, RewriteDynamicReshape(inst, &dynamic_dimension_inference));\n changed |= c;\n continue;\n }\n if (inst->IsElementwiseBinary()) {\n TF_ASSIGN_OR_RETURN(\n bool c, 
RewriteDynamicBinaryOp(inst, &dynamic_dimension_inference));\n changed |= c;\n continue;\n }\n if (inst->opcode() == HloOpcode::kDynamicUpdateSlice) {\n TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicUpdateSlice(\n inst, &dynamic_dimension_inference));\n changed |= c;\n continue;\n }\n if (inst->IsCustomCall(\"DynamicConvolutionInputGrad\")) {\n TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicConvolutionInputGrad(\n inst, &dynamic_dimension_inference));\n changed |= c;\n continue;\n }\n if (inst->IsCustomCall(\"DynamicConvolutionForward\")) {\n TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicConvolutionForward(\n inst, &dynamic_dimension_inference));\n changed |= c;\n continue;\n }\n if (inst->IsCustomCall(\"DynamicConvolutionKernelGrad\")) {\n TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicConvolutionKernelGrad(\n inst, &dynamic_dimension_inference));\n changed |= c;\n continue;\n }\n if (inst->IsCustomCall(\"DynamicReduceWindowSamePadding\")) {\n TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicReduceWindowSamePadding(\n inst, &dynamic_dimension_inference));\n changed |= c;\n continue;\n }\n if (inst->IsCustomCall(\"DynamicSelectAndScatterSamePadding\")) {\n TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicSelectAndScatterSamePadding(\n inst, &dynamic_dimension_inference));\n changed |= c;\n continue;\n }\n for (int64_t operand_num = 0; operand_num < inst->operand_count();\n ++operand_num) {\n HloInstruction* original_operand = inst->mutable_operand(operand_num);\n HloInstruction* operand = original_operand;\n if (!operand->shape().IsArray()) {\n continue;\n }\n for (int64_t input_dim = 0; input_dim < operand->shape().rank();\n ++input_dim) {\n HloInstruction* operand_dynamic_size =\n dynamic_dimension_inference.GetDynamicSize(original_operand, {},\n input_dim);\n if (operand_dynamic_size == nullptr) {\n continue;\n }\n VLOG(2) << \"Has dynamic dimension of operand\" << operand_num << \" @\"\n << input_dim;\n if (ShouldSkipPadOnOperand(inst, operand_num, input_dim,\n execution_threads)) {\n 
continue;\n }\n TF_ASSIGN_OR_RETURN(HloInstruction * identity_value,\n ChooseIdentityValue(inst, operand_num));\n if (identity_value == nullptr) {\n continue;\n }\n HloInstruction* padded = PadWithScalar(\n operand, input_dim, operand_dynamic_size, identity_value);\n TF_RETURN_IF_ERROR(inst->ReplaceOperandWith(operand_num, padded));\n operand = inst->mutable_operand(operand_num);\n changed = true;\n }\n }\n }\n }\n auto call_graph = CallGraph::Build(module, execution_threads);\n computations = module->MakeComputationPostOrder(execution_threads);\n for (auto it = computations.rbegin(); it != computations.rend(); ++it) {\n HloComputation* computation = *it;\n if (!call_graph->CanReach(module->entry_computation(), computation)) {\n continue;\n }\n bool require_dynamic_output = options_.slice_dynamic_output &&\n computation == module->entry_computation();\n changed |= require_dynamic_output;\n TF_ASSIGN_OR_RETURN(bool c,\n DynamicShapeRemovingVisitor::Run(\n computation, options_.op_supports_dynamism_handler,\n &dynamic_dimension_inference, execution_threads,\n require_dynamic_output));\n changed |= c;\n }\n if (changed) {\n dynamic_padding_gauge->GetCell()->Set(changed);\n module->set_is_dynamic(true);\n }\n for (auto* computation : module->computations(execution_threads)) {\n if (!call_graph->CanReach(module->entry_computation(), computation)) {\n continue;\n }\n for (auto instruction : computation->MakeInstructionPostOrder()) {\n TF_ASSIGN_OR_RETURN(\n bool c, ReplaceGetSize(instruction, &dynamic_dimension_inference));\n changed |= c;\n }\n }\n for (auto* computation : module->computations(execution_threads)) {\n if (!call_graph->CanReach(module->entry_computation(), computation)) {\n continue;\n }\n for (auto instruction : computation->MakeInstructionPostOrder()) {\n TF_ASSIGN_OR_RETURN(bool c, ReplaceSetSize(instruction));\n changed |= c;\n TF_ASSIGN_OR_RETURN(c, ReplaceSetBound(instruction));\n changed |= c;\n }\n }\n if (changed) {\n HloDCE dce;\n 
TF_ASSIGN_OR_RETURN(bool c, dce.Run(module, execution_threads));\n changed |= c;\n }\n VLOG(2) << \"Post DynamicPadder HLO:\";\n XLA_VLOG_LINES(2, module->ToString());\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/dynamic_padder.h\"\n#include \n#include \n#include \n#include \n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/str_replace.h\"\n#include \"absl/types/span.h\"\n#include \"xla/error_spec.h\"\n#include \"xla/hlo/builder/xla_builder.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/literal.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/service/algebraic_simplifier.h\"\n#include \"xla/service/dynamic_dimension_inference.h\"\n#include \"xla/service/dynamic_dimension_simplifier.h\"\n#include \"xla/service/hlo_dce.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/service/pattern_matcher.h\"\n#include \"xla/service/pattern_matcher_gmock.h\"\n#include \"xla/service/tuple_simplifier.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/status_macros.h\"\n#include \"xla/test.h\"\n#include \"xla/test_helpers.h\"\n#include \"xla/tests/client_library_test_base.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tests/literal_test_util.h\"\n#include \"xla/tests/llvm_irgen_test_base.h\"\n#include \"xla/tests/test_macros.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\n#include \"xla/util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/status.h\"\n#include \"tsl/platform/statusor.h\"\n#include \"tsl/platform/test_benchmark.h\"\n#include \"tsl/protobuf/error_codes.pb.h\"\nnamespace xla {\nnamespace {\nnamespace m = 
::xla::match;\nnamespace op = xla::testing::opcode_matchers;\nOpDynamismSupport OpHasDynamismSupport(HloInstruction* hlo) {\n if (hlo->opcode() != HloOpcode::kCustomCall) {\n return OpDynamismSupport::kNoSupport;\n }\n if (hlo->custom_call_target() == \"OpWithDynamicLowering\") {\n return OpDynamismSupport::kRequired;\n }\n return OpDynamismSupport::kNoSupport;\n}\nabsl::Status CustomCallDynamicDimensionInference(\n HloInstruction* hlo, DynamicDimensionInference* inferencer) {\n if (hlo->custom_call_target() == \"OpWithDynamicLowering\") {\n if (hlo->shape().IsTuple()) {\n HloInstruction* dynamic_size =\n inferencer->GetDynamicSize(hlo->mutable_operand(0), {1}, 0);\n inferencer->SetDynamicSize(hlo, {1}, 0, dynamic_size);\n } else {\n HloInstruction* dynamic_size =\n inferencer->GetDynamicSize(hlo->mutable_operand(0), {}, 0);\n inferencer->SetDynamicSize(hlo, {}, 0, dynamic_size);\n }\n }\n return absl::OkStatus();\n}\nclass DynamicPadderTest : public HloTestBase {\n protected:\n DynamicPadderTest() : HloTestBase() { module_ = CreateNewVerifiedModule(); }\n std::unique_ptr GetHloModule(const std::string& hlo_text) {\n std::unique_ptr module =\n ParseAndReturnVerifiedModule(hlo_text).value();\n return module;\n }\n absl::StatusOr RunPadder(\n bool slice_dynamic_output = false,\n OpSupportsDynamismHandler op_supports_dynamism_handler =\n OpHasDynamismSupport,\n DynamicDimensionInference::CustomCallInferenceHandler\n custom_call_handler = CustomCallDynamicDimensionInference) {\n DynamicPadderOptions options;\n options.slice_dynamic_output = slice_dynamic_output;\n options.op_supports_dynamism_handler =\n std::move(op_supports_dynamism_handler);\n options.custom_call_handler = std::move(custom_call_handler);\n DynamicPadder padder(std::move(options));\n TF_ASSIGN_OR_RETURN(bool changed, RunHloPass(&padder, module_.get()));\n if (!changed) return false;\n TupleSimplifier tuple_simplifier;\n TF_RETURN_IF_ERROR(RunHloPass(&tuple_simplifier, module_.get()).status());\n 
AlgebraicSimplifier alg_simplifier(AlgebraicSimplifierOptions{});\n TF_RETURN_IF_ERROR(RunHloPass(&alg_simplifier, module_.get()).status());\n return true;\n }\n void ExpectPadded(const HloInstruction* inst) {\n EXPECT_THAT(inst,\n op::Select(op::Lt(op::Iota(), op::Broadcast(op::Parameter())),\n ::testing::_, op::Broadcast()));\n }\n HloComputation* GetScalarAddComputation() {\n auto embedded_builder = HloComputation::Builder(\"add\");\n auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeShape(F32, {}), \"lhs\"));\n auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(\n 1, ShapeUtil::MakeShape(F32, {}), \"rhs\"));\n embedded_builder.AddInstruction(\n HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));\n return module_->AddEmbeddedComputation(embedded_builder.Build());\n }\n std::unique_ptr module_;\n const Shape scalar_shape_ = ShapeUtil::MakeShape(S32, {});\n};\nclass MemoryAlignmentTest : public HloTestBase {};\nTEST_F(MemoryAlignmentTest, DISABLED_ON_CPU(TestDataTypeFP16)) {\n const std::string hlo_text = R\"(\n HloModule TestDataTypeFP16\n update_add (p0: f16[], p1: f16[]) -> f16[] {\n p0 = f16[] parameter(0)\n p1 = f16[] parameter(1)\n ROOT out = f16[] add(p0, p1)\n }\n ENTRY main () -> f16[<=1,1] {\n c1 = s32[1]{0} constant({1})\n c2 = f16[1,1]{1,0} constant({ {0.099976} })\n shape = s32[] reshape(s32[1]{0} c1)\n dim_size = f16[<=1,1]{1,0} set-dimension-size(f16[1,1]{1,0} c2, s32[] shape),\n dimensions={0}\n ROOT out = f16[<=1,1]{1,0} scatter(f16[<=1,1]{1,0} dim_size, s32[1]{0} c1, f16[1,1]{1,0} c2),\n update_window_dims={1},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=1,\n to_apply=update_add\n }\n )\";\n EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));\n}\nTEST_F(DynamicPadderTest, ReduceTest) {\n auto builder = HloComputation::Builder(TestName());\n auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});\n auto 
reduce_shape = ShapeUtil::MakeShape(F32, {2});\n auto dynamic_shape =\n ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, false, true});\n auto data_param = builder.AddInstruction(\n HloInstruction::CreateParameter(0, input_shape, \"data_param\"));\n auto* size_param = builder.AddInstruction(\n HloInstruction::CreateParameter(1, scalar_shape_, \"size_param\"));\n data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n dynamic_shape, data_param, size_param, 2));\n auto negate = builder.AddInstruction(HloInstruction::CreateUnary(\n dynamic_shape, HloOpcode::kNegate, data_param));\n auto init = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0)));\n auto reduce = builder.AddInstruction(HloInstruction::CreateReduce(\n reduce_shape, negate, init, {0, 2}, GetScalarAddComputation()));\n EXPECT_FALSE(module_->is_dynamic());\n module_->AddEntryComputation(builder.Build());\n TF_ASSERT_OK(RunPadder().status());\n ExpectPadded(reduce->operand(0));\n EXPECT_TRUE(module_->is_dynamic());\n}\nTEST_F(DynamicPadderTest, DynamicLoweringTest) {\n const std::string hlo_text = R\"(\nHloModule DynamicLowering\nENTRY main {\n param = s32[5] parameter(0)\n const = s32[] constant(3)\n param_padded = s32[<=5] set-dimension-size(param, const),\n dimensions={0}\n custom-call.1 = s32[<=5] custom-call(param_padded),\n custom_call_target=\"OpWithDynamicLowering\"\n custom-call.2 = s32[<=5] custom-call(custom-call.1),\n custom_call_target=\"OpWithDynamicLowering\"\n ROOT negate = s32[<=5] negate(custom-call.2)\n}\n)\";\n module_ = GetHloModule(hlo_text);\n TF_ASSERT_OK(RunPadder(true).status());\n auto custom_call_1 =\n module_->entry_computation()->GetInstructionWithName(\"custom-call.1\");\n auto custom_call_2 =\n module_->entry_computation()->GetInstructionWithName(\"custom-call.2\");\n HloInstruction* slice_to_dynamic = custom_call_1->mutable_operand(0);\n ASSERT_THAT(slice_to_dynamic->opcode(), HloOpcode::kCustomCall);\n 
ASSERT_THAT(slice_to_dynamic->custom_call_target(), \"SliceToDynamic\");\n ASSERT_EQ(custom_call_2->user_count(), 1);\n HloInstruction* pad_to_static = custom_call_2->users()[0];\n ASSERT_THAT(pad_to_static->opcode(), HloOpcode::kCustomCall);\n ASSERT_THAT(pad_to_static->custom_call_target(), \"PadToStatic\");\n slice_to_dynamic = module_->entry_computation()->root_instruction();\n ASSERT_THAT(slice_to_dynamic->opcode(), HloOpcode::kCustomCall);\n ASSERT_THAT(slice_to_dynamic->custom_call_target(), \"SliceToDynamic\");\n}\nTEST_F(DynamicPadderTest, DynamicLoweringTestTupleInput) {\n const std::string hlo_text = R\"(\nHloModule DynamicLowering\nENTRY main {\n param = s32[5] parameter(0)\n const = s32[] constant(3)\n param_padded = s32[<=5] set-dimension-size(param, const),\n dimensions={0}\n tuple_arg = (s32[], s32[<=5]) tuple(const, param_padded)\n custom-call.1 = (s32[], s32[<=5]) custom-call(tuple_arg),\n custom_call_target=\"OpWithDynamicLowering\"\n custom-call.2 = (s32[], s32[<=5]) custom-call(custom-call.1),\n custom_call_target=\"OpWithDynamicLowering\"\n data = s32[<=5]{0} get-tuple-element(custom-call.2), index=1\n ROOT negate = s32[<=5] negate(data)\n}\n)\";\n module_ = GetHloModule(hlo_text);\n TF_ASSERT_OK(RunPadder(true).status());\n auto* root = module_->entry_computation()->root_instruction();\n EXPECT_THAT(root, op::CustomCall(\n {\"SliceToDynamic\"}, op::Negate(),\n op::GetTupleElement(op::CustomCall({\"PadToStatic\"}))));\n HloInstruction* negate = root->mutable_operand(0);\n EXPECT_THAT(\n negate,\n op::Negate(op::GetTupleElement(op::CustomCall(\n {\"PadToStatic\"}, op::GetTupleElement(op::CustomCall(\n {\"OpWithDynamicLowering\"}, ::testing::_))))));\n auto custom_call_1 =\n module_->entry_computation()->GetInstructionWithName(\"custom-call.1\");\n EXPECT_THAT(custom_call_1,\n op::CustomCall({\"OpWithDynamicLowering\"},\n op::Tuple(op::Constant(),\n op::CustomCall({\"SliceToDynamic\"}))));\n}\nTEST_F(DynamicPadderTest, DynamicOutputNestedTuple) 
{\n const std::string hlo_text = R\"(\nHloModule DynamicLowering\nENTRY main {\n param = s32[5] parameter(0)\n const = s32[] constant(3)\n const2 = s32[] constant(4)\n param_padded = s32[<=5] set-dimension-size(param, const),\n dimensions={0}\n tuple0 = (s32[], s32[<=5]) tuple(const, param_padded)\n ROOT tuple1 = (s32[], (s32[], s32[<=5])) tuple(const2, tuple0)\n}\n)\";\n module_ = GetHloModule(hlo_text);\n TF_ASSERT_OK(RunPadder(true).status());\n TF_ASSERT_OK(TupleSimplifier().Run(module_.get()).status());\n XLA_LOG_LINES(INFO, module_->ToString());\n auto* root = module_->entry_computation()->root_instruction();\n EXPECT_THAT(root, op::Tuple(op::Constant(), op::Tuple()));\n HloInstruction* nested_tuple = root->mutable_operand(1);\n EXPECT_THAT(nested_tuple,\n op::Tuple(op::Constant(), op::CustomCall({\"SliceToDynamic\"})));\n}\nTEST_F(DynamicPadderTest, ConvolutionTest) {\n auto builder = HloComputation::Builder(TestName());\n constexpr int xdim = 3;\n constexpr int ydim = 2;\n constexpr int zdim = 1;\n auto xy_shape = ShapeUtil::MakeShape(F32, {xdim, ydim});\n auto yz_shape = ShapeUtil::MakeShape(F32, {ydim, zdim});\n auto zx_shape = ShapeUtil::MakeShape(F32, {zdim, xdim});\n auto xy_shape_dynamic =\n ShapeUtil::MakeShape(F32, {xdim, ydim}, {false, true});\n auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 0, xy_shape, \"A\"));\n auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 1, yz_shape, \"B\"));\n auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 2, scalar_shape_, \"size_param\"));\n auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers(0);\n dnums.set_kernel_input_feature_dimension(0);\n dnums.set_kernel_output_feature_dimension(1);\n dnums.set_input_batch_dimension(0);\n dnums.set_output_batch_dimension(1);\n dnums.set_output_feature_dimension(0);\n Window window;\n a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n xy_shape_dynamic, a_param, 
size_param, 1));\n auto* conv = builder.AddInstruction(HloInstruction::CreateConvolve(\n zx_shape, a_param, b_param, 1,\n 1, window, dnums,\n HloTestBase::DefaultPrecisionConfig(2)));\n module_->AddEntryComputation(builder.Build());\n TF_ASSERT_OK(RunPadder().status());\n ExpectPadded(conv->operand(0));\n}\nTEST_F(DynamicPadderTest, ConvolutionNoPad) {\n auto builder = HloComputation::Builder(TestName());\n constexpr int xdim = 3;\n constexpr int ydim = 2;\n constexpr int zdim = 1;\n auto xy_shape = ShapeUtil::MakeShape(F32, {xdim, ydim});\n auto yz_shape = ShapeUtil::MakeShape(F32, {ydim, zdim});\n auto zx_shape = ShapeUtil::MakeShape(F32, {zdim, xdim}, {false, true});\n auto dynamic_shape = ShapeUtil::MakeShape(F32, {xdim, ydim}, {true, false});\n auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 0, xy_shape, \"A\"));\n auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 1, yz_shape, \"B\"));\n auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 2, scalar_shape_, \"size_param\"));\n a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n dynamic_shape, a_param, size_param, 0));\n auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers(0);\n dnums.set_kernel_input_feature_dimension(0);\n dnums.set_kernel_output_feature_dimension(1);\n dnums.set_input_batch_dimension(0);\n dnums.set_output_batch_dimension(1);\n dnums.set_output_feature_dimension(0);\n Window window;\n auto* conv = builder.AddInstruction(HloInstruction::CreateConvolve(\n zx_shape, a_param, b_param, 1,\n 1, window, dnums,\n HloTestBase::DefaultPrecisionConfig(2)));\n module_->AddEntryComputation(builder.Build());\n TF_ASSERT_OK(RunPadder().status());\n EXPECT_THAT(conv->operand(0), op::Parameter());\n}\nTEST_F(DynamicPadderTest, ReduceWindowNoPadForTrivialWindow) {\n auto builder = HloComputation::Builder(TestName());\n auto input_shape = ShapeUtil::MakeShape(F32, {4, 5});\n auto reduce_shape = 
ShapeUtil::MakeShape(F32, {3, 5}, {false, true});\n auto dynamic_shape = ShapeUtil::MakeShape(F32, {4, 5}, {false, true});\n auto input = builder.AddInstruction(\n HloInstruction::CreateParameter(0, input_shape, \"input\"));\n auto* size_param = builder.AddInstruction(\n HloInstruction::CreateParameter(1, scalar_shape_, \"size_param\"));\n input = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n dynamic_shape, input, size_param, 1));\n auto init = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0)));\n TF_ASSERT_OK_AND_ASSIGN(Window window, ParseWindow(\"size=2x1 pad=0_0x0_0\"));\n auto output = builder.AddInstruction(HloInstruction::CreateReduceWindow(\n reduce_shape, input, init, window, GetScalarAddComputation()));\n module_->AddEntryComputation(builder.Build());\n TF_ASSERT_OK(RunPadder().status());\n EXPECT_THAT(output->operand(0), op::Parameter());\n}\nTEST_F(DynamicPadderTest, VariadicReduceWindowNoPadForTrivialWindow) {\n const std::string hlo_text = R\"(\nHloModule VariadicReduceWindowNoPadForTrivialWindow\nadd_f32 (a: f32[], b: s32[], c: f32[], d: s32[]) -> (f32[], s32[]) {\n a = f32[] parameter(0)\n b = s32[] parameter(1)\n c = f32[] parameter(2)\n d = s32[] parameter(3)\n add.0 = f32[] add(a, c)\n add.1 = s32[] add(b, d)\n ROOT out = tuple(add.0, add.1)\n}\nENTRY main {\n input.0 = f32[4, 5] parameter(0)\n input.1 = s32[4, 5] parameter(1)\n size_param.0 = s32[] parameter(2)\n size_param.1 = s32[] parameter(3)\n input_dynamic.0 = f32[4,<=5] set-dimension-size(input.0, size_param.0), dimensions={1}\n input_dynamic.1 = s32[4,<=5] set-dimension-size(input.1, size_param.0), dimensions={1}\n init.0 = f32[] constant(0.0)\n init.1 = s32[] constant(0)\n ROOT output = (f32[3, <=5], s32[3, <=5]) reduce-window(input_dynamic.0, input_dynamic.1, init.0, init.1), window={size=2x1 pad=0_0x0_0}, to_apply=add_f32\n}\n)\";\n const int kNumParams = 2;\n module_ = ParseAndReturnVerifiedModule(hlo_text).value();\n 
TF_ASSERT_OK(RunPadder().status());\n for (int i = 0; i < kNumParams; ++i) {\n EXPECT_THAT(module_->entry_computation()->root_instruction()->operand(i),\n op::Parameter());\n }\n}\nTEST_F(DynamicPadderTest, PadS8ToS32Dot) {\n const std::string hlo_text = R\"(\nHloModule test\nENTRY test {\n a = s8[<=16,32] parameter(0)\n b = s8[32,64] parameter(1)\n ROOT root = s32[<=16,64] dot(a, b), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n}\n)\";\n module_ = GetHloModule(hlo_text);\n TF_ASSERT_OK(RunPadder(true).status());\n EXPECT_THAT(module_->entry_computation()->root_instruction(),\n GmockMatch(m::CustomCall({\"SliceToDynamic\"},\n m::Dot(m::Op().WithShape(S8, {16, 32}),\n m::Op().WithShape(S8, {32, 64}))\n .WithShape(S32, {16, 64}),\n m::Op(), m::Op())));\n}\nTEST_F(DynamicPadderTest, PadToStaticForCustomCall) {\n const std::string hlo_text = R\"(\nHloModule test\nENTRY test {\n a = f32[64] parameter(0)\n ROOT c = f32[<=128] custom-call(a),\n custom_call_target=\"UnknownOp\"\n}\n)\";\n module_ = GetHloModule(hlo_text);\n TF_ASSERT_OK(RunPadder(true).status());\n EXPECT_THAT(module_->entry_computation()->root_instruction(),\n GmockMatch(m::CustomCall({\"UnknownOp\"})));\n}\nTEST_F(DynamicPadderTest, WhileLoopDynamicShapeChangeToStatic) {\n const std::string hlo_text = R\"(\nHloModule WhileLoopDynamicShapeChangeToStatic\n %cond_wrapper.19447 {\n param = (s32[], s32[], f32[], f32[<=32,216]{1,0}) parameter(0)\n %get-tuple-element.184 = s32[] get-tuple-element(param), index=0\n %get-tuple-element.185 = s32[] get-tuple-element(param), index=1\n ROOT %compare.28 = pred[] compare(s32[] %get-tuple-element.184, s32[] %get-tuple-element.185), direction=LT\n }\n%while_body_78894_grad_83711__.18882 {\n param = (s32[], s32[], f32[], f32[<=32,216]{1,0}) parameter(0)\n %get-tuple-element.184 = s32[] get-tuple-element(param), index=0\n %get-tuple-element.185 = s32[] get-tuple-element(param), index=1\n %add.1 = s32[] add(get-tuple-element.184, get-tuple-element.184)\n %gte.2 = 
f32[] get-tuple-element(param), index=2\n %broadcast.19389 = f32[32,216]{1,0} broadcast(f32[] %gte.2), dimensions={}\n %constant.32 = s32[] constant(32)\n %set-dimension-size = f32[<=32,216]{1,0} set-dimension-size(f32[32,216]{1,0} %broadcast.19389, s32[] %constant.32), dimensions={0}\n ROOT tuple = (s32[], s32[], f32[], f32[<=32,216]{1,0}) tuple(add.1, %get-tuple-element.185, %gte.2, %set-dimension-size)\n}\nENTRY main {\n param = f32[] parameter(0)\n param.1 = f32[<=32,216]{1,0} parameter(1)\n const = s32[] constant(3)\n const2 = s32[] constant(4)\n %tuple.18877 = (s32[], s32[], f32[], f32[<=32,216]{1,0}) tuple(const, const2, param, param.1)\n %while.19451 = (s32[], s32[], f32[], f32[<=32,216]{1,0})\n while((s32[], s32[], f32[], f32[<=32,216]{1,0})\n %tuple.18877), condition=%cond_wrapper.19447, body=%while_body_78894_grad_83711__.18882\n ROOT result = f32[<=32,216]{1,0} get-tuple-element(while.19451), index=3\n }\n)\";\n module_ = GetHloModule(hlo_text);\n TF_ASSERT_OK(RunPadder(true).status());\n XLA_LOG_LINES(INFO, module_->ToString());\n auto* root = module_->entry_computation()->root_instruction();\n EXPECT_EQ(root->shape(), ShapeUtil::MakeShape(F32, {32, 216}, {true, false}));\n HloInstruction* while_inst = nullptr;\n for (HloInstruction* inst :\n module_->entry_computation()->MakeInstructionPostOrder()) {\n if (inst->opcode() == HloOpcode::kWhile) {\n ASSERT_EQ(while_inst, nullptr)\n << \"while_inst: \" << while_inst->name() << \", inst: \" << inst->name();\n while_inst = inst;\n }\n }\n EXPECT_EQ(while_inst->shape(),\n ShapeUtil::MakeTupleShape({ShapeUtil::MakeScalarShape(S32),\n ShapeUtil::MakeScalarShape(S32),\n ShapeUtil::MakeScalarShape(F32),\n ShapeUtil::MakeShape(F32, {32, 216}),\n ShapeUtil::MakeScalarShape(S32)}));\n}\nTEST_F(DynamicPadderTest, WhileLoopCarriesRequiredDynamicShape) {\n const std::string hlo_text = R\"(\nHloModule WhileLoopCarriesRequiredDynamicShape\n%cond {\n param = (f32[1024], f32[<=64], f32[32], f32[<=64], f32[32], s32[], 
s32[], token[]) parameter(0)\n current = s32[] get-tuple-element(param), index=5\n last = s32[] get-tuple-element(param), index=6\n ROOT result = pred[] compare(current, last), direction=LT\n}\n%body {\n param = (f32[1024], f32[<=64], f32[32], f32[<=64], f32[32], s32[], s32[], token[]) parameter(0)\n var = f32[1024] get-tuple-element(param), index=0\n input0 = f32[<=64] get-tuple-element(param), index=1\n grad0 = f32[32] get-tuple-element(param), index=2\n input1 = f32[<=64] get-tuple-element(param), index=3\n act1 = f32[32] get-tuple-element(param), index=4\n grad1 = f32[32] custom-call(act1), custom_call_target=\"ComputeGradients\"\n var1 = f32[1024] custom-call(var, input0, grad0), custom_call_target=\"ApplyGradients\", output_to_operand_aliasing={{}: (0, {})}\n token2 = token[] get-tuple-element(param), index=7\n infeed2 = (f32[<=64], token[]) infeed(token2)\n input2 = f32[<=64] get-tuple-element(infeed2), index=0\n act2 = f32[32] custom-call(var1, input2), custom_call_target=\"ComputeActivations\"\n current = s32[] get-tuple-element(param), index=5\n constant1 = s32[] constant(1)\n add = s32[] add(current, constant1)\n last = s32[] get-tuple-element(param), index=6\n token3 = token[] get-tuple-element(infeed2), index=1\n ROOT result = (f32[1024], f32[<=64], f32[32], f32[<=64], f32[32], s32[], s32[], token[]) tuple(var1, input1, grad1, input2, act2, add, last, token3)\n}\nENTRY main {\n last = s32[] parameter(0)\n var = f32[1024] parameter(1)\n token0 = token[] after-all()\n infeed0 = (f32[<=64], token[]) infeed(token0)\n input0 = f32[<=64] get-tuple-element(infeed0), index=0\n act0 = f32[32] custom-call(var, input0), custom_call_target=\"ComputeActivations\"\n grad0 = f32[32] custom-call(act0), custom_call_target=\"ComputeGradients\"\n token1 = token[] get-tuple-element(infeed0), index=1\n infeed1 = (f32[<=64], token[]) infeed(token1)\n input1 = f32[<=64] get-tuple-element(infeed1), index=0\n act1 = f32[32] custom-call(var, input1), 
custom_call_target=\"ComputeActivations\"\n token2 = token[] get-tuple-element(infeed1), index=1\n zero = s32[] constant(0)\n tuple = (f32[1024], f32[<=64], f32[32]{0}, f32[<=64], f32[32]{0}, s32[], s32[], token[]) tuple(var, input0, grad0, input1, act1, zero, last, token2)\n while = (f32[1024], f32[<=64], f32[32]{0}, f32[<=64], f32[32]{0}, s32[], s32[], token[]) while(tuple), condition=%cond, body=%body\n ROOT result = f32[1024] get-tuple-element(while), index=0\n}\n)\";\n module_ = GetHloModule(hlo_text);\n auto op_supports_dynamism = [](HloInstruction* hlo) {\n if (hlo->opcode() != HloOpcode::kCustomCall) {\n return OpDynamismSupport::kNoSupport;\n }\n if (hlo->custom_call_target() == \"ComputeActivations\" ||\n hlo->custom_call_target() == \"ApplyGradients\") {\n return OpDynamismSupport::kRequired;\n }\n return OpDynamismSupport::kNoSupport;\n };\n auto custom_call_handler = [](HloInstruction* hlo,\n DynamicDimensionInference* inference) {\n return absl::OkStatus();\n };\n TF_ASSERT_OK(\n RunPadder(\n true,\n std::move(op_supports_dynamism),\n std::move(custom_call_handler))\n .status());\n XLA_VLOG_LINES(1, module_->ToString());\n for (HloComputation* computation : module_->computations()) {\n for (HloInstruction* instruction : computation->instructions()) {\n if (instruction->opcode() == HloOpcode::kCustomCall) {\n EXPECT_NE(instruction->custom_call_target(), \"PadToStatic\");\n EXPECT_NE(instruction->custom_call_target(), \"SliceToDynamic\");\n if (instruction->custom_call_target() == \"ComputeActivations\") {\n EXPECT_TRUE(instruction->operand(1)->shape().is_dynamic());\n } else if (instruction->custom_call_target() == \"ApplyGradients\") {\n EXPECT_TRUE(instruction->operand(1)->shape().is_dynamic());\n }\n } else if (instruction->opcode() == HloOpcode::kWhile) {\n const Shape& shape = instruction->shape();\n EXPECT_TRUE(shape.tuple_shapes(1).is_dynamic());\n EXPECT_TRUE(shape.tuple_shapes(3).is_dynamic());\n }\n }\n }\n}\nTEST_F(DynamicPadderTest, 
HandleReshapeCheckPastReshape) {\n auto hlo_text = R\"(\nHloModule ReshapeDynamicDimension\nENTRY main {\n p0 = f32[4,511,432]{2,1,0} parameter(0)\n p1 = s32[] parameter(1)\n p2 = f32[432,337]{1,0:T(8,128)} parameter(2)\n p0_dynamic = f32[<=4,511,432] set-dimension-size(p0, p1), dimensions={0}\n reshape.4179 = f32[<=2044,432]{1,0} reshape(p0_dynamic)\n dot.4180 = f32[<=2044,337]{1,0} dot(reshape.4179, p2), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n transpose.4181 = f32[<=2044,337]{1,0} transpose(dot.4180), dimensions={0,1}\n ROOT reshape.4183 = f32[<=4,511,337]{2,1,0} reshape(transpose.4181)\n})\";\n module_ = GetHloModule(hlo_text);\n TF_ASSERT_OK(RunPadder(true).status());\n VLOG(3) << module_->ToString();\n CHECK(module_->is_dynamic());\n CHECK(module_->entry_computation()\n ->root_instruction()\n ->shape()\n .is_dynamic_dimension(0));\n}\nclass ExecutionTest : public HloTestBase {\n protected:\n std::unique_ptr GetHloModule(const std::string& hlo_text) {\n std::unique_ptr module =\n ParseAndReturnVerifiedModule(hlo_text).value();\n return module;\n }\n absl::StatusOr PadAndExecute(std::unique_ptr module,\n absl::Span arguments,\n bool slice_dynamic_output = true) {\n if (!slice_dynamic_output) {\n auto new_config = module->config();\n new_config.mutable_entry_computation_layout()\n ->mutable_result_layout()\n ->ClearDynamicShape();\n module->set_config(new_config);\n }\n DynamicPadderOptions options;\n options.slice_dynamic_output = slice_dynamic_output;\n DynamicPadder padder(options);\n TF_CHECK_OK(padder.Run(module.get()).status());\n HloDCE dce;\n TF_CHECK_OK(dce.Run(module.get()).status());\n return Execute(std::move(module), {arguments});\n }\n};\nXLA_TEST_F(ExecutionTest, ScatterUpdate) {\n const std::string hlo_text = R\"(\nHloModule TensorFlowScatterV1\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n ROOT rhs = s32[] parameter(1)\n}\nENTRY main {\n operand = s32[3,3] parameter(0)\n indices = s32[INDICES_BOUND] 
parameter(1)\n updates = s32[INDICES_BOUND,3] parameter(2)\n dynamic_size = s32[] parameter(3)\n indices_dynamic = s32[<=INDICES_BOUND] set-dimension-size(indices, dynamic_size), dimensions={0}\n updates_dynamic = s32[<=INDICES_BOUND,3] set-dimension-size(updates, dynamic_size), dimensions={0}\n ROOT scatter = s32[3,3] scatter(operand, indices_dynamic, updates_dynamic),\n to_apply=update_s32,\n update_window_dims={1},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=1\n}\n)\";\n const std::string hlo_text_not_padded =\n absl::StrReplaceAll(hlo_text, {{\"INDICES_BOUND\", \"2\"}});\n auto module_not_padded = GetHloModule(hlo_text_not_padded);\n Literal operand =\n LiteralUtil::CreateR2({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});\n Literal scatter_indices = LiteralUtil::CreateR1({0, 2});\n Literal updates =\n LiteralUtil::CreateR2({{10, 20, 30}, {70, 80, 90}});\n Literal dynamic_size = LiteralUtil::CreateR0(2);\n Literal not_padded =\n ExecuteAndTransfer(std::move(module_not_padded),\n {&operand, &scatter_indices, &updates, &dynamic_size});\n const std::string hlo_text_padded =\n absl::StrReplaceAll(hlo_text, {{\"INDICES_BOUND\", \"4\"}});\n auto module_padded = GetHloModule(hlo_text_padded);\n Literal scatter_indices_padded = LiteralUtil::CreateR1({0, 2, 0, 4});\n Literal updates_padded = LiteralUtil::CreateR2(\n {{10, 20, 30}, {70, 80, 90}, {30, 22, 11}, {-1, 20, -1}});\n DynamicPadder padder;\n TF_CHECK_OK(padder.Run(module_padded.get()).status());\n TF_ASSERT_OK_AND_ASSIGN(Literal padded,\n PadAndExecute(std::move(module_padded),\n {&operand, &scatter_indices_padded,\n &updates_padded, &dynamic_size}));\n EXPECT_EQ(padded, not_padded);\n}\nXLA_TEST_F(ExecutionTest, ScatterUpdateWindowDim) {\n const std::string hlo_text = R\"(\nHloModule ScatterUpdateWindowDim\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n ROOT rhs = s32[] parameter(1)\n}\nENTRY main {\n operand = s32[1,2,3] parameter(0)\n indices = s32[1] 
parameter(1)\n updates = s32[2,3,1] parameter(2)\n dynamic_size = s32[] constant(1)\n operand_dynamic = s32[1, <=2, 3] set-dimension-size(operand, dynamic_size),\n dimensions={1}\n updates_dynamic = s32[<=2, 3, 1] set-dimension-size(updates, dynamic_size),\n dimensions={0}\n ROOT scatter = s32[1, <=2, 3] scatter(operand_dynamic, indices, updates_dynamic),\n to_apply=update_s32,\n update_window_dims={0, 1},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=1\n}\n)\";\n auto hlo_module = GetHloModule(hlo_text);\n Literal operand = LiteralUtil::CreateR3({{{0, 0, 0}, {0, 0, 0}}});\n Literal scatter_indices = LiteralUtil::CreateR1({0});\n Literal updates =\n LiteralUtil::CreateR3({{{10}, {20}, {30}}, {{70}, {80}, {90}}});\n TF_ASSERT_OK_AND_ASSIGN(\n Literal padded,\n PadAndExecute(std::move(hlo_module),\n {&operand, &scatter_indices, &updates}, false));\n Literal expected =\n LiteralUtil::CreateR3({{{10, 20, 30}, {70, 80, 90}}});\n EXPECT_EQ(padded, expected);\n}\nXLA_TEST_F(ExecutionTest, ScatterUpdateF32) {\n const std::string hlo_text = R\"(\nHloModule TensorFlowScatterV1\nupdate_f32 (lhs: f32[], rhs: f32[]) -> f32[] {\n lhs = f32[] parameter(0)\n ROOT rhs = f32[] parameter(1)\n}\nENTRY main {\n operand = f32[3,3] parameter(0)\n indices = s32[2] parameter(1)\n updates = f32[2,3] parameter(2)\n dynamic_size = s32[] parameter(3)\n indices_dynamic = s32[<=2] set-dimension-size(indices, dynamic_size), dimensions={0}\n updates_dynamic = f32[<=2,3] set-dimension-size(updates, dynamic_size), dimensions={0}\n ROOT scatter = f32[3,3] scatter(operand, indices_dynamic, updates_dynamic),\n to_apply=update_f32,\n update_window_dims={1},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=1\n}\n)\";\n auto module_not_padded = GetHloModule(hlo_text);\n Literal operand = LiteralUtil::CreateR2(\n {{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}, {7.0, 8.0, 9.0}});\n Literal scatter_indices = LiteralUtil::CreateR1({0, 2});\n Literal 
updates =\n LiteralUtil::CreateR2({{10.0, 20.0, 30.0}, {70.0, 80.0, 90.0}});\n Literal dynamic_size = LiteralUtil::CreateR0(1);\n auto module_padded = GetHloModule(hlo_text);\n DynamicPadder padder;\n TF_CHECK_OK(padder.Run(module_padded.get()).status());\n TF_ASSERT_OK_AND_ASSIGN(\n Literal not_padded,\n PadAndExecute(std::move(module_padded),\n {&operand, &scatter_indices, &updates, &dynamic_size}));\n EXPECT_EQ(LiteralUtil::CreateR2(\n {{10.0, 20.0, 30.0}, {4.0, 5.0, 6.0}, {7.0, 8.0, 9.0}}),\n not_padded);\n}\nXLA_TEST_F(ExecutionTest, WholeDimensionGather) {\n const std::string hlo_text = R\"(\nHloModule TensorFlowScatterV1\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n}\nENTRY main {\n param = s32[3, 2, 1] parameter(0)\n size = s32[] constant(1)\n param_padded = s32[3, <=2, 1] set-dimension-size(param, size), dimensions={1}\n index = s32[] constant(1)\n gather = s32[<=2,1]{1,0} gather(param_padded, index),\n offset_dims={0,1},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=0,\n slice_sizes={1,2,1}\n init = s32[] constant(0)\n ROOT reduce = s32[] reduce(gather, init),\n dimensions={0, 1},\n to_apply=update_s32\n}\n)\";\n Literal operand =\n LiteralUtil::CreateR3({{{1}, {2}}, {{3}, {4}}, {{5}, {6}}});\n auto module = GetHloModule(hlo_text);\n DynamicPadder padder;\n TF_CHECK_OK(padder.Run(module.get()).status());\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n PadAndExecute(std::move(module), {&operand}));\n Literal expected = LiteralUtil::CreateR0(3);\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, TwoDimensionReduce) {\n const std::string hlo_text = R\"(\nHloModule TensorFlowScatterV1\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n}\nENTRY main {\n param = s32[INDICES_BOUND, INDICES_BOUND] parameter(0)\n dynamic_size = s32[] parameter(1)\n param_0 = 
s32[<=INDICES_BOUND,INDICES_BOUND] set-dimension-size(param, dynamic_size), dimensions={0}\n param_1 = s32[<=INDICES_BOUND,INDICES_BOUND] set-dimension-size(param_0, dynamic_size), dimensions={1}\n const = s32[] constant(0)\n ROOT reduce = s32[] reduce(param_1, const),\n dimensions={0, 1},\n to_apply=update_s32\n}\n)\";\n const std::string hlo_text_not_padded =\n absl::StrReplaceAll(hlo_text, {{\"INDICES_BOUND\", \"2\"}});\n auto module_not_padded = GetHloModule(hlo_text_not_padded);\n Literal operand = LiteralUtil::CreateR2({{1, 2}, {4, 5}});\n Literal dynamic_size = LiteralUtil::CreateR0(2);\n Literal not_padded = ExecuteAndTransfer(std::move(module_not_padded),\n {&operand, &dynamic_size});\n const std::string hlo_text_padded =\n absl::StrReplaceAll(hlo_text, {{\"INDICES_BOUND\", \"4\"}});\n auto module_padded = GetHloModule(hlo_text_padded);\n Literal operand_padded = LiteralUtil::CreateR2(\n {{1, 2, 3, 4}, {4, 5, 6, 7}, {1, 2, 3, 4}, {4, 5, 6, 7}});\n DynamicPadder padder;\n TF_CHECK_OK(padder.Run(module_padded.get()).status());\n TF_ASSERT_OK_AND_ASSIGN(Literal padded,\n PadAndExecute(std::move(module_padded),\n {&operand_padded, &dynamic_size}));\n EXPECT_EQ(padded, not_padded);\n}\nXLA_TEST_F(ExecutionTest, DynamicDimensionClamp) {\n const std::string hlo_text = R\"(\nHloModule TensorFlowTenaryV1\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n}\nENTRY main {\n param = s32[5] parameter(0)\n const = s32[] constant(3)\n param_padded = s32[<=5] set-dimension-size(param, const), dimensions={0}\n clamp = s32[<=5] clamp(param_padded, param_padded, param_padded)\n init = s32[] constant(0)\n ROOT reduce = s32[] reduce(clamp, init),\n dimensions={0},\n to_apply=update_s32\n}\n)\";\n Literal operand = LiteralUtil::CreateR1({1, 2, 3, 4, 5});\n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n PadAndExecute(std::move(module), {&operand}));\n Literal 
expected = LiteralUtil::CreateR0(6);\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, DynamicConcat) {\n const std::string hlo_text = R\"(\nHloModule DynamicConcat\nENTRY main {\n param_0 = s32[3] parameter(0)\n param_1 = s32[3] parameter(1)\n param_2 = s32[3] parameter(2)\n size = s32[] constant(2)\n param_padded_0 = s32[<=3] set-dimension-size(param_0, size), dimensions={0}\n param_padded_2 = s32[<=3] set-dimension-size(param_2, size), dimensions={0}\n ROOT %concatenate = s32[<=9]\n concatenate(s32[<=3] param_padded_0, s32[<=3] param_1, s32[<=3] param_padded_2),\n dimensions={0}\n}\n)\";\n Literal operand_0 =\n LiteralUtil::CreateR1({1, 2, -1}); \n Literal operand_1 =\n LiteralUtil::CreateR1({3, 4, 5}); \n Literal operand_2 =\n LiteralUtil::CreateR1({6, 7, -1}); \n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(\n Literal result,\n PadAndExecute(std::move(module), {&operand_0, &operand_1, &operand_2},\n false));\n result.SetDynamicSize(0, 7);\n Literal expected = LiteralUtil::CreateR1({1, 2, 3, 4, 5, 6, 7});\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, DynamicReverseSingleDim) {\n const std::string hlo_text = R\"(\nHloModule DynamicConcat\nENTRY main {\n param_0 = s32[3] parameter(0)\n size = s32[] constant(2)\n param_padded_0 = s32[<=3] set-dimension-size(param_0, size), dimensions={0}\n ROOT %reverse = s32[<=3]\n reverse(s32[<=3] param_padded_0),\n dimensions={0}\n}\n)\";\n Literal operand_0 =\n LiteralUtil::CreateR1({1, 2, -1}); \n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(\n Literal result, PadAndExecute(std::move(module), {&operand_0}, false));\n result.SetDynamicSize(0, 2);\n Literal expected = LiteralUtil::CreateR1({2, 1});\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, DynamicReverseMultiDims) {\n const std::string hlo_text = R\"(\nHloModule DynamicConcat\nENTRY main {\n param_0 = s32[3, 3] parameter(0)\n size = s32[] constant(2)\n param_padded_0 = s32[<=3, 3] 
set-dimension-size(param_0, size), dimensions={0}\n param_padded_1 = s32[<=3, <=3] set-dimension-size(param_padded_0, size),\n dimensions={1}\n ROOT %reverse = s32[<=3, <=3]\n reverse(s32[<=3, <=3] param_padded_1),\n dimensions={0, 1}\n}\n)\";\n Literal operand_0 = LiteralUtil::CreateR2(\n {{1, 2, -1}, {3, 4, -1}, {-1, -1, -1}}); \n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(\n Literal result, PadAndExecute(std::move(module), {&operand_0}, false));\n result.SetDynamicSize(0, 2);\n result.SetDynamicSize(1, 2);\n Literal expected = LiteralUtil::CreateR2({{4, 3}, {2, 1}});\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, DynamicDimensionReduce) {\n const std::string hlo_text = R\"(\nHloModule TensorFlowScatterV1\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n}\nENTRY main {\n param = s32[5] parameter(0)\n const = s32[] constant(3)\n param_padded = s32[<=5] set-dimension-size(param, const), dimensions={0}\n init = s32[] constant(0)\n ROOT reduce = s32[] reduce(param_padded, init),\n dimensions={0},\n to_apply=update_s32\n}\n)\";\n Literal operand = LiteralUtil::CreateR1({1, 2, 3, 4, 5});\n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n PadAndExecute(std::move(module), {&operand}));\n Literal expected = LiteralUtil::CreateR0(6);\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, InputMinorDimensionReshape) {\n const std::string hlo_text = R\"(\nHloModule TensorFlowScatterV1\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n}\nENTRY main {\n param = s32[1, 2, 5, 1] parameter(0)\n const = s32[] constant(3)\n param_padded = s32[1, 2, <=5, 1] set-dimension-size(param, const), dimensions={2}\n reshaped = s32[<=10] reshape(param_padded)\n init = s32[] constant(0)\n ROOT reduce = s32[] reduce(reshaped, init),\n dimensions={0},\n 
to_apply=update_s32\n}\n)\";\n Literal operand = LiteralUtil::CreateR4(\n {{{{1}, {2}, {3}, {4}, {5}}, {{2}, {4}, {6}, {7}, {8}}}});\n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n PadAndExecute(std::move(module), {&operand}));\n Literal expected = LiteralUtil::CreateR0(18);\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, SliceSingleElement) {\n const std::string hlo_text = R\"(\nHloModule Slicing\nENTRY main {\n param = s32[5] parameter(0)\n const = s32[] constant(3)\n param_padded = s32[<=5] set-dimension-size(param, const), dimensions={0}\n ROOT slice = s32[1]{0} slice(param_padded), slice={[0:1]}\n}\n)\";\n Literal operand = LiteralUtil::CreateR1({0, 1, 2, 3, 4});\n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n PadAndExecute(std::move(module), {&operand}));\n Literal expected = LiteralUtil::CreateR1({0});\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, OutputMinorDimensionReshape) {\n const std::string hlo_text = R\"(\nHloModule TensorFlowScatterV1\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n}\nENTRY main {\n param = s32[12] parameter(0)\n const = s32[] constant(8)\n param_padded = s32[<=12] set-dimension-size(param, const), dimensions={0}\n reshaped = s32[2, <=3, 2] reshape(param_padded), inferred_dimension=1\n init = s32[] constant(0)\n ROOT reduce = s32[2, 2] reduce(reshaped, init),\n dimensions={1},\n to_apply=update_s32\n}\n)\";\n Literal operand =\n LiteralUtil::CreateR1({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11});\n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n PadAndExecute(std::move(module), {&operand}));\n Literal expected = LiteralUtil::CreateR2({{2, 4}, {10, 12}});\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, OutputMinorDimensionReshapeWithUnchangedDimMajor) {\n const std::string hlo_text = R\"(\nHloModule 
TensorFlowScatterV1\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n}\nENTRY main {\n param = s32[2, 6] parameter(0)\n const = s32[] constant(4)\n param_padded = s32[2, <=6] set-dimension-size(param, const), dimensions={1}\n reshaped = s32[2, 2, <=3] reshape(param_padded), inferred_dimension=2\n init = s32[] constant(0)\n ROOT reduce = s32[2, 2] reduce(reshaped, init),\n dimensions={2},\n to_apply=update_s32\n}\n)\";\n Literal operand = LiteralUtil::CreateR2(\n {{0, 1, 2, 3, 4, 5}, {6, 7, 8, 9, 10, 11}});\n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n PadAndExecute(std::move(module), {&operand}));\n Literal expected = LiteralUtil::CreateR2({{1, 5}, {13, 17}});\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, OutputMinorDimensionReshapeWithUnchangedDimMinor) {\n const std::string hlo_text = R\"(\nHloModule TensorFlowScatterV1\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n}\nENTRY main {\n param = s32[6, 2] parameter(0)\n const = s32[] constant(4)\n param_padded = s32[<=6, 2] set-dimension-size(param, const), dimensions={0}\n reshaped = s32[2, <=3, 2] reshape(param_padded), inferred_dimension=1\n init = s32[] constant(0)\n ROOT reduce = s32[2, 2] reduce(reshaped, init),\n dimensions={1},\n to_apply=update_s32\n}\n)\";\n Literal operand = LiteralUtil::CreateR2(\n {{0, 1}, {2, 3}, {4, 5}, {6, 7}, {8, 9}, {10, 11}});\n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n PadAndExecute(std::move(module), {&operand}));\n Literal expected = LiteralUtil::CreateR2({{2, 4}, {10, 12}});\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, DynamicInputFeature) {\n const std::string hlo_text = R\"(\nHloModule DynamicInputFeature\nENTRY main {\n param = f32[1, 1, 5] parameter(0)\n const = s32[] constant(5)\n one = f32[] 
constant(1)\n kernel = f32[1,5,1]{2,1,0} broadcast(f32[] one), dimensions={}\n param_dynamic = f32[1,1,<=5] set-dimension-size(param, const), dimensions={2}\n ROOT conv = f32[1, 1, 1]{2,1,0} custom-call(f32[1, 1, <=5] param_dynamic, f32[1,<=5,1]{2,1,0} kernel),\n window={size=1 pad=0_0},\n dim_labels=b0f_0io->b0f,\n padding_type=PADDING_VALID,\n custom_call_target=\"DynamicConvolutionForward\"\n}\n)\";\n Literal operand = LiteralUtil::CreateR3({{{1, 2, 3, 4, 5}}});\n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n PadAndExecute(std::move(module), {&operand}));\n Literal expected = LiteralUtil::CreateR3({{{15}}});\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(LlvmIrGenTestBase, LargeDynamicInput) {\n#ifndef XLA_TEST_BACKEND_GPU\n GTEST_SKIP();\n#endif\n const std::string hlo_text = R\"( \nHloModule LargeDynamicInput\nadd (lhs: f32[], rhs: f32[]) -> f32[] {\n lhs = f32[] parameter(0)\n rhs = f32[] parameter(1)\n ROOT add = f32[] add(lhs, rhs)\n}\nENTRY main {\n param = f32[<=20,<=20,<=20,<=20,<=20,<=20,<=20,<=20] parameter(0)\n zero = f32[] constant(0)\n ROOT out = reduce(param, zero), to_apply=add, dimensions={0,1,2,3,4,5,6,7}\n}\n)\";\n CompileAndVerifyIr(hlo_text, R\"(\nCHECK: ret void\n)\",\n true);\n}\nXLA_TEST_F(ExecutionTest, DynamicDimensionReshapeUnchanged) {\n const std::string hlo_text = R\"(\nHloModule TensorFlowScatterV1\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n}\nENTRY main {\n param = s32[1, 2, 5, 1] parameter(0)\n const = s32[] constant(3)\n param_padded = s32[1, 2, <=5, 1] set-dimension-size(param, const), dimensions={2}\n reshaped = s32[2, <=5] reshape(param_padded)\n init = s32[] constant(0)\n ROOT reduce = s32[2] reduce(reshaped, init),\n dimensions={1},\n to_apply=update_s32\n}\n)\";\n Literal operand = LiteralUtil::CreateR4(\n {{{{1}, {2}, {3}, {4}, {5}}, {{2}, {4}, {6}, {7}, {8}}}});\n auto module = 
GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n PadAndExecute(std::move(module), {&operand}));\n Literal expected = LiteralUtil::CreateR1({6, 12});\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, DegeneratedDimension) {\n const std::string hlo_text = R\"(\nHloModule TensorFlowScatterV1\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n}\nENTRY main {\n param = s32[1, 2, 5, 1] parameter(0)\n size = s32[] constant(0)\n param_padded = s32[<=1, 2, 5, 1] set-dimension-size(param, size),\n dimensions={0}\n reshaped = s32[<=10] reshape(param_padded)\n init = s32[] constant(0)\n ROOT reduce = s32[] reduce(reshaped, init),\n dimensions={0},\n to_apply=update_s32\n}\n)\";\n Literal operand = LiteralUtil::CreateR4(\n {{{{1}, {2}, {3}, {4}, {5}}, {{2}, {4}, {6}, {7}, {8}}}});\n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n PadAndExecute(std::move(module), {&operand}));\n Literal expected = LiteralUtil::CreateR0(0);\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, ReshapeSplitCombineSameTime) {\n const std::string hlo_text = R\"(\nHloModule TensorFlowScatterV1\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n}\nENTRY main {\n param = s32[4, 2, 2] parameter(0)\n two = s32[] constant(2)\n one = s32[] constant(1)\n param_padded_partial = s32[<=4, 2, 2] set-dimension-size(param, two),\n dimensions={0}\n param_padded_dynamic = s32[<=4, 2, <=2] set-dimension-size(param_padded_partial,\n one),\n dimensions={2}\n reshaped = s32[2, <=2, <=4] reshape(param_padded_dynamic),\n inferred_dimension=1\n init = s32[] constant(0)\n ROOT reduce = s32[] reduce(reshaped, init),\n dimensions={0, 1, 2},\n to_apply=update_s32\n}\n)\";\n Literal operand = LiteralUtil::CreateR3({{{0, -1}, {1, -1}},\n {{2, -1}, {3, -1}},\n {{-1, -1}, {-1, -1}},\n {{-1, 
-1}, {-1, -1}}});\n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n PadAndExecute(std::move(module), {&operand}));\n Literal expected = LiteralUtil::CreateR0(6);\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, ReshapeComplicated) {\n const std::string hlo_text = R\"(\nHloModule TensorFlowScatterV1\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n}\nENTRY main {\n param = s32[2, 4, 4] parameter(0)\n two = s32[] constant(2)\n param_padded_dynamic = s32[2, <=4, 4] set-dimension-size(param, two),\n dimensions={1}\n reshaped = s32[<=16, 2] reshape(param_padded_dynamic), inferred_dimension=0\n init = s32[] constant(0)\n ROOT reduce = s32[] reduce(reshaped, init),\n dimensions={0, 1},\n to_apply=update_s32\n}\n)\";\n Literal operand = LiteralUtil::CreateR3(\n {{{1, 2, 3, 4}, {5, 6, 7, 8}, {-1, -1, -1, -1}, {-1, -1, -1, -1}},\n {{9, 10, 11, 12},\n {13, 14, 15, 16},\n {-1, -1, -1, -1},\n {-1, -1, -1, -1}}});\n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n PadAndExecute(std::move(module), {&operand}));\n Literal expected = LiteralUtil::CreateR0(136);\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, WhileLoopStack) {\n const std::string hlo_text = R\"(\nHloModule module\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n}\nbody {\n stack = (s32[<=4,2]) parameter(0)\n stack_buffer = s32[<=4, 2] get-tuple-element(stack), index=0\n stack_size = s32[] get-dimension-size(stack_buffer), dimensions={0}\n zero = s32[] constant(0)\n one = s32[] constant(1)\n new_data = s32[1, 2] broadcast(s32[] stack_size), dimensions={}\n new_stack_size = s32[] add(stack_size, one)\n new_stack_buffer = s32[<=4, 2] set-dimension-size(stack_buffer, new_stack_size), dimensions={0}\n new_stack = s32[<=4, 2] 
dynamic-update-slice(new_stack_buffer, new_data, stack_size, zero)\n ROOT new_stack_tuple = (s32[<=4,2]) tuple(new_stack)\n}\ncondition {\n stack = (s32[<=4,2]) parameter(0)\n stack_buffer = s32[<=4, 2] get-tuple-element(stack), index=0\n stack_size = s32[] get-dimension-size(stack_buffer), dimensions={0}\n three = s32[] constant(3)\n ROOT less-than = pred[] compare(s32[] stack_size, s32[] three), direction=LT\n}\nENTRY entry {\n zero = s32[] constant(0)\n pad = s32[] constant(-1)\n stack_buffer_input = s32[4, 2] broadcast(s32[] pad), dimensions={}\n stack_buffer_input_dynamic = s32[<=4, 2] set-dimension-size(stack_buffer_input, zero), dimensions={0}\n input_tuple = (s32[<=4 ,2]) tuple(stack_buffer_input_dynamic)\n while = (s32[<=4, 2]) while(input_tuple), body=body, condition=condition\n stack_buffer = s32[<=4, 2] get-tuple-element(while), index=0\n ROOT reduce = s32[2] reduce(stack_buffer, zero),\n dimensions={0},\n to_apply=update_s32\n}\n)\";\n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {}));\n Literal expected = LiteralUtil::CreateR1({{3, 3}});\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, DynamicAddWithImplicitBroadcast) {\n const std::string hlo_text = R\"(\nHloModule module\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n}\nENTRY entry {\n zero = s32[] constant(0)\n one = s32[] constant(1)\n two = s32[] constant(2)\n three = s32[] constant(3)\n input1 = s32[4, 2] iota(), iota_dimension=0\n ones = s32[4, 2] broadcast(one), dimensions={}\n input1_added = s32[4, 2] add(input1, ones)\n input1_dynamic = s32[<=4, 2] set-dimension-size(input1_added, one), dimensions={0}\n input2 = s32[4, 2] broadcast(two), dimensions={}\n input2_dynamic = s32[<=4, 2] set-dimension-size(input2, three), dimensions={0}\n add = s32[<=4, 2] add(input1_dynamic, input2_dynamic)\n ROOT reduce = s32[2] reduce(add, 
zero),\n dimensions={0},\n to_apply=update_s32\n}\n)\";\n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {}));\n Literal expected = LiteralUtil::CreateR1({{9, 9}});\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, DynamicAddWithImplicitSlice) {\n const std::string hlo_text = R\"(\nHloModule module\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n}\nENTRY entry {\n zero = s32[] constant(0)\n one = s32[] constant(1)\n two = s32[] constant(2)\n three = s32[] constant(3)\n input1 = s32[4, 2] broadcast(one), dimensions={}\n input1_dynamic = s32[<=4, 2] set-dimension-size(input1, three), dimensions={0}\n input2 = s32[4, 2] broadcast(two), dimensions={}\n input2_dynamic = s32[<=4, 2] set-dimension-size(input2, two), dimensions={0}\n add = s32[<=4, 2] add(input1_dynamic, input2_dynamic)\n ROOT reduce = s32[2] reduce(add, zero),\n dimensions={0},\n to_apply=update_s32\n}\n)\";\n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {}));\n Literal expected = LiteralUtil::CreateR1({{6, 6}});\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, DynamicStackPop) {\n const std::string hlo_text = R\"(\nHloModule module\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n}\nbody {\n param_tuple = (s32[<=4,2]) parameter(0)\n param = s32[<=4, 2] get-tuple-element(param_tuple), index=0\n one = s32[] constant(1)\n size = s32[] get-dimension-size(param), dimensions={0}\n new_size = s32[] subtract(size, one)\n output = s32[<=4, 2] set-dimension-size(param, new_size), dimensions={0}\n ROOT root = (s32[<=4, 2]) tuple(output)\n}\ncondition {\n stack = (s32[<=4,2]) parameter(0)\n stack_buffer = s32[<=4,2] get-tuple-element(stack), index=0\n stack_size = s32[] 
get-dimension-size(stack_buffer), dimensions={0}\n two = s32[] constant(2)\n ROOT greater-than = pred[] compare(s32[] stack_size, s32[] two), direction=GT\n}\nENTRY entry {\n one = s32[] constant(1)\n zero = s32[] constant(0)\n four = s32[] constant(4)\n stack_buffer_input = s32[4, 2] broadcast(s32[] one), dimensions={}\n stack_buffer_dynamic = s32[<=4, 2] set-dimension-size(stack_buffer_input, four), dimensions={0}\n input_tuple = (s32[<=4, 2]) tuple(stack_buffer_dynamic)\n while = (s32[<=4, 2]) while(input_tuple), body=body, condition=condition\n stack_buffer = s32[<=4, 2] get-tuple-element(while), index=0\n ROOT reduce = s32[2] reduce(stack_buffer, zero),\n dimensions={0},\n to_apply=update_s32\n}\n)\";\n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {}));\n Literal expected = LiteralUtil::CreateR1({{2, 2}});\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, DoubleDynamicDimension) {\n const std::string hlo_text = R\"(\nHloModule TensorFlowScatterV1\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n}\nENTRY main {\n param = s32[2, 3, 3] parameter(0)\n size = s32[] constant(2)\n param_padded_partial = s32[2, <=3, 3] set-dimension-size(param, size),\n dimensions={1}\n param_padded = s32[2, 3, <=3] set-dimension-size(param_padded_partial, size),\n dimensions={2}\n reshaped = s32[<=18] reshape(param_padded)\n init = s32[] constant(0)\n ROOT reduce = s32[] reduce(reshaped, init),\n dimensions={0},\n to_apply=update_s32\n}\n)\";\n Literal operand = LiteralUtil::CreateR3(\n {{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}}, {{0, 1, 2}, {3, 4, 5}, {6, 7, 8}}});\n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n PadAndExecute(std::move(module), {&operand}));\n Literal expected = LiteralUtil::CreateR0(16);\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, 
DynamicReshapeDoubleDynamicDimensions) {\n const std::string hlo_text = R\"(\nHloModule TensorFlowScatterV1\nENTRY main {\n param = s32[2, 3, 3] parameter(0)\n size = s32[] constant(2)\n param_padded_partial = s32[2, <=3, 3] set-dimension-size(param, size),\n dimensions={1}\n param_padded = s32[2, <=3, <=3] set-dimension-size(param_padded_partial, size),\n dimensions={2}\n result_size = s32[] constant(8)\n ROOT reshaped = s32[<=18] dynamic-reshape(param_padded, result_size)\n}\n)\";\n Literal operand = LiteralUtil::CreateR3(\n {{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}}, {{0, 1, 2}, {3, 4, 5}, {6, 7, 8}}});\n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n PadAndExecute(std::move(module), {&operand}, false));\n result.SetDynamicSize(0, 8);\n Literal expected = LiteralUtil::CreateR1({0, 1, 3, 4, 0, 1, 3, 4});\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, DynamicReshapeOutputDoubleDynamicDimensions) {\n const std::string hlo_text = R\"(\nHloModule TensorFlowScatterV1\nENTRY main {\n param = s32[18] parameter(0)\n eight = s32[] constant(8)\n param_dynamic = s32[<=18] set-dimension-size(param, eight), dimensions={0}\n two = s32[] constant(2)\n ROOT reshaped = s32[2, <=3, <=3] dynamic-reshape(param_dynamic, two, two, two)\n}\n)\";\n Literal operand = LiteralUtil::CreateR1(\n {0, 1, 3, 4, 0, 1, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1});\n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n PadAndExecute(std::move(module), {&operand}, false));\n VLOG(1) << \" result: \" << result.ToString();\n result.SetDynamicSize(1, 2);\n result.SetDynamicSize(2, 2);\n Literal expected =\n LiteralUtil::CreateR3({{{0, 1}, {3, 4}}, {{0, 1}, {3, 4}}});\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, DynamicReshapeComplicated) {\n const std::string hlo_text = R\"(\nHloModule TensorFlowScatterV1\nENTRY main {\n param = s32[3, 4, 4] parameter(0)\n two = s32[] constant(2)\n param_dynamic = s32[<=3, 4, 4] 
set-dimension-size(param, two), dimensions={0}\n three = s32[] constant(3)\n param_dynamic1 = s32[<=3, <=4, 4] set-dimension-size(param_dynamic, three), dimensions={1}\n param_dynamic2 = s32[<=3, <=4, <=4] set-dimension-size(param_dynamic1, three), dimensions={2}\n six = s32[] constant(6)\n ROOT reshaped = s32[<=6, <=8] dynamic-reshape(param_dynamic2, three, six)\n}\n)\";\n Literal operand = LiteralUtil::CreateR3(\n {{{0, 1, 2, -1}, {3, 4, 5, -1}, {6, 7, 8, -1}, {-1, -1, -1, -1}},\n {{9, 8, 7, -1}, {6, 5, 4, -1}, {3, 2, 1, -1}, {-1, -1, -1, -1}},\n {{-1, -1, -1, -1},\n {-1, -1, -1, -1},\n {-1, -1, -1, -1},\n {-1, -1, -1, -1}}});\n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n PadAndExecute(std::move(module), {&operand}, false));\n result.SetDynamicSize(0, 3);\n result.SetDynamicSize(1, 6);\n Literal expected = LiteralUtil::CreateR2(\n {{0, 1, 2, 3, 4, 5}, {6, 7, 8, 9, 8, 7}, {6, 5, 4, 3, 2, 1}});\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, SetGetDimensionSize) {\n const std::string hlo_text = R\"(\nHloModule TensorFlowScatterV1\nENTRY main {\n param = s32[3] parameter(0)\n size = s32[] constant(2)\n param_dynamic_size = s32[3] set-dimension-size(param, size),\n dimensions={0}\n ROOT gds = s32[] get-dimension-size(param_dynamic_size),\n dimensions={0}\n}\n)\";\n Literal operand = LiteralUtil::CreateR1({1, 2, 3});\n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n PadAndExecute(std::move(module), {&operand}));\n Literal expected = LiteralUtil::CreateR0(2);\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, DynamicSort) {\n const std::string hlo_text = R\"(\nHloModule TEST\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n}\n%compare-greater-than (lhs: s32[], rhs: s32[]) -> pred[] {\n %lhs = s32[] parameter(0)\n %rhs = s32[] parameter(1)\n ROOT %compare = pred[] compare(s32[] %lhs, s32[] 
%rhs), direction=GT\n}\nENTRY main {\n param = s32[4] parameter(0)\n size = s32[] constant(3)\n param_dynamic_size = s32[<=4] set-dimension-size(param, size),\n dimensions={0}\n ROOT sort = s32[<=4]{0} sort(s32[4]{0} %param_dynamic_size),\n dimensions={0}, is_stable=false, to_apply=%compare-greater-than\n}\n)\";\n Literal operand = LiteralUtil::CreateR1({1, 4, 3, 2});\n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n PadAndExecute(std::move(module), {&operand},\n false));\n Literal expected = LiteralUtil::CreateR1({4, 3, 1, 2});\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, DynamicPad) {\n const std::string hlo_text = R\"(\nHloModule TEST\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n}\nENTRY main {\n param = s32[4] parameter(0)\n size = s32[] constant(3)\n padding = s32[] constant(2)\n param_dynamic = s32[<=4] set-dimension-size(param, size),\n dimensions={0}\n pad = s32[<=6] pad(param_dynamic, padding), padding=1_1\n init = s32[] constant(0)\n ROOT reduce = s32[] reduce(pad, init),\n dimensions={0},\n to_apply=update_s32\n}\n)\";\n Literal operand = LiteralUtil::CreateR1({1, 4, 3, 5});\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n PadAndExecute(std::move(module), {&operand},\n false));\n Literal expected = LiteralUtil::CreateR0(12);\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, DynamicPadInteriorPadding) {\n const std::string hlo_text = R\"(\nHloModule TEST\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n}\nENTRY main {\n param = s32[4] parameter(0)\n size = s32[] constant(3)\n padding = s32[] constant(2)\n param_dynamic = s32[<=4] set-dimension-size(param, size),\n dimensions={0}\n pad = s32[<=7] pad(param_dynamic, padding), padding=0_0_1\n init = 
s32[] constant(0)\n ROOT reduce = s32[] reduce(pad, init),\n dimensions={0},\n to_apply=update_s32\n}\n)\";\n Literal operand = LiteralUtil::CreateR1({1, 4, 3, 5});\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n PadAndExecute(std::move(module), {&operand},\n false));\n Literal expected = LiteralUtil::CreateR0(12);\n EXPECT_EQ(result, expected);\n}\nXLA_TEST_F(ExecutionTest, DynamicConditionalDimension) {\n const std::string hlo_text = R\"(\nHloModule module\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n}\ntrue_branch {\n true_param = (s32[<=3,2]) parameter(0)\n param = s32[<=3, 2] get-tuple-element(true_param), index=0\n add = s32[<=3,2] add(param, param)\n ROOT true_tuple = (s32[<=3,2], s32[<=3,2]) tuple(add, add)\n}\nfalse_branch {\n false_param = (s32[<=3,2]) parameter(0)\n param = s32[<=3, 2] get-tuple-element(false_param), index=0\n add = s32[<=3,2] add(param, param)\n ROOT false_tuple = (s32[<=3,2], s32[<=3,2]) tuple(add, add)\n}\nENTRY entry {\n param0 = s32[3,2] parameter(0)\n size = s32[] constant(2)\n branch = pred[] constant(false)\n param_dynamic = s32[<=3, 2] set-dimension-size(param0, size), dimensions={0}\n param_tuple = (s32[<=3 ,2]) tuple(param_dynamic)\n conditional = (s32[<=3, 2], s32[<=3, 2]) conditional(branch, param_tuple, param_tuple),\n true_computation=true_branch, false_computation=false_branch\n gte0 = s32[<=3,2] get-tuple-element(conditional), index=1\n init = s32[] constant(0)\n ROOT reduce = s32[2] reduce(gte0, init),\n dimensions={0},\n to_apply=update_s32\n}\n)\";\n Literal operand = LiteralUtil::CreateR2({{0, 1}, {2, 3}, {4, 5}});\n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n PadAndExecute(std::move(module), {&operand},\n false));\n Literal expected = LiteralUtil::CreateR1({4, 8});\n EXPECT_EQ(result, 
expected);\n}\nXLA_TEST_F(ExecutionTest, DynamicTupleSort) {\n const std::string hlo_text = R\"(\nHloModule TEST\n%compare-greater-than (lhs: s32[], rhs: s32[], lhs_2: s32[], lhs_2: s32[]) -> pred[] {\n %lhs = s32[] parameter(0)\n %rhs = s32[] parameter(1)\n %lhs_2 = s32[] parameter(2)\n %rhs_2 = s32[] parameter(3)\n ROOT %compare = pred[] compare(s32[] %lhs, s32[] %rhs), direction=GT\n}\nupdate_s32 (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n}\nENTRY main {\n param = s32[3] parameter(0)\n size = s32[] constant(2)\n param_dynamic_size = s32[<=3] set-dimension-size(param, size),\n dimensions={0}\n sort = (s32[<=3]{0}, s32[<=3]{0}) sort(s32[<=3]{0} %param_dynamic_size,\n s32[<=3]{0} %param_dynamic_size),\n dimensions={0}, is_stable=true, to_apply=%compare-greater-than\n ROOT get-tuple-element = s32[<=3]{0} get-tuple-element((s32[<=3]{0}, s32[<=3]{0}) %sort),\n index=0\n}\n)\";\n Literal operand = LiteralUtil::CreateR1({0, 4, 2});\n auto module = GetHloModule(hlo_text);\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n PadAndExecute(std::move(module), {&operand},\n false));\n Literal expected = LiteralUtil::CreateR1({4, 0, 2});\n EXPECT_EQ(result, expected);\n}\nnamespace op = xla::testing::opcode_matchers;\nclass HloDimensionSizeLegalizerTest : public HloTestBase {\n protected:\n HloDimensionSizeLegalizerTest() {}\n};\nTEST_F(HloDimensionSizeLegalizerTest, Ok) {\n auto module = ParseAndReturnVerifiedModule(R\"(\nHloModule _\nENTRY gds {\n p = s32[3,4] parameter(0)\n size0 = s32[] get-dimension-size(p), dimensions={0}\n size1 = s32[] get-dimension-size(p), dimensions={1}\n ROOT mul = s32[] multiply(size0, size1)\n})\")\n .value();\n DynamicPadder pass;\n EXPECT_TRUE(pass.Run(module.get()).value());\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Multiply(op::Constant(), op::Constant()));\n}\nTEST_F(HloDimensionSizeLegalizerTest, GetSetSetDimensionSizeRewriter) {\n auto 
module = ParseAndReturnVerifiedModule(R\"(\nHloModule _\nENTRY gds {\n p = s32[3,4] parameter(0)\n size0 = s32[] get-dimension-size(p), dimensions={0}\n p_copy = s32[3,4] copy(p)\n p_copy_dynamic = s32[<=3, 4] set-dimension-size(p_copy, size0), dimensions={0}\n size1 = s32[] get-dimension-size(p_copy_dynamic), dimensions={0}\n ROOT mul = s32[] multiply(size0, size1)\n})\")\n .value();\n DynamicPadder pass;\n EXPECT_TRUE(pass.Run(module.get()).value());\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Multiply(op::Constant(), op::Constant()));\n}\nTEST_F(HloDimensionSizeLegalizerTest, IllegalType) {\n auto module = ParseAndReturnUnverifiedModule(R\"(\nHloModule _\nENTRY gds {\n p = s32[3]{0} parameter(0)\n ROOT gds = s64[] get-dimension-size(p), dimensions={0}\n})\")\n .value();\n DynamicPadder pass;\n EXPECT_FALSE(pass.Run(module.get()).ok());\n}\nTEST_F(HloDimensionSizeLegalizerTest, IllegalDimension) {\n auto module = ParseAndReturnUnverifiedModule(R\"(\nHloModule _\nENTRY gds {\n p = f32[2,5] parameter(0)\n ROOT gds = s32[] get-dimension-size(p), dimensions={2}\n})\")\n .value();\n DynamicPadder pass;\n EXPECT_FALSE(pass.Run(module.get()).ok());\n}\nclass SizeCheckTest : public HloTestBase {\n protected:\n SizeCheckTest() {}\n};\nTEST_F(SizeCheckTest, CompileTimeCheckBinaryOpFail) {\n auto module = ParseAndReturnUnverifiedModule(R\"(\nHloModule _\nENTRY gds {\n size_0 = s32[] parameter(0)\n size_1 = s32[] parameter(1)\n arg = s32[4]{0} parameter(2)\n dynamic_arg_0 = s32[<=4] set-dimension-size(arg, size_0), dimensions={0}\n dynamic_arg_1 = s32[<=4] set-dimension-size(arg, size_1), dimensions={0}\n ROOT add = s32[<=4] add(dynamic_arg_0, dynamic_arg_1)\n})\")\n .value();\n auto options = DynamicPadderOptions();\n options.shape_check_mode =\n DynamicDimensionInference::ShapeCheckMode::kCompileTime;\n DynamicPadder pass(options);\n auto status = pass.Run(module.get()).status();\n EXPECT_THAT(status.code(), 
tsl::error::INVALID_ARGUMENT);\n}\nTEST_F(SizeCheckTest, CompileTimeCheckBinaryOpPass) {\n auto module = ParseAndReturnUnverifiedModule(R\"(\nHloModule _\nENTRY gds {\n size_0 = s32[] parameter(0)\n size_0_reshape = s32[1] reshape(size_0)\n size_1 = s32[] reshape(size_0_reshape)\n arg = s32[4]{0} parameter(1)\n dynamic_arg_0 = s32[<=4] set-dimension-size(arg, size_0), dimensions={0}\n dynamic_arg_1 = s32[<=4] set-dimension-size(arg, size_1), dimensions={0}\n ROOT add = s32[<=4] add(dynamic_arg_0, dynamic_arg_1)\n})\")\n .value();\n auto options = DynamicPadderOptions();\n options.shape_check_mode =\n DynamicDimensionInference::ShapeCheckMode::kCompileTime;\n DynamicDimensionSimplifier simplifier;\n EXPECT_TRUE(simplifier.Run(module.get()).ok());\n DynamicPadder pass(options);\n auto status = pass.Run(module.get()).status();\n EXPECT_TRUE(status.ok());\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_padder.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_padder_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1127,"cells":{"ID":{"kind":"string","value":"fb09fbe9-6296-4eab-b4c4-62ab42ce86f5"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"reduce_decomposer"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/reduce_decomposer.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/reduce_decomposer_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/reduce_decomposer.h\"\n#include \n#include \n#include \n#include \"absl/status/status.h\"\n#include 
\"xla/hlo/ir/dfs_hlo_visitor_with_default.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/service/hlo_creation_utils.h\"\nnamespace xla {\nnamespace {\nclass VariadicReductionLayoutEqualizer : public DfsHloRewriteVisitor {\n public:\n absl::Status HandleReduce(HloInstruction* hlo) override {\n auto reduce = Cast(hlo);\n std::vector new_inputs;\n bool changed = false;\n for (HloInstruction* input : reduce->inputs()) {\n auto first_input = reduce->inputs()[0];\n auto first_input_s = first_input->shape();\n auto input_s = input->shape();\n if (first_input_s.layout() != input_s.layout()) {\n Shape new_input_s = ShapeUtil::MakeShapeWithDenseLayout(\n input_s.element_type(), input_s.dimensions(),\n first_input_s.layout().minor_to_major());\n auto copy = MakeCopyHlo(input, new_input_s);\n changed = true;\n new_inputs.push_back(copy);\n } else {\n new_inputs.push_back(input);\n }\n }\n if (changed) {\n TF_ASSIGN_OR_RETURN(\n auto new_reduce,\n MakeReduceHlo(new_inputs, reduce->init_values(), reduce->dimensions(),\n reduce->called_computations()[0]));\n TF_RETURN_IF_ERROR(ReplaceInstruction(reduce, new_reduce));\n }\n return absl::OkStatus();\n }\n};\nclass ReduceDecomposerVisitor : public DfsHloRewriteVisitor {\n public:\n explicit ReduceDecomposerVisitor(HloPredicate custom_layout_allowed)\n : custom_layout_allowed_(std::move(custom_layout_allowed)) {}\n absl::Status HandleReduce(HloInstruction* hlo) override {\n auto reduce = Cast(hlo);\n auto shape = reduce->shape();\n if (custom_layout_allowed_ && custom_layout_allowed_(reduce)) {\n return absl::OkStatus();\n }\n std::vector expected_shapes(reduce->input_count());\n for (int i = 0; i < reduce->input_count(); i++) {\n expected_shapes[i] = ExpectedOutputShape(reduce, i);\n TF_RET_CHECK(reduce->inputs()[i]->shape().layout() ==\n reduce->inputs()[0]->shape().layout());\n }\n std::vector output_shapes;\n if (shape.IsTuple()) {\n for (int i = 0; i < 
shape.tuple_shapes_size(); i++) {\n output_shapes.push_back(ShapeUtil::GetTupleElementShape(shape, i));\n TF_RET_CHECK(output_shapes[i].layout() == output_shapes[0].layout());\n }\n } else {\n output_shapes.push_back(shape);\n }\n TF_RET_CHECK(!output_shapes.empty());\n if (ShapeUtil::MakeMaybeTupleShape(expected_shapes) !=\n ShapeUtil::MakeMaybeTupleShape(output_shapes)) {\n TF_ASSIGN_OR_RETURN(auto r_prime,\n MakeReduceHlo(reduce->inputs(), reduce->init_values(),\n reduce->dimensions(),\n reduce->called_computations()[0]));\n TF_RET_CHECK(r_prime->shape() ==\n ShapeUtil::MakeMaybeTupleShape(expected_shapes));\n if (!shape.IsTuple()) {\n auto copy = MakeCopyHlo(r_prime, shape);\n TF_RETURN_IF_ERROR(ReplaceInstruction(reduce, copy));\n return absl::OkStatus();\n }\n std::vector copies;\n for (int i = 0; i < reduce->input_count(); i++) {\n TF_ASSIGN_OR_RETURN(auto from, GetOutput(r_prime, i));\n auto copy = MakeCopyHlo(from, output_shapes[i]);\n copies.push_back(copy);\n }\n auto out = MaybeMakeTuple(copies);\n TF_RETURN_IF_ERROR(ReplaceInstruction(reduce, out));\n }\n return absl::OkStatus();\n }\n private:\n absl::StatusOr GetOutput(HloInstruction* instr, int idx) {\n if (instr->shape().IsTuple()) {\n return MakeGetTupleElementHlo(instr, idx);\n } else {\n TF_RET_CHECK(idx == 0);\n return instr;\n }\n }\n Shape ExpectedOutputShape(HloReduceInstruction* reduce, int input_idx) {\n Shape reduce_shape = reduce->shape();\n auto output_shape = reduce_shape.IsTuple()\n ? 
reduce_shape.tuple_shapes(input_idx)\n : reduce_shape;\n auto* operand = reduce->inputs()[input_idx];\n auto operand_shape = operand->shape();\n return ShapeUtil::DeleteDimensions(reduce->dimensions(), operand_shape);\n }\n HloPredicate custom_layout_allowed_;\n};\n} \nabsl::StatusOr ReduceDecomposer::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n TF_ASSIGN_OR_RETURN(bool changed1,\n VariadicReductionLayoutEqualizer{}.RunOnModule(\n module, execution_threads));\n TF_ASSIGN_OR_RETURN(\n bool changed2,\n ReduceDecomposerVisitor{custom_layout_allowed_}.RunOnModule(\n module, execution_threads));\n return changed1 || changed2;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/reduce_decomposer.h\"\n#include \n#include \n#include \n#include \"xla/service/hlo_parser.h\"\n#include \"xla/test.h\"\n#include \"xla/test_helpers.h\"\n#include \"xla/tests/filecheck.h\"\n#include \"xla/tests/hlo_test_base.h\"\nnamespace xla {\nnamespace {\nclass ReduceDecomposerTest : public HloTestBase {};\nTEST_F(ReduceDecomposerTest, ReducePerformsTransposition) {\n const char* hlo = R\"(\nHloModule module\nadd {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT out = add(a, b)\n}\nENTRY c {\n p = f32[5,3,4]{2,1,0} parameter(0)\n z = f32[] constant(0)\n ROOT r = f32[5,4]{0,1} reduce(p, z), dimensions={1}, to_apply=add\n}\n)\";\n RunAndFilecheckHloRewrite(\n hlo,\n ReduceDecomposer{[&](const HloInstruction*) {\n return true;\n }},\n std::nullopt);\n RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{},\n R\"(\n )\");\n}\nTEST_F(ReduceDecomposerTest, ReduceNaturalLayout) {\n const char* hlo = R\"(\nHloModule module\nadd {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT out = add(a, b)\n}\nENTRY c {\n p = f32[5,3,4]{2,1,0} parameter(0)\n z = f32[] constant(0)\n ROOT r = reduce(p, z), dimensions={1}, to_apply=add\n}\n)\";\n RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{}, 
std::nullopt);\n}\nTEST_F(ReduceDecomposerTest, VariadicReductionWithTranspose) {\n const char* hlo = R\"(\nHloModule ReduceWithLayoutChangeVariadicDifferent\nargmax {\n running_max = f32[] parameter(0)\n running_max_idx = u32[] parameter(1)\n current_value = f32[] parameter(2)\n current_value_idx = u32[] parameter(3)\n current = (f32[], u32[]) tuple(running_max, running_max_idx)\n potential = (f32[], u32[]) tuple(current_value, current_value_idx)\n cmp_code = pred[] compare(current_value, running_max), direction=GT\n new_max = f32[] select(cmp_code, current_value, running_max)\n new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)\n ROOT out = (f32[], u32[]) tuple(new_max, new_idx)\n}\nENTRY main {\n arg0 = f32[2,3,4,1024]{3,2,1,0} parameter(0)\n idxs = u32[2,3,4,1024]{3,2,1,0} parameter(1)\n constant0 = f32[] constant(0)\n constant1 = u32[] constant(0)\n ROOT reduce0 = (\n f32[2,3,4]{0,1,2},\n u32[2,3,4]{0,1,2}\n ) reduce(arg0, idxs, constant0,constant1), dimensions={3}, to_apply=argmax\n}\n )\";\n RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{},\n R\"(\n )\");\n}\nTEST_F(ReduceDecomposerTest, VariadicReductionDescendingLayout) {\n const char* hlo = R\"(\nHloModule ReduceWithLayoutChangeVariadicDifferent\nargmax {\n running_max = f32[] parameter(0)\n running_max_idx = u32[] parameter(1)\n current_value = f32[] parameter(2)\n current_value_idx = u32[] parameter(3)\n current = (f32[], u32[]) tuple(running_max, running_max_idx)\n potential = (f32[], u32[]) tuple(current_value, current_value_idx)\n cmp_code = pred[] compare(current_value, running_max), direction=GT\n new_max = f32[] select(cmp_code, current_value, running_max)\n new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)\n ROOT out = (f32[], u32[]) tuple(new_max, new_idx)\n}\nENTRY main {\n arg0 = f32[2,3,4,1024]{3,2,1,0} parameter(0)\n idxs = u32[2,3,4,1024]{3,2,1,0} parameter(1)\n constant0 = f32[] constant(0)\n constant1 = u32[] constant(0)\n ROOT reduce0 = (\n 
f32[2,3,4]{2,1,0},\n u32[2,3,4]{2,1,0}\n ) reduce(arg0, idxs, constant0,constant1), dimensions={3}, to_apply=argmax\n}\n )\";\n RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{}, std::nullopt);\n}\nTEST_F(ReduceDecomposerTest, VariadicReductionInputsDifferentLayout) {\n const char* hlo = R\"(\nHloModule ReduceWithLayoutChangeVariadicDifferent\nargmax {\n running_max = f32[] parameter(0)\n running_max_idx = u32[] parameter(1)\n current_value = f32[] parameter(2)\n current_value_idx = u32[] parameter(3)\n current = (f32[], u32[]) tuple(running_max, running_max_idx)\n potential = (f32[], u32[]) tuple(current_value, current_value_idx)\n cmp_code = pred[] compare(current_value, running_max), direction=GT\n new_max = f32[] select(cmp_code, current_value, running_max)\n new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)\n ROOT out = (f32[], u32[]) tuple(new_max, new_idx)\n}\nENTRY main {\n arg0 = f32[2,3,4,1024]{3,2,1,0} parameter(0)\n idxs = u32[2,3,4,1024]{2,1,3,0} parameter(1)\n constant0 = f32[] constant(0)\n constant1 = u32[] constant(0)\n ROOT reduce0 = (\n f32[2,3,4]{2,1,0},\n u32[2,3,4]{2,1,0}\n ) reduce(arg0, idxs, constant0,constant1), dimensions={3}, to_apply=argmax\n}\n )\";\n RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{}, R\"(\n)\");\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_decomposer.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_decomposer_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1128,"cells":{"ID":{"kind":"string","value":"f470b0a5-df2e-44f5-a4bb-da606c6ec291"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File 
Name":{"kind":"string","value":"dynamic_index_splitter"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/dynamic_index_splitter.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/dynamic_index_splitter_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/dynamic_index_splitter.h\"\n#include \n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/container/inlined_vector.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/shape_util.h\"\nnamespace xla {\nabsl::StatusOr DynamicIndexSplitter::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n bool changed = false;\n std::vector computations =\n module->MakeNonfusionComputations(execution_threads);\n for (HloComputation* computation : computations) {\n for (HloInstruction* dynamic_op : computation->MakeInstructionPostOrder()) {\n switch (dynamic_op->opcode()) {\n case HloOpcode::kDynamicSlice:\n case HloOpcode::kDynamicUpdateSlice:\n break;\n default:\n continue;\n }\n auto parent = dynamic_op->parent();\n bool is_update = dynamic_op->opcode() == HloOpcode::kDynamicUpdateSlice;\n int64_t num_indices = dynamic_op->operand(0)->shape().rank();\n if (num_indices == 0) {\n if (is_update) {\n TF_CHECK_OK(parent->ReplaceInstruction(\n dynamic_op, dynamic_op->mutable_operand(1)));\n } else {\n TF_CHECK_OK(parent->ReplaceInstruction(\n dynamic_op, dynamic_op->mutable_operand(0)));\n }\n changed = true;\n continue;\n }\n int64_t index_operand_number =\n Cast(dynamic_op)\n ->first_index_operand_number();\n auto index_operand = dynamic_op->mutable_operand(index_operand_number);\n if (ShapeUtil::IsScalar(index_operand->shape())) {\n continue;\n }\n TF_RET_CHECK(index_operand->shape().rank() == 1);\n 
auto index_element_type = index_operand->shape().element_type();\n std::vector index_array;\n index_array.reserve(num_indices);\n for (int64_t dim = 0; dim < num_indices; ++dim) {\n auto slice = parent->AddInstruction(HloInstruction::CreateSlice(\n ShapeUtil::MakeShape(index_element_type, {1}), index_operand, {dim},\n {dim + 1}, {1}));\n auto bitcast = parent->AddInstruction(HloInstruction::CreateReshape(\n ShapeUtil::MakeShape(index_element_type, {}), slice));\n index_array.push_back(bitcast);\n }\n auto new_dynamic_op =\n is_update\n ? HloInstruction::CreateDynamicUpdateSlice(\n dynamic_op->shape(), dynamic_op->mutable_operand(0),\n dynamic_op->mutable_operand(1), absl::MakeSpan(index_array))\n : HloInstruction::CreateDynamicSlice(\n dynamic_op->shape(), dynamic_op->mutable_operand(0),\n absl::MakeSpan(index_array),\n dynamic_op->dynamic_slice_sizes());\n TF_CHECK_OK(parent->ReplaceWithNewInstruction(dynamic_op,\n std::move(new_dynamic_op)));\n changed = true;\n }\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/dynamic_index_splitter.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/test.h\"\n#include \"xla/test_helpers.h\"\n#include \"xla/tests/hlo_test_base.h\"\nnamespace xla {\nnamespace {\nnamespace op = xla::testing::opcode_matchers;\nclass DynamicIndexSplitterTest : public HloTestBase {};\nTEST_F(DynamicIndexSplitterTest, DynamicSlice) {\n const char* const kDynamicSlice = R\"(\n HloModule DynamicSlice_module\n ENTRY entry (operand: s32[4,5,6], indices: s32[3]) -> s32[1,1,1] {\n operand = s32[4,5,6] parameter(0)\n indices = s32[3] parameter(1)\n ROOT dynamic-slice = s32[1,1,1] dynamic-slice(operand, indices), dynamic_slice_sizes={1,1,1}\n }\n )\";\n HloModuleConfig config;\n DebugOptions debug_options = config.debug_options();\n 
debug_options.set_xla_allow_scalar_index_dynamic_ops(true);\n config.set_debug_options(debug_options);\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kDynamicSlice, config));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n DynamicIndexSplitter().Run(module.get()));\n EXPECT_TRUE(changed);\n ASSERT_THAT(module->entry_computation()->root_instruction(),\n op::DynamicSlice(op::Parameter(0),\n op::Reshape(op::Slice(op::Parameter(1))),\n op::Reshape(op::Slice(op::Parameter(1))),\n op::Reshape(op::Slice(op::Parameter(1)))));\n for (int i = 0; i < 3; ++i) {\n const HloInstruction* slice = module->entry_computation()\n ->root_instruction()\n ->operand(i + 1)\n ->operand(0);\n EXPECT_EQ(slice->slice_starts(0), i);\n EXPECT_EQ(slice->slice_limits(0), i + 1);\n }\n}\nTEST_F(DynamicIndexSplitterTest, DynamicUpdateSlice) {\n const char* const kDynamicUpdateSlice = R\"(\n HloModule DynamicUpdatedSlice_module\n ENTRY entry (operand: s32[4,5,6], indices: s32[3], update: s32[1,1,1]) -> s32[4,5,6] {\n operand = s32[4,5,6] parameter(0)\n indices = s32[3] parameter(1)\n update = s32[1,1,1] parameter(2)\n ROOT dynamic-update-slice = s32[4,5,6] dynamic-update-slice(operand, update, indices)\n }\n )\";\n HloModuleConfig config;\n DebugOptions debug_options = config.debug_options();\n debug_options.set_xla_allow_scalar_index_dynamic_ops(true);\n config.set_debug_options(debug_options);\n TF_ASSERT_OK_AND_ASSIGN(\n auto module, ParseAndReturnVerifiedModule(kDynamicUpdateSlice, config));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n DynamicIndexSplitter().Run(module.get()));\n EXPECT_TRUE(changed);\n ASSERT_THAT(module->entry_computation()->root_instruction(),\n op::DynamicUpdateSlice(op::Parameter(0), op::Parameter(2),\n op::Reshape(op::Slice(op::Parameter(1))),\n op::Reshape(op::Slice(op::Parameter(1))),\n op::Reshape(op::Slice(op::Parameter(1)))));\n for (int i = 0; i < 3; ++i) {\n const HloInstruction* slice = module->entry_computation()\n ->root_instruction()\n ->operand(i 
+ 2)\n ->operand(0);\n EXPECT_EQ(slice->slice_starts(0), i);\n EXPECT_EQ(slice->slice_limits(0), i + 1);\n }\n}\nTEST_F(DynamicIndexSplitterTest, AlreadyScalar) {\n const char* const kDynamicSlice = R\"(\n HloModule DynamicSlice_module\n ENTRY entry (operand: s32[4,5,6], index.0: s32[], index.1: s32[], index.2: s32[]) -> s32[1,1,1] {\n operand = s32[4,5,6] parameter(0)\n index.0 = s32[] parameter(1)\n index.1 = s32[] parameter(2)\n index.2 = s32[] parameter(3)\n ROOT dynamic-slice = s32[1,1,1] dynamic-slice(operand, index.0, index.1, index.2), dynamic_slice_sizes={1,1,1}\n }\n )\";\n HloModuleConfig config;\n DebugOptions debug_options = config.debug_options();\n debug_options.set_xla_allow_scalar_index_dynamic_ops(true);\n config.set_debug_options(debug_options);\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kDynamicSlice, config));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n DynamicIndexSplitter().Run(module.get()));\n EXPECT_FALSE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::DynamicSlice(op::Parameter(0), op::Parameter(1),\n op::Parameter(2), op::Parameter(3)));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_index_splitter.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_index_splitter_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1129,"cells":{"ID":{"kind":"string","value":"d759e3dd-49f9-433a-a301-b7317ffa4f80"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"root_instruction_sinker"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/root_instruction_sinker.cc"},"File Path for 
Unit Test":{"kind":"string","value":"third_party/xla/xla/service/root_instruction_sinker_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/root_instruction_sinker.h\"\n#include \"xla/service/tuple_util.h\"\nnamespace xla {\nnamespace {\nvoid SinkTupleRoot(HloComputation* computation) {\n HloInstruction* root = computation->root_instruction();\n CHECK(root->shape().IsTuple());\n HloInstruction* new_root = TupleUtil::Duplicate(root);\n HloInstructionSequence& sequence =\n computation->parent()->schedule().GetOrCreateSequence(computation);\n for (HloInstruction* operand : new_root->operands()) {\n sequence.push_back(operand);\n }\n sequence.push_back(new_root);\n computation->set_root_instruction(new_root);\n}\nvoid SinkNontupleRoot(HloComputation* computation) {\n HloInstruction* root = computation->root_instruction();\n CHECK(!root->shape().IsTuple());\n HloInstruction* new_root = computation->AddInstruction(\n HloInstruction::CreateBitcast(root->shape(), root));\n HloInstructionSequence& sequence =\n computation->parent()->schedule().GetOrCreateSequence(computation);\n sequence.push_back(new_root);\n computation->set_root_instruction(new_root);\n}\n} \nabsl::StatusOr RootInstructionSinker::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n TF_RET_CHECK(module->has_schedule());\n bool modified = false;\n for (HloComputation* computation :\n module->MakeNonfusionComputations(execution_threads)) {\n HloInstructionSequence& sequence =\n module->schedule().GetOrCreateSequence(computation);\n if (computation->root_instruction() ==\n sequence.instructions().at(sequence.size() - 1)) {\n continue;\n }\n if (computation->root_instruction()->shape().IsTuple()) {\n SinkTupleRoot(computation);\n } else {\n SinkNontupleRoot(computation);\n }\n modified = true;\n }\n return modified;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/root_instruction_sinker.h\"\n#include 
\"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/tests/hlo_test_base.h\"\nnamespace xla {\nnamespace {\nnamespace op = xla::testing::opcode_matchers;\nusing RootInstructionSinkerTest = HloTestBase;\nTEST_F(RootInstructionSinkerTest, TupleNoChange) {\n absl::string_view hlo_string = R\"(\n HloModule While, is_scheduled=true\n While.body {\n loop_var.1 = (s32[], s32[3]{0}) parameter(0)\n get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0\n constant.1 = s32[] constant(1)\n add = s32[] add(get-tuple-element.1, constant.1)\n get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1\n multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)\n ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)\n }\n While.condition {\n loop_var.2 = (s32[], s32[3]{0}) parameter(0)\n get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0\n constant.2 = s32[] constant(100)\n ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT\n }\n ENTRY While {\n constant.3 = s32[] constant(42)\n constant.4 = s32[3]{0} constant({0, 1, 2})\n tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)\n ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=\n While.condition, body=While.body\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto while_body =\n module->entry_computation()->root_instruction()->while_body();\n int num_body_instructions = while_body->instruction_count();\n RootInstructionSinker sinker;\n EXPECT_FALSE(sinker.Run(module.get()).value());\n EXPECT_EQ(module->entry_computation()\n ->root_instruction()\n ->while_body()\n ->instruction_count(),\n num_body_instructions);\n}\nTEST_F(RootInstructionSinkerTest, Tuple) {\n absl::string_view hlo_string = R\"(\n HloModule While, is_scheduled=true\n While.body {\n loop_var.1 = (s32[], s32[3]{0}) parameter(0)\n get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0\n constant.1 = s32[] constant(1)\n 
add = s32[] add(get-tuple-element.1, constant.1)\n get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1\n multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)\n ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)\n after-all = token[] after-all()\n send = (s32[3]{0}, u32[], token[]) send(multiply, after-all), channel_id=1\n send-done = token[] send-done(send), channel_id=1\n }\n While.condition {\n loop_var.2 = (s32[], s32[3]{0}) parameter(0)\n get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0\n constant.2 = s32[] constant(100)\n ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT\n }\n ENTRY While {\n constant.3 = s32[] constant(42)\n constant.4 = s32[3]{0} constant({0, 1, 2})\n tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)\n ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=\n While.condition, body=While.body\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n RootInstructionSinker sinker;\n EXPECT_TRUE(sinker.Run(module.get()).value());\n auto while_body =\n module->entry_computation()->root_instruction()->while_body();\n const auto& sequence = module->schedule().sequence(while_body);\n EXPECT_EQ(sequence.instructions().at(sequence.size() - 1),\n while_body->root_instruction());\n EXPECT_THAT(while_body->root_instruction(),\n op::Tuple(op::GetTupleElement(op::Tuple()),\n op::GetTupleElement(op::Tuple())));\n}\nTEST_F(RootInstructionSinkerTest, NontupleNoChange) {\n absl::string_view hlo_string = R\"(\n HloModule Call, is_scheduled=true\n Call {\n param = s32[3]{0} parameter(0)\n ROOT multiply = s32[3]{0} multiply(param, param)\n }\n ENTRY While {\n constant.4 = s32[3]{0} constant({0, 1, 2})\n ROOT call = s32[3]{0} call(constant.4), to_apply=Call\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto called_computation =\n 
module->entry_computation()->root_instruction()->called_computations()[0];\n int num_instructions = called_computation->instruction_count();\n RootInstructionSinker sinker;\n EXPECT_FALSE(sinker.Run(module.get()).value());\n EXPECT_EQ(module->entry_computation()\n ->root_instruction()\n ->called_computations()[0]\n ->instruction_count(),\n num_instructions);\n}\nTEST_F(RootInstructionSinkerTest, Nontuple) {\n absl::string_view hlo_string = R\"(\n HloModule Call, is_scheduled=true\n Call {\n param = s32[3]{0} parameter(0)\n ROOT multiply = s32[3]{0} multiply(param, param)\n after-all = token[] after-all()\n send = (s32[3]{0}, u32[], token[]) send(multiply, after-all), channel_id=1\n send-done = token[] send-done(send), channel_id=1\n }\n ENTRY While {\n constant.4 = s32[3]{0} constant({0, 1, 2})\n ROOT call = s32[3]{0} call(constant.4), to_apply=Call\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n RootInstructionSinker sinker;\n EXPECT_TRUE(sinker.Run(module.get()).value());\n auto called_computation =\n module->entry_computation()->root_instruction()->called_computations()[0];\n const auto& sequence = module->schedule().sequence(called_computation);\n EXPECT_EQ(sequence.instructions().at(sequence.size() - 1),\n called_computation->root_instruction());\n EXPECT_THAT(called_computation->root_instruction(),\n op::Bitcast(op::Multiply()));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/root_instruction_sinker.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/root_instruction_sinker_test.cc"},"Commit 
Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1130,"cells":{"ID":{"kind":"string","value":"5c54fd05-5e24-4353-98e8-4a51907c7f2c"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"dot_decomposer"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/dot_decomposer.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/dot_decomposer_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/dot_decomposer.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/status/status.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/span.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/shape_inference.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/logging.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nabsl::Status CanonicalizeDot(HloDotInstruction* original_dot) {\n auto computation = original_dot->parent();\n const auto& original_dnums = original_dot->dot_dimension_numbers();\n const int64_t num_batch_dims = original_dnums.lhs_batch_dimensions_size();\n const int64_t num_contracting_dims =\n original_dnums.lhs_contracting_dimensions_size();\n int lhs_sparse_dim = -1, rhs_sparse_dim = -1;\n for (const SparsityDescriptor& descriptor : original_dot->sparsity()) {\n (descriptor.index() == 0 ? 
lhs_sparse_dim : rhs_sparse_dim) =\n descriptor.dimension();\n }\n auto move_dim_to_end = [&](std::vector& dims, int sparse_dim) {\n if (sparse_dim < 0) return;\n auto it = std::remove(dims.begin(), dims.end(), sparse_dim);\n *it = sparse_dim; \n };\n const auto& lhs_shape = original_dot->operand(0)->shape();\n const int64_t lhs_rank = lhs_shape.rank();\n const int64_t num_lhs_non_contracting_dims =\n lhs_rank - num_batch_dims - num_contracting_dims;\n std::vector lhs_non_contracting_dims;\n lhs_non_contracting_dims.reserve(num_lhs_non_contracting_dims);\n int64_t lhs_contracting_size = 1;\n bool lhs_contracting_dynamic = false;\n int64_t lhs_non_contracting_size = 1;\n bool lhs_non_contracting_dynamic = false;\n std::vector batch_dim_sizes;\n batch_dim_sizes.reserve(num_batch_dims);\n std::vector batch_dynamic_dims;\n batch_dynamic_dims.reserve(num_batch_dims);\n for (int64_t i = 0; i < lhs_rank; ++i) {\n if (absl::c_linear_search(original_dnums.lhs_contracting_dimensions(), i)) {\n lhs_contracting_size *= lhs_shape.dimensions(i);\n lhs_contracting_dynamic |= lhs_shape.is_dynamic_dimension(i);\n } else if (absl::c_linear_search(original_dnums.lhs_batch_dimensions(),\n i)) {\n batch_dim_sizes.push_back(lhs_shape.dimensions(i));\n batch_dynamic_dims.push_back(lhs_shape.is_dynamic_dimension(i));\n } else {\n lhs_non_contracting_dims.push_back(i);\n lhs_non_contracting_size *= lhs_shape.dimensions(i);\n lhs_non_contracting_dynamic |= lhs_shape.is_dynamic_dimension(i);\n }\n }\n std::vector lhs_transpose;\n lhs_transpose.reserve(lhs_rank);\n lhs_transpose.insert(lhs_transpose.end(),\n original_dnums.lhs_batch_dimensions().begin(),\n original_dnums.lhs_batch_dimensions().end());\n lhs_transpose.insert(lhs_transpose.end(), lhs_non_contracting_dims.begin(),\n lhs_non_contracting_dims.end());\n lhs_transpose.insert(lhs_transpose.end(),\n original_dnums.lhs_contracting_dimensions().begin(),\n original_dnums.lhs_contracting_dimensions().end());\n 
move_dim_to_end(lhs_transpose, lhs_sparse_dim);\n HloInstruction* lhs_operand = original_dot->mutable_operand(0);\n HloInstruction* transposed_lhs = computation->AddInstruction(\n HloInstruction::CreateTranspose(\n ShapeUtil::PermuteDimensions(lhs_transpose, lhs_shape), lhs_operand,\n lhs_transpose),\n &lhs_operand->metadata());\n std::vector lhs_reshape_dims = batch_dim_sizes;\n std::vector lhs_reshape_dynamic_dims = batch_dynamic_dims;\n if (lhs_non_contracting_size > 1) {\n lhs_reshape_dims.push_back(lhs_non_contracting_size);\n lhs_reshape_dynamic_dims.push_back(lhs_non_contracting_dynamic);\n }\n lhs_reshape_dims.push_back(lhs_contracting_size);\n lhs_reshape_dynamic_dims.push_back(lhs_contracting_dynamic);\n HloInstruction* reshaped_lhs = computation->AddInstruction(\n HloInstruction::CreateReshape(\n ShapeUtil::MakeShape(lhs_shape.element_type(), lhs_reshape_dims,\n lhs_reshape_dynamic_dims),\n transposed_lhs),\n &transposed_lhs->metadata());\n const auto& rhs_shape = original_dot->operand(1)->shape();\n const int64_t rhs_rank = rhs_shape.rank();\n const int64_t num_rhs_non_contracting_dims =\n rhs_rank - num_batch_dims - num_contracting_dims;\n std::vector rhs_non_contracting_dims;\n rhs_non_contracting_dims.reserve(num_rhs_non_contracting_dims);\n int64_t rhs_non_contracting_size = 1;\n bool rhs_non_contracting_dynamic = false;\n int64_t rhs_contracting_size = 1;\n bool rhs_contracting_dynamic = false;\n for (int64_t i = 0; i < rhs_rank; ++i) {\n if (absl::c_linear_search(original_dnums.rhs_contracting_dimensions(), i)) {\n rhs_contracting_size *= rhs_shape.dimensions(i);\n rhs_contracting_dynamic |= rhs_shape.is_dynamic_dimension(i);\n } else if (!absl::c_linear_search(original_dnums.rhs_batch_dimensions(),\n i)) {\n rhs_non_contracting_dims.push_back(i);\n rhs_non_contracting_size *= rhs_shape.dimensions(i);\n rhs_non_contracting_dynamic |= rhs_shape.is_dynamic_dimension(i);\n }\n }\n std::vector rhs_transpose;\n rhs_transpose.reserve(rhs_rank);\n 
rhs_transpose.insert(rhs_transpose.end(),\n original_dnums.rhs_batch_dimensions().begin(),\n original_dnums.rhs_batch_dimensions().end());\n rhs_transpose.insert(rhs_transpose.end(),\n original_dnums.rhs_contracting_dimensions().begin(),\n original_dnums.rhs_contracting_dimensions().end());\n move_dim_to_end(rhs_transpose, rhs_sparse_dim);\n rhs_transpose.insert(rhs_transpose.end(), rhs_non_contracting_dims.begin(),\n rhs_non_contracting_dims.end());\n HloInstruction* rhs_operand = original_dot->mutable_operand(1);\n HloInstruction* transposed_rhs = computation->AddInstruction(\n HloInstruction::CreateTranspose(\n ShapeUtil::PermuteDimensions(rhs_transpose, rhs_shape), rhs_operand,\n rhs_transpose),\n &rhs_operand->metadata());\n std::vector rhs_reshape_dims = batch_dim_sizes;\n rhs_reshape_dims.push_back(rhs_contracting_size);\n std::vector rhs_reshape_dynamic_dims = batch_dynamic_dims;\n rhs_reshape_dynamic_dims.push_back(rhs_contracting_dynamic);\n if (rhs_non_contracting_size > 1) {\n rhs_reshape_dims.push_back(rhs_non_contracting_size);\n rhs_reshape_dynamic_dims.push_back(rhs_non_contracting_dynamic);\n }\n HloInstruction* reshaped_rhs = computation->AddInstruction(\n HloInstruction::CreateReshape(\n ShapeUtil::MakeShape(rhs_shape.element_type(), rhs_reshape_dims,\n rhs_reshape_dynamic_dims),\n transposed_rhs),\n &transposed_rhs->metadata());\n std::vector dot_dims = batch_dim_sizes;\n std::vector dot_dynamic_dims = batch_dynamic_dims;\n if (lhs_non_contracting_size > 1) {\n dot_dims.push_back(lhs_non_contracting_size);\n dot_dynamic_dims.push_back(lhs_non_contracting_dynamic);\n }\n if (rhs_non_contracting_size > 1) {\n dot_dims.push_back(rhs_non_contracting_size);\n dot_dynamic_dims.push_back(rhs_non_contracting_dynamic);\n }\n DotDimensionNumbers dot_dnums;\n for (int64_t i = 0; i < num_batch_dims; ++i) {\n dot_dnums.add_lhs_batch_dimensions(i);\n dot_dnums.add_rhs_batch_dimensions(i);\n }\n dot_dnums.add_lhs_contracting_dimensions(\n num_batch_dims + 
(lhs_non_contracting_size > 1 ? 1 : 0));\n dot_dnums.add_rhs_contracting_dimensions(num_batch_dims);\n std::vector sparsity;\n std::vector sparse_meta;\n sparsity.reserve(original_dot->sparse_operands());\n sparse_meta.reserve(original_dot->sparse_operands());\n auto transpose_meta = [&](HloInstruction* original_meta,\n absl::Span transpose) {\n return computation->AddInstruction(\n HloInstruction::CreateTranspose(\n ShapeUtil::PermuteDimensions(transpose, original_meta->shape()),\n original_meta, transpose),\n &original_meta->metadata());\n };\n for (int i = 0; i < original_dot->sparse_operands(); ++i) {\n SparsityDescriptor descriptor = original_dot->sparsity()[i];\n descriptor.set_dimension(num_batch_dims + (descriptor.index() == 0 &&\n lhs_non_contracting_size > 1));\n sparsity.push_back(descriptor);\n HloInstruction* meta =\n original_dot->mutable_operand(HloDotInstruction::kOperands + i);\n HloInstruction* meta_operand;\n if (descriptor.index() == 0) {\n meta = transpose_meta(meta, lhs_transpose);\n meta_operand = reshaped_lhs;\n } else {\n meta = transpose_meta(meta, rhs_transpose);\n meta_operand = reshaped_rhs;\n }\n TF_ASSIGN_OR_RETURN(Shape result_shape,\n ShapeInference::InferSparseDotMetadataShape(\n meta_operand->shape(), dot_dnums, descriptor));\n meta = computation->AddInstruction(\n HloInstruction::CreateReshape(result_shape, meta), &meta->metadata());\n sparse_meta.push_back(meta);\n }\n HloInstruction* dot = computation->AddInstruction(HloInstruction::CreateDot(\n ShapeUtil::MakeShape(original_dot->shape().element_type(), dot_dims,\n dot_dynamic_dims),\n reshaped_lhs, reshaped_rhs, dot_dnums, original_dot->precision_config(),\n sparsity, sparse_meta));\n original_dot->SetupDerivedInstruction(dot);\n std::unique_ptr replacement =\n HloInstruction::CreateReshape(original_dot->shape(), dot);\n VLOG(3) << \"Canonicalizing dot:\\n\"\n << \"\\t old: \" << original_dot->ToString() << \"\\n\"\n << \"\\t new: \" << dot->ToString() << \"\\n\"\n << \"\\t -> 
\" << replacement->ToString();\n return computation->ReplaceWithNewInstruction(original_dot,\n std::move(replacement));\n}\n} \nabsl::StatusOr DotDecomposer::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n std::vector non_canonical_dots;\n for (auto* computation :\n module->MakeNonfusionComputations(execution_threads)) {\n for (auto* instruction : computation->instructions()) {\n if (instruction->opcode() != HloOpcode::kDot) {\n continue;\n }\n const DotDimensionNumbers& dnums = instruction->dot_dimension_numbers();\n if (dnums.lhs_contracting_dimensions_size() != 1) {\n non_canonical_dots.push_back(instruction);\n continue;\n }\n if (dnums.lhs_batch_dimensions_size() + 2 <\n instruction->operand(0)->shape().rank() ||\n dnums.rhs_batch_dimensions_size() + 2 <\n instruction->operand(1)->shape().rank()) {\n non_canonical_dots.push_back(instruction);\n continue;\n }\n if (dnums.lhs_batch_dimensions().empty() &&\n dnums.lhs_contracting_dimensions().empty()) {\n non_canonical_dots.push_back(instruction);\n continue;\n }\n std::vector canonical_batch_dims(\n dnums.lhs_batch_dimensions_size());\n absl::c_iota(canonical_batch_dims, 0);\n if (!absl::c_equal(dnums.lhs_batch_dimensions(), canonical_batch_dims) ||\n !absl::c_equal(dnums.rhs_batch_dimensions(), canonical_batch_dims)) {\n non_canonical_dots.push_back(instruction);\n }\n }\n }\n bool changed = false;\n for (auto* dot : non_canonical_dots) {\n TF_RETURN_IF_ERROR(CanonicalizeDot(Cast(dot)));\n changed = true;\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/dot_decomposer.h\"\n#include \n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/service/pattern_matcher.h\"\n#include 
\"xla/service/pattern_matcher_gmock.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"tsl/platform/statusor.h\"\n#include \"tsl/platform/test.h\"\nnamespace xla {\nnamespace {\nnamespace m = ::xla::match;\nnamespace op = ::xla::testing::opcode_matchers;\nusing DotDecomposerTest = HloTestBase;\nTEST_F(DotDecomposerTest, CanonicalizeMultipleNonContractingDims) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n p0 = f32[64,63,512]{2,1,0} parameter(0)\n p1 = f32[512,512]{1,0} parameter(1)\n ROOT dot = f32[64,63,512]{2,1,0} dot(p0, p1), lhs_contracting_dims={2},\n rhs_contracting_dims={0}\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,\n DotDecomposer().Run(module.get()));\n EXPECT_TRUE(canonicalized);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Reshape(AllOf(op::Dot(op::Reshape(), op::Reshape(),\n 1,\n 0),\n op::Shape(\"f32[4032,512]\"))));\n}\nTEST_F(DotDecomposerTest, DontCanonicalizeIfNoNoncontractingDims) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n p0 = f32[64,4]{1,0} parameter(0)\n p1 = f32[64,4]{1,0} parameter(1)\n ROOT dot = f32[64]{0} dot(p0, p1), lhs_batch_dims={0},\n lhs_contracting_dims={1},\n rhs_batch_dims={0},\n rhs_contracting_dims={1}\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,\n DotDecomposer().Run(module.get()));\n EXPECT_FALSE(canonicalized);\n}\nTEST_F(DotDecomposerTest, DontAddLhsNonContractingDimIfOne) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n p0 = f32[64,4]{1,0} parameter(0)\n p1 = f32[64,4,2,1]{3,2,1,0} parameter(1)\n ROOT dot = f32[64,2,1]{2,1,0} dot(p0, p1), lhs_batch_dims={0},\n lhs_contracting_dims={1},\n rhs_batch_dims={0},\n rhs_contracting_dims={1}\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr 
module,\n ParseAndReturnVerifiedModule(module_string));\n TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,\n DotDecomposer().Run(module.get()));\n EXPECT_TRUE(canonicalized);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Reshape(AllOf(op::Dot(op::Reshape(), op::Reshape(),\n 1,\n 1),\n op::Shape(\"f32[64,2]\"))));\n}\nTEST_F(DotDecomposerTest, DontAddRhsNonContractingDimIfOne) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n p0 = f32[64,4,2,1]{3,2,1,0} parameter(0)\n p1 = f32[64,4]{1,0} parameter(1)\n ROOT dot = f32[64,2,1]{2,1,0} dot(p0, p1), lhs_batch_dims={0},\n lhs_contracting_dims={1},\n rhs_batch_dims={0},\n rhs_contracting_dims={1}\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,\n DotDecomposer().Run(module.get()));\n EXPECT_TRUE(canonicalized);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Reshape(AllOf(op::Dot(op::Reshape(), op::Reshape(),\n 2,\n 1),\n op::Shape(\"f32[64,2]\"))));\n}\ntemplate \nauto SparseDotMatcher(Arg0&& arg0, Arg1&& arg1, Arg2&& arg2) {\n return match::Op()\n .WithOpcode(HloOpcode::kDot)\n .WithOperand(0, std::forward(arg0))\n .WithOperand(1, std::forward(arg1))\n .WithOperand(2, std::forward(arg2));\n}\nTEST_F(DotDecomposerTest, CanonicalizeSparseLhs) {\n absl::string_view kHlo = R\"(\n HloModule module\n ENTRY main {\n lhs = f32[16,4,3,7] parameter(0)\n rhs = f32[32,4,5,7] parameter(1)\n meta = u16[2,4,3,7] parameter(2)\n ROOT dot = f32[7,3,5] dot(lhs, rhs, meta), sparsity=L.0@2:4,\n lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1},\n lhs_batch_dims={3}, rhs_batch_dims={3}\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHlo));\n TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,\n DotDecomposer().Run(module.get()));\n EXPECT_TRUE(canonicalized);\n HloInstruction* root = module->entry_computation()->root_instruction();\n 
EXPECT_THAT(root, GmockMatch(m::Reshape(SparseDotMatcher(\n m::Reshape(m::Transpose(m::Parameter(0))),\n m::Reshape(m::Transpose(m::Parameter(1))),\n m::Reshape(m::Transpose(m::Parameter(2)))))));\n auto dot = Cast(root->operand(0));\n auto descriptor = dot->sparsity().front();\n EXPECT_EQ(descriptor.index(), 0);\n EXPECT_EQ(descriptor.dimension(), 2);\n}\nTEST_F(DotDecomposerTest, CanonicalizeSparseRhs) {\n absl::string_view kHlo = R\"(\n HloModule module\n ENTRY main {\n lhs = f32[32,4,3,7] parameter(0)\n rhs = f32[16,4,5,7] parameter(1)\n meta = u16[2,4,5,7] parameter(2)\n ROOT dot = f32[7,3,5] dot(lhs, rhs, meta), sparsity=R.0@2:4,\n lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1},\n lhs_batch_dims={3}, rhs_batch_dims={3}\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHlo));\n TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,\n DotDecomposer().Run(module.get()));\n EXPECT_TRUE(canonicalized);\n HloInstruction* root = module->entry_computation()->root_instruction();\n EXPECT_THAT(root, GmockMatch(m::Reshape(SparseDotMatcher(\n m::Reshape(m::Transpose(m::Parameter(0))),\n m::Reshape(m::Transpose(m::Parameter(1))),\n m::Reshape(m::Transpose(m::Parameter(2)))))));\n auto dot = Cast(root->operand(0));\n auto descriptor = dot->sparsity().front();\n EXPECT_EQ(descriptor.index(), 1);\n EXPECT_EQ(descriptor.dimension(), 1);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dot_decomposer.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dot_decomposer_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1131,"cells":{"ID":{"kind":"string","value":"8a31a24b-fd06-41cc-bf29-c12ce667dce2"},"Language":{"kind":"string","value":"cpp"},"Repository 
Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"hlo_constant_folding"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/hlo_constant_folding.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/hlo_constant_folding_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/hlo_constant_folding.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/strings/str_format.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/time/time.h\"\n#include \"xla/hlo/evaluator/hlo_evaluator.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/literal.h\"\n#include \"xla/service/slow_operation_alarm.h\"\n#include \"xla/shape_util.h\"\n#include \"tsl/platform/errors.h\"\nnamespace xla {\nstatic bool IsOrContainsIllegalInstr(const HloInstruction* instr) {\n if (instr->opcode() == HloOpcode::kAfterAll ||\n instr->opcode() == HloOpcode::kRng) {\n return true;\n }\n for (const HloComputation* c : instr->called_computations()) {\n if (absl::c_any_of(c->instructions(), IsOrContainsIllegalInstr)) {\n return true;\n }\n }\n return false;\n}\n std::atomic HloConstantFolding::slow_op_counter_{0};\nabsl::StatusOr HloConstantFolding::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n auto evaluator = std::make_unique(0);\n evaluator->set_use_fast_path(true);\n std::vector dead_instructions;\n for (auto* computation :\n module->MakeNonfusionComputations(execution_threads)) {\n for (auto* instruction : computation->MakeInstructionPostOrder()) {\n if (instruction->IsDead()) {\n continue;\n }\n if (!absl::c_any_of(instruction->operands(),\n HloPredicateIsOp) ||\n !absl::c_all_of(\n 
instruction->operands(), [](const HloInstruction* operand) {\n return operand->opcode() == HloOpcode::kConstant ||\n (operand->opcode() == HloOpcode::kBroadcast &&\n operand->operand(0)->opcode() == HloOpcode::kConstant);\n })) {\n continue;\n }\n if (instruction->opcode() == HloOpcode::kParameter ||\n instruction->opcode() == HloOpcode::kConstant ||\n instruction->opcode() == HloOpcode::kTuple) {\n continue;\n }\n if (instruction->opcode() == HloOpcode::kBroadcast ||\n instruction->opcode() == HloOpcode::kIota) {\n continue;\n }\n if (instruction->IsAsynchronous() &&\n instruction->async_execution_thread() !=\n instruction->parent()->execution_thread()) {\n continue;\n }\n if (instruction->opcode() == HloOpcode::kFft) {\n continue;\n }\n if (IsOrContainsIllegalInstr(instruction)) {\n continue;\n }\n if (instruction->HasSideEffect()) {\n continue;\n }\n if (instruction->opcode() == HloOpcode::kPad &&\n instruction->operand(0)->opcode() == HloOpcode::kBroadcast &&\n instruction->operand(1)->opcode() == HloOpcode::kConstant) {\n continue;\n }\n if (instruction->shape().IsArray()) {\n int64_t elements_in_operands = 0;\n for (HloInstruction* operand : instruction->operands()) {\n if (operand->shape().IsArray()) {\n elements_in_operands += ShapeUtil::ElementsIn(operand->shape());\n }\n }\n int64_t elements_in_constant =\n ShapeUtil::ElementsIn(instruction->shape());\n static const int64_t kMaximumConstantSizeElements = 45 * 1000 * 1000;\n if (std::max(elements_in_constant, elements_in_operands) >\n kMaximumConstantSizeElements) {\n VLOG(2) << \"Ignore constant folding: result shape size is \"\n << elements_in_constant << \" total size of arguments is \"\n << elements_in_operands;\n continue;\n }\n }\n VLOG(5) << \"Constant folding: \" << instruction->ToString();\n absl::Duration slow_timeout =\n absl::Seconds(uint64_t{1} << slow_op_counter_.load());\n SlowOperationAlarm slow_alarm(slow_timeout, [instruction, slow_timeout] {\n const bool ndebug =\n#if NDEBUG\n 
true;\n#else\n false;\n#endif\n absl::string_view explanation_msg =\n ndebug\n ? \"This isn't necessarily a bug; constant-folding is \"\n \"inherently a trade-off between compilation time and speed \"\n \"at runtime. XLA has some guards that attempt to keep \"\n \"constant folding from taking too long, but fundamentally \"\n \"you'll always be able to come up with an input program that \"\n \"takes a long time.\\n\\n\"\n \"If you'd like to file a bug, run with envvar \"\n \"XLA_FLAGS=--xla_dump_to=/tmp/foo and attach the results.\"\n : \"XLA was built without compiler optimizations, which can be \"\n \"slow. Try rebuilding with -c opt.\";\n return absl::StrFormat(\n \"Constant folding an instruction is taking > %s:\\n\\n\"\n \" %s\\n\\n\" \n \"%s\", \n absl::FormatDuration(slow_timeout), instruction->ToString(),\n explanation_msg);\n });\n Literal result;\n if (!evaluator->TryEvaluate(\n instruction, &result,\n true)) {\n VLOG(2) << \"Constant folding failed for instruction: \"\n << instruction->ToString();\n continue;\n }\n slow_alarm.cancel();\n if (slow_alarm.fired()) {\n slow_op_counter_++;\n }\n VLOG(4) << \"Constant folded: \" << instruction->ToString();\n dead_instructions.push_back(instruction);\n HloInstruction* new_constant = instruction->AddInstruction(\n HloInstruction::CreateConstant(std::move(result)));\n if (new_constant->shape().has_layout()) {\n new_constant->mutable_shape()\n ->mutable_layout()\n ->set_element_size_in_bits(\n instruction->shape().layout().element_size_in_bits());\n }\n TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(new_constant));\n }\n }\n const bool changed = !dead_instructions.empty();\n for (HloInstruction* dead_instruction : dead_instructions) {\n CHECK(dead_instruction->IsDead());\n HloComputation* computation = dead_instruction->parent();\n TF_RETURN_IF_ERROR(computation->RemoveInstruction(dead_instruction));\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include 
\"xla/service/hlo_constant_folding.h\"\n#include \n#include \n#include \n#include \n#include \"absl/strings/string_view.h\"\n#include \"absl/types/span.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/layout_util.h\"\n#include \"xla/literal.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/permutation_util.h\"\n#include \"xla/primitive_util.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/service/pattern_matcher.h\"\n#include \"xla/service/pattern_matcher_gmock.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/test.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nnamespace op = xla::testing::opcode_matchers;\nnamespace m = xla::match;\nusing HloConstantFoldingTest = HloTestBase;\nTEST_F(HloConstantFoldingTest, ConvertF32ToS64) {\n HloComputation::Builder builder(TestName());\n HloInstruction* input = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(42.0f)));\n builder.AddInstruction(\n HloInstruction::CreateConvert(ShapeUtil::MakeShape(S64, {}), input));\n auto module = CreateNewVerifiedModule();\n auto computation = module->AddEntryComputation(builder.Build());\n EXPECT_THAT(computation->root_instruction(),\n GmockMatch(m::Convert().WithOperand(0, m::Op().Is(input))));\n HloConstantFolding const_folder;\n TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));\n EXPECT_TRUE(result);\n EXPECT_THAT(computation->root_instruction(), GmockMatch(m::Constant()));\n EXPECT_EQ(\n computation->root_instruction()->literal().GetFirstElement(),\n 42);\n}\nTEST_F(HloConstantFoldingTest, ConvertS64ToF32) {\n HloComputation::Builder builder(TestName());\n HloInstruction* input = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(42)));\n builder.AddInstruction(\n 
HloInstruction::CreateConvert(ShapeUtil::MakeShape(F32, {}), input));\n auto module = CreateNewVerifiedModule();\n auto computation = module->AddEntryComputation(builder.Build());\n EXPECT_THAT(computation->root_instruction(),\n GmockMatch(m::Convert().WithOperand(0, m::Op().Is(input))));\n HloConstantFolding const_folder;\n TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));\n EXPECT_TRUE(result);\n EXPECT_THAT(computation->root_instruction(), GmockMatch(m::Constant()));\n EXPECT_EQ(computation->root_instruction()->literal().GetFirstElement(),\n 42.0f);\n}\nTEST_F(HloConstantFoldingTest, ConvertF32ArrayToS64Array) {\n HloComputation::Builder builder(TestName());\n HloInstruction* input = builder.AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR1({42.0f, 19.0f})));\n builder.AddInstruction(\n HloInstruction::CreateConvert(ShapeUtil::MakeShape(S64, {2}), input));\n auto module = CreateNewVerifiedModule();\n auto computation = module->AddEntryComputation(builder.Build());\n EXPECT_THAT(computation->root_instruction(),\n GmockMatch(m::Convert().WithOperand(0, m::Op().Is(input))));\n HloConstantFolding const_folder;\n TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));\n EXPECT_TRUE(result);\n EXPECT_THAT(computation->root_instruction(), GmockMatch(m::Constant()));\n EXPECT_EQ(computation->root_instruction()->literal().Get({0}), 42);\n EXPECT_EQ(computation->root_instruction()->literal().Get({1}), 19);\n}\nTEST_F(HloConstantFoldingTest, Concatenate) {\n const struct TestConfig {\n int concat_dimension;\n std::vector dimensions;\n std::vector concat_sizes;\n } test_configs[] = {\n {1, {11, 0, 7, 5, 9}, {2, 5, 7, 11}},\n {3, {1, 4, 17, 0, 8}, {1, 3, 9, 12}},\n };\n for (auto& test_config : test_configs) {\n HloComputation::Builder builder(TestName());\n std::vector dimensions(test_config.dimensions.begin(),\n test_config.dimensions.end());\n int64_t concat_size = 0;\n std::vector operands;\n for (auto csize : 
test_config.concat_sizes) {\n dimensions[test_config.concat_dimension] = csize;\n concat_size += csize;\n auto literal = LiteralUtil::CreateFromDimensions(F32, dimensions);\n HloInstruction* insn = builder.AddInstruction(\n HloInstruction::CreateConstant(std::move(literal)));\n operands.push_back(insn);\n }\n dimensions[test_config.concat_dimension] = concat_size;\n Shape shape = ShapeUtil::MakeShape(F32, dimensions);\n builder.AddInstruction(HloInstruction::CreateConcatenate(\n shape, operands, test_config.concat_dimension));\n auto module = CreateNewVerifiedModule();\n auto computation = module->AddEntryComputation(builder.Build());\n HloConstantFolding const_folder;\n TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));\n EXPECT_TRUE(result);\n HloInstruction* root = computation->root_instruction();\n EXPECT_THAT(root, GmockMatch(m::Constant()));\n EXPECT_TRUE(ShapeUtil::Equal(root->shape(), shape));\n }\n}\nTEST_F(HloConstantFoldingTest, Slice) {\n HloComputation::Builder builder(TestName());\n const int64_t dimensions[] = {11, 8, 7, 5, 9};\n const int64_t slice_start[] = {4, 2, 3, 1, 5};\n const int64_t slice_limits[] = {10, 8, 6, 5, 9};\n const int64_t slice_strides[] = {1, 1, 1, 1, 1};\n TF_ASSERT_OK_AND_ASSIGN(auto literal,\n LiteralUtil::CreateRandomLiteral(\n ShapeUtil::MakeShape(F32, dimensions), 0.0, 1.0));\n HloInstruction* literal_instruction = builder.AddInstruction(\n HloInstruction::CreateConstant(std::move(literal)));\n Shape shape = ShapeUtil::MakeShape(F32, {6, 6, 3, 4, 4});\n builder.AddInstruction(HloInstruction::CreateSlice(\n shape, literal_instruction, slice_start, slice_limits, slice_strides));\n auto module = CreateNewVerifiedModule();\n auto computation = module->AddEntryComputation(builder.Build());\n HloConstantFolding const_folder;\n TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));\n EXPECT_TRUE(result);\n HloInstruction* root = computation->root_instruction();\n EXPECT_THAT(root, 
GmockMatch(m::Constant()));\n EXPECT_TRUE(ShapeUtil::Equal(root->shape(), shape));\n}\nTEST_F(HloConstantFoldingTest, TransposeConstantFold) {\n HloComputation::Builder builder(TestName());\n const int64_t dimensions[] = {11, 8, 7, 5, 9};\n TF_ASSERT_OK_AND_ASSIGN(auto literal,\n LiteralUtil::CreateRandomLiteral(\n ShapeUtil::MakeShape(F32, dimensions), 0.0, 1.0));\n auto literal_clone = literal.Clone();\n HloInstruction* literal_instruction = builder.AddInstruction(\n HloInstruction::CreateConstant(std::move(literal)));\n Shape shape = ShapeUtil::MakeShape(F32, {8, 7, 11, 9, 5});\n const int64_t permutation[] = {1, 2, 0, 4, 3};\n builder.AddInstruction(\n HloInstruction::CreateTranspose(shape, literal_instruction, permutation));\n auto module = CreateNewVerifiedModule();\n auto computation = module->AddEntryComputation(builder.Build());\n HloConstantFolding const_folder;\n TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));\n EXPECT_TRUE(result);\n HloInstruction* root = computation->root_instruction();\n EXPECT_THAT(root, GmockMatch(m::Constant()));\n EXPECT_TRUE(ShapeUtil::Compatible(root->shape(), shape));\n using NativeT = typename primitive_util::PrimitiveTypeToNative::type;\n bool matched = true;\n root->literal().EachCell(\n [&](absl::Span indices, NativeT value) {\n std::vector rindexes = PermuteInverse(indices, permutation);\n matched = matched && (value == literal_clone.Get(rindexes));\n });\n EXPECT_TRUE(matched);\n}\nconst char* const kConstantFoldReduce = R\"(\n HloModule ConstantFoldReduce\n add {\n a = s32[] parameter(0)\n b = s32[] parameter(1)\n ROOT add = s32[] add(a, b)\n }\n ENTRY r {\n x = s32[3] constant({1, 2, 3})\n init = s32[] constant(0)\n ROOT reduce = s32[] reduce(x, init), dimensions={0}, to_apply=add\n })\";\nTEST_F(HloConstantFoldingTest, ConstantFoldReduce) {\n TF_ASSERT_OK_AND_ASSIGN(auto m,\n ParseAndReturnVerifiedModule(kConstantFoldReduce));\n HloConstantFolding const_folder;\n TF_ASSERT_OK_AND_ASSIGN(bool 
result, const_folder.Run(m.get()));\n EXPECT_TRUE(result);\n EXPECT_EQ(6, m->entry_computation()\n ->root_instruction()\n ->literal()\n .GetFirstElement());\n}\nconstexpr absl::string_view kConstantFoldReduceWithMetadata = R\"(\n HloModule ConstantFoldReduce\n add {\n a = s32[] parameter(0)\n b = s32[] parameter(1)\n ROOT add = s32[] add(a, b)\n }\n ENTRY r {\n x = s32[3] constant({1, 2, 3}), metadata={op_name=\"constant\"}\n init = s32[] constant(0), metadata={op_name=\"zero_constant\"}\n ROOT reduce = s32[] reduce(x, init), metadata={op_name=\"reduce\"}, dimensions={0}, to_apply=add\n })\";\nTEST_F(HloConstantFoldingTest, ConstantFoldReduceCheckMetadata) {\n TF_ASSERT_OK_AND_ASSIGN(\n auto m, ParseAndReturnVerifiedModule(kConstantFoldReduceWithMetadata));\n HloConstantFolding const_folder;\n TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(m.get()));\n EXPECT_TRUE(result);\n OpMetadata reduce_metadata;\n reduce_metadata.set_op_name(\"reduce\");\n EXPECT_THAT(m->entry_computation()->root_instruction(),\n AllOf(op::Constant(), op::Metadata(reduce_metadata)));\n}\nTEST_F(HloConstantFoldingTest, ConstantFoldReduceNoLayout) {\n TF_ASSERT_OK_AND_ASSIGN(auto m,\n ParseAndReturnVerifiedModule(kConstantFoldReduce));\n HloInstruction* add = (*m->computations().begin())->root_instruction();\n LayoutUtil::ClearLayout(add->mutable_shape());\n HloConstantFolding const_folder;\n TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(m.get()));\n EXPECT_FALSE(result);\n EXPECT_THAT(m->entry_computation()->root_instruction(),\n GmockMatch(m::Reduce()));\n}\nconst char* const kConstantFoldLargePad = R\"(\n HloModule ConstantFoldLargePad\n ENTRY r {\n a = f32[1,1,1] constant({{{7}}})\n b = f32[] constant(42)\n ROOT pad = f32[2048,2048,128] pad(a, b), padding=1024_1023x1024_1023x64_63\n })\";\nTEST_F(HloConstantFoldingTest, DoesNotFoldLargePad) {\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kConstantFoldLargePad));\n HloConstantFolding const_folder;\n 
TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));\n EXPECT_FALSE(result);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::Pad(m::Constant(), m::Constant())));\n}\nTEST_F(HloConstantFoldingTest, DoesNotFoldPadBroadcast) {\n const char* const kConstantFoldPadBroadcast = R\"(\n HloModule ConstantFoldLargePad\n ENTRY r {\n a = f32[] constant(239)\n broadcast_a = f32[4] broadcast(a), dimensions={}\n b = f32[] constant(42)\n ROOT pad = f32[8] pad(f32[4] broadcast_a, f32[] b), padding=4_0\n })\";\n TF_ASSERT_OK_AND_ASSIGN(\n auto module, ParseAndReturnVerifiedModule(kConstantFoldPadBroadcast));\n HloConstantFolding const_folder;\n TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));\n EXPECT_FALSE(result);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::Pad(m::Broadcast(), m::Constant())));\n}\nTEST_F(HloConstantFoldingTest, DoesNotFoldSlicesWithLargeOperand) {\n const char* const kModuleStr = R\"(\n HloModule test\n ENTRY r {\n a = f32[] constant(42)\n broadcast = f32[1000000000]{0} broadcast(a), dimensions={}\n slice1 = f32[10000]{0} slice(broadcast), slice={[0:10000]}\n slice2 = f32[10000]{0} slice(broadcast), slice={[10000:20000]}\n ROOT add = f32[10000]{0} add(slice1, slice2)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kModuleStr));\n HloConstantFolding const_folder;\n TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));\n EXPECT_FALSE(result);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::Add(m::Slice(), m::Slice())));\n}\nTEST_F(HloConstantFoldingTest, DontFoldSubcomputationContainingAfterAll) {\n const char* const kModuleStr = R\"(\n HloModule test\n Fn {\n tok = token[] after-all()\n ROOT root = f32[10] iota(), iota_dimension=0\n }\n ENTRY entry {\n ROOT call = f32[10] call(), to_apply=Fn\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kModuleStr));\n 
HloConstantFolding constant_folding;\n TF_ASSERT_OK_AND_ASSIGN(bool result,\n RunHloPass(&constant_folding, module.get()));\n EXPECT_FALSE(result);\n}\nTEST_F(HloConstantFoldingTest,\n DontFoldSubcomputationTransitivelyContainingRng) {\n const char* const kModuleStr = R\"(\n HloModule test\n InnerFn {\n c0 = f32[] constant(0)\n c1 = f32[] constant(1)\n ROOT rng = f32[10] rng(c0, c1), distribution=rng_uniform\n }\n Fn {\n ROOT fusion = f32[10] fusion(), kind=kLoop, calls=InnerFn\n }\n ENTRY entry {\n ROOT call = f32[10] call(), to_apply=Fn\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kModuleStr));\n HloConstantFolding constant_folding;\n TF_ASSERT_OK_AND_ASSIGN(bool result,\n RunHloPass(&constant_folding, module.get()));\n EXPECT_FALSE(result);\n}\nTEST_F(HloConstantFoldingTest, FoldOpsWhereOneOperandIsBroadcast) {\n const char* const kModuleStr = R\"(\n HloModule test\n ENTRY entry {\n not_folded1 = f32[4] broadcast(f32[] constant(1))\n not_folded2 = add(f32[4] broadcast(f32[] constant(2)),\n f32[4] broadcast(f32[] constant(3)))\n folded1 = add(f32[4] broadcast(f32[] constant(5)),\n f32[4] constant({0,1,2,3}))\n folded2 = add(f32[4] constant({0,1,2,3}),\n f32[4] broadcast(f32[] constant(5)))\n ROOT root = tuple(not_folded1, not_folded2, folded1, folded2)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kModuleStr));\n HloConstantFolding constant_folding;\n TF_ASSERT_OK_AND_ASSIGN(bool result,\n RunHloPass(&constant_folding, module.get()));\n EXPECT_TRUE(result);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::Tuple(m::Broadcast(m::Constant()),\n m::Add(m::Broadcast(m::Constant()),\n m::Broadcast(m::Constant())),\n m::Constant(),\n m::Constant() \n )));\n}\nTEST_F(HloConstantFoldingTest, FoldInt4Ops) {\n const char* const kModuleStr = R\"(\n HloModule test\n ENTRY entry {\n c0 = s4[2]{0:E(4)} constant({1, 2})\n c1 = s4[2]{0:E(4)} constant({3, 4})\n add1 = s4[2]{0:E(4)} 
add(c0, c1)\n c2 = s4[]{:E(4)} constant(5)\n add2 = s4[2]{0:E(4)} add(c0, s4[2]{0:E(4)} broadcast(c2))\n ROOT root = tuple(add1, add2)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kModuleStr));\n HloConstantFolding constant_folding;\n TF_ASSERT_OK_AND_ASSIGN(bool result,\n RunHloPass(&constant_folding, module.get()));\n EXPECT_TRUE(result);\n auto is_4_bit = [](const HloInstruction* instr) {\n return instr->shape().layout().element_size_in_bits() == 4;\n };\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::Tuple(m::Constant().WithPredicate(is_4_bit),\n m::Constant().WithPredicate(is_4_bit))));\n}\nTEST_F(HloConstantFoldingTest, BigReduceWindow) {\n constexpr absl::string_view kModuleStr = R\"(\n HloModule test\n add_bf16 {\n lhs = bf16[] parameter(0)\n rhs = bf16[] parameter(1)\n ROOT add = bf16[] add(lhs, rhs)\n }\n ENTRY accumulated_all_reduce {\n x = bf16[160,10,10,512]{3,2,1,0} broadcast(bf16[] constant(1.0))\n init = bf16[] constant(0)\n ROOT reduce-window = reduce-window(x, init), window={size=1x2x2x1 stride=1x2x2x1}, to_apply=add_bf16\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kModuleStr));\n HloConstantFolding constant_folding;\n TF_ASSERT_OK_AND_ASSIGN(bool result,\n RunHloPass(&constant_folding, module.get()));\n EXPECT_TRUE(result);\n}\nTEST_F(HloConstantFoldingTest, TimingConsumingTest) {\n constexpr absl::string_view mod_str = R\"(\n HloModule jit_f, entry_computation_layout={()->f32[]}\n region_0.4 {\n Arg_0.5 = f32[] parameter(0)\n Arg_1.6 = f32[] parameter(1)\n ROOT add.7 = f32[] add(Arg_0.5, Arg_1.6)\n }\n ENTRY main.9 {\n constant.1 = f32[] constant(1)\n broadcast.2 = f32[32,999,40,512]{3,2,1,0} broadcast(constant.1), dimensions={}\n constant.3 = f32[] constant(0)\n ROOT reduce.8 = f32[] reduce(broadcast.2, constant.3), dimensions={0,1,2,3}, to_apply=region_0.4\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, 
ParseAndReturnVerifiedModule(mod_str));\n HloConstantFolding const_fold;\n TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&const_fold, module.get()));\n EXPECT_FALSE(result);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_constant_folding.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_constant_folding_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1132,"cells":{"ID":{"kind":"string","value":"39ece867-becf-40a5-be85-a8bfb9996b0b"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"all_reduce_reassociate"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/all_reduce_reassociate.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/all_reduce_reassociate_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/all_reduce_reassociate.h\"\n#include \n#include \n#include \n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_query.h\"\n#include \"xla/literal.h\"\n#include \"xla/primitive_util.h\"\n#include \"xla/service/all_reduce_key.h\"\n#include \"xla/service/collective_ops_utils.h\"\n#include \"xla/service/pattern_matcher.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include 
\"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\nnamespace xla {\nnamespace {\nnamespace m = match;\nbool AreAllreduceKeysEqual(AllReduceKey& key0, AllReduceKey& key1,\n bool ignore_element_type) {\n if (ignore_element_type) {\n return std::get<0>(key0) == std::get<0>(key1) &&\n std::get<2>(key0) == std::get<2>(key1) &&\n std::get<3>(key0) == std::get<3>(key1) &&\n std::get<4>(key0) == std::get<4>(key1) &&\n std::get<5>(key0) == std::get<5>(key1);\n } else {\n return key0 == key1;\n }\n}\nbool AreCompatible(const HloAllReduceInstruction* ar0,\n const HloAllReduceInstruction* ar1, ReductionKind op_kind,\n bool ignore_element_type) {\n std::optional key0 = GetAllReduceKey(ar0);\n std::optional key1 = GetAllReduceKey(ar1);\n auto kind0 = MatchReductionComputation(ar0->to_apply());\n return key0 && key1 && kind0 &&\n AreAllreduceKeysEqual(*key0, *key1, ignore_element_type) &&\n kind0 == op_kind;\n}\nHloInstruction* LookThroughForAllReduce(HloInstruction* instr,\n const Literal& reduction_identity) {\n if (instr->opcode() == HloOpcode::kDynamicSlice) {\n if (instr->operand(0)->opcode() != HloOpcode::kAllReduce ||\n instr->operand(0)->user_count() != 1 || instr->user_count() != 1) {\n return nullptr;\n }\n return instr;\n }\n while (instr->opcode() != HloOpcode::kAllReduce) {\n if (instr->user_count() != 1) {\n return nullptr;\n }\n if (instr->opcode() != HloOpcode::kReshape &&\n instr->opcode() != HloOpcode::kPad &&\n instr->opcode() != HloOpcode::kSlice &&\n instr->opcode() != HloOpcode::kConvert) {\n return nullptr;\n }\n if (instr->opcode() == HloOpcode::kPad) {\n if (!instr->operand(1)->IsConstant()) {\n return nullptr;\n }\n if (instr->operand(1)->literal() != reduction_identity) {\n return nullptr;\n }\n }\n instr = instr->mutable_operand(0);\n }\n if (instr->user_count() != 1) {\n return nullptr;\n }\n return instr;\n}\nbool ReassociateAllReduceIsProfitable(HloInstruction* ar0, HloInstruction* ar1,\n HloInstruction* reassociated_inst) {\n int64_t 
pre_reassociated_size = ShapeUtil::ElementsIn(ar0->shape());\n if (ar0 != ar1) {\n pre_reassociated_size += ShapeUtil::ElementsIn(ar1->shape());\n }\n return pre_reassociated_size >=\n ShapeUtil::ElementsIn(reassociated_inst->shape());\n}\nbool AreCompatibleConverts(const HloInstruction* convert0,\n const HloInstruction* convert1) {\n bool is_compatible = true;\n if (convert0) {\n is_compatible &= primitive_util::CastPreservesValues(\n convert0->operand(0)->shape().element_type(),\n convert0->shape().element_type());\n }\n if (convert1) {\n is_compatible &= primitive_util::CastPreservesValues(\n convert1->operand(0)->shape().element_type(),\n convert1->shape().element_type());\n }\n if (convert0 && convert1) {\n CHECK(convert0->shape().element_type() == convert1->shape().element_type());\n is_compatible &= convert0->operand(0)->shape().element_type() ==\n convert1->operand(0)->shape().element_type();\n }\n return is_compatible;\n}\ntemplate \nauto OptionalConvertWithOneUser(HloInstruction** optional_convert,\n Pattern pattern) {\n return m::AnyOf(\n m::Convert(optional_convert, pattern).WithOneUser(), std::move(pattern));\n}\nbool MatchOperandsToAllReduceWithOptionalConvert(HloInstruction* inst,\n HloInstruction** convert0,\n HloInstruction** convert1) {\n auto ar_op_optional_convert_pattern =\n m::Op()\n .WithOperand(0, OptionalConvertWithOneUser(convert0, m::AllReduce()))\n .WithOperand(1, OptionalConvertWithOneUser(convert1, m::AllReduce()))\n .WithPredicate([](const HloInstruction* inst) {\n return inst->shape().IsArray();\n });\n return Match(inst, ar_op_optional_convert_pattern);\n}\n} \nabsl::StatusOr AllReduceReassociate::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n if (hlo_query::ContainsLayoutConstrainedAllReduce(*module)) {\n VLOG(1)\n << \"Skip AllReduceReassociate because the module contains all-reduce \"\n \"with constrained layouts\";\n return false;\n }\n int64_t next_channel_id = hlo_query::NextChannelId(*module);\n 
bool changed = false;\n for (auto computation : module->computations(execution_threads)) {\n for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {\n std::optional kind = MatchReductionInstruction(inst);\n if (!kind) {\n continue;\n }\n std::optional reduction_identity =\n GetReductionIdentity(*kind, inst->shape().element_type());\n if (!reduction_identity) {\n continue;\n }\n HloInstruction* lhs = LookThroughForAllReduce(inst->mutable_operand(0),\n *reduction_identity);\n if (lhs == nullptr) {\n continue;\n }\n HloInstruction* rhs = LookThroughForAllReduce(inst->mutable_operand(1),\n *reduction_identity);\n if (rhs == nullptr) {\n continue;\n }\n if (!inst->shape().IsArray()) {\n continue;\n }\n if (lhs->opcode() != rhs->opcode() ||\n (lhs->opcode() == HloOpcode::kDynamicSlice &&\n !ShapeUtil::Compatible(lhs->operand(0)->shape(),\n rhs->operand(0)->shape()))) {\n continue;\n }\n HloAllReduceInstruction* ar0 = nullptr;\n HloAllReduceInstruction* ar1 = nullptr;\n bool reduce_scatter_pattern_match = false;\n if (lhs->opcode() == HloOpcode::kDynamicSlice) {\n HloInstruction* original_rhs_operand = rhs->mutable_operand(0);\n TF_RETURN_IF_ERROR(rhs->ReplaceOperandWith(0, lhs->mutable_operand(0)));\n if (!lhs->Identical(*rhs)) {\n TF_RETURN_IF_ERROR(rhs->ReplaceOperandWith(0, original_rhs_operand));\n continue;\n }\n TF_RETURN_IF_ERROR(rhs->ReplaceOperandWith(0, original_rhs_operand));\n ar0 = Cast(lhs->mutable_operand(0));\n ar1 = Cast(rhs->mutable_operand(0));\n reduce_scatter_pattern_match = true;\n } else {\n ar0 = Cast(lhs);\n ar1 = Cast(rhs);\n }\n if (!ReassociateAllReduceIsProfitable(lhs, rhs, inst)) {\n continue;\n }\n HloInstruction* convert0 = nullptr;\n HloInstruction* convert1 = nullptr;\n if (!MatchOperandsToAllReduceWithOptionalConvert(inst, &convert0,\n &convert1)) {\n VLOG(2) << \"One or both inputs are type-converted.\";\n }\n bool should_promote_ar = convert0 || convert1;\n if (should_promote_ar) {\n if (!reassociate_converted_ar_) {\n 
VLOG(2) << \"Promotions of all_reduces for reassociation will be \"\n \"disabled.\";\n continue;\n }\n if (!AreCompatibleConverts(convert0, convert1)) {\n VLOG(2) << \"Inputs' Converts are not preserving \"\n \"value, skipping\";\n continue;\n }\n }\n HloInstruction* op_operand0 = inst->mutable_operand(0);\n HloInstruction* op_operand1 = inst->mutable_operand(1);\n if (convert0) {\n op_operand0 = convert0->mutable_operand(0);\n }\n if (convert1) {\n op_operand1 = convert1->mutable_operand(0);\n }\n if (!AreCompatible(ar0, ar1, *kind,\n should_promote_ar)) {\n VLOG(2) << \"All-Reduce operations are not compatible, skipping\";\n continue;\n }\n VLOG(2) << \"Reassociated:\";\n VLOG(2) << \"\\tAR0: \" << ar0->ToString();\n VLOG(2) << \"\\tAR1: \" << ar1->ToString();\n auto op_users = inst->users();\n HloInstruction* new_op_operand0 = ar0->mutable_operand(0);\n HloInstruction* new_op_operand1 = ar1->mutable_operand(0);\n if (convert0) {\n HloInstruction* ar0_operand = ar0->mutable_operand(0);\n TF_RETURN_IF_ERROR(convert0->ReplaceOperandWith(0, ar0_operand));\n new_op_operand0 = convert0;\n }\n if (convert1) {\n HloInstruction* ar1_operand = ar1->mutable_operand(0);\n TF_RETURN_IF_ERROR(convert1->ReplaceOperandWith(0, ar1_operand));\n new_op_operand1 = convert1;\n }\n HloInstruction* new_op = inst;\n if (should_promote_ar) {\n new_op = computation->AddInstruction(inst->CloneWithNewOperands(\n inst->shape(), {new_op_operand0, new_op_operand1}));\n } else if (reduce_scatter_pattern_match) {\n new_op = computation->AddInstruction(inst->CloneWithNewOperands(\n ar0->shape(), {new_op_operand0, new_op_operand1}));\n }\n Shape new_ar_out_shape = inst->shape();\n CHECK(!should_promote_ar || !reduce_scatter_pattern_match);\n if (should_promote_ar) {\n new_ar_out_shape.set_element_type(\n new_op_operand0->shape().element_type());\n } else if (reduce_scatter_pattern_match) {\n new_ar_out_shape = ar0->shape();\n } else {\n 
TF_RETURN_IF_ERROR(ar0->ReplaceAllUsesWith(ar0->mutable_operand(0)));\n TF_RETURN_IF_ERROR(ar1->ReplaceAllUsesWith(ar1->mutable_operand(0)));\n }\n HloInstruction* new_ar = computation->AddInstruction(\n ar0->CloneWithNewOperands(new_ar_out_shape, {new_op}));\n if (new_ar->channel_id()) {\n new_ar->set_channel_id(next_channel_id++);\n }\n if (should_promote_ar) {\n HloComputation* to_apply = new_ar->to_apply();\n PrimitiveType type = new_ar->shape().element_type();\n std::string name = absl::StrCat(to_apply->name(), \"_reassoc_promoted\");\n HloComputation::Builder promoted(name);\n auto x = promoted.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeShape(type, {}), \"x\"));\n auto y = promoted.AddInstruction(HloInstruction::CreateParameter(\n 1, ShapeUtil::MakeShape(type, {}), \"y\"));\n promoted.AddInstruction(HloInstruction::CreateBinary(\n ShapeUtil::MakeShape(type, {}),\n to_apply->root_instruction()->opcode(), x, y));\n HloComputation* to_apply_promoted =\n inst->GetModule()->AddEmbeddedComputation(promoted.Build());\n new_ar->set_to_apply(to_apply_promoted);\n TF_RETURN_IF_ERROR(inst->ReplaceAllUsesWith(new_ar));\n } else if (reduce_scatter_pattern_match) {\n auto dyn_slice_operands = lhs->mutable_operands();\n dyn_slice_operands[0] = new_ar;\n HloInstruction* new_dyn_slice = inst->parent()->AddInstruction(\n lhs->CloneWithNewOperands(inst->shape(), dyn_slice_operands));\n TF_RETURN_IF_ERROR(inst->ReplaceUsesWith(op_users, new_dyn_slice));\n } else {\n TF_RETURN_IF_ERROR(inst->ReplaceUsesWith(op_users, new_ar));\n }\n if (should_promote_ar || reduce_scatter_pattern_match) {\n TF_RETURN_IF_ERROR(computation->RemoveInstruction(inst));\n }\n if (reduce_scatter_pattern_match) {\n TF_RETURN_IF_ERROR(computation->RemoveInstruction(lhs));\n if (lhs != rhs) {\n TF_RETURN_IF_ERROR(computation->RemoveInstruction(rhs));\n }\n }\n TF_RETURN_IF_ERROR(computation->RemoveInstruction(ar0));\n if (ar0 != ar1) {\n 
TF_RETURN_IF_ERROR(computation->RemoveInstruction(ar1));\n }\n changed = true;\n }\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/all_reduce_reassociate.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/service/pattern_matcher.h\"\n#include \"xla/service/pattern_matcher_gmock.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nnamespace m = xla::testing::opcode_matchers;\nusing ::testing::_;\nclass AllReduceSimplifierTest : public HloTestBase {\n public:\n absl::StatusOr> RunPass(\n absl::string_view hlo_module, bool expect_change,\n bool reassociate_converted_ar = false) {\n TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_module));\n auto changed =\n AllReduceReassociate(reassociate_converted_ar).Run(module.get());\n if (!changed.ok()) {\n return changed.status();\n }\n EXPECT_EQ(changed.value(), expect_change);\n return absl::StatusOr>(std::move(module));\n }\n size_t AllReduceCount(std::unique_ptr& module) {\n return absl::c_count_if(module->entry_computation()->instructions(),\n HloPredicateIsOp);\n }\n};\nTEST_F(AllReduceSimplifierTest, Simple) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum\n ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum\n ROOT add = f32[8] add(ar0, ar1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n 
RunPass(hlo_string, true));\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n m::AllReduce(m::Add(m::Parameter(0), m::Parameter(1))));\n EXPECT_EQ(AllReduceCount(module), 1);\n}\nTEST_F(AllReduceSimplifierTest, SimpleWithChannelId) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n ar0 = f32[8] all-reduce(p0), channel_id=1, replica_groups={}, to_apply=sum\n ar1 = f32[8] all-reduce(p1), channel_id=1, replica_groups={}, to_apply=sum\n ROOT add = f32[8] add(ar0, ar1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, true));\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n m::AllReduce(m::Add(m::Parameter(0), m::Parameter(1))));\n EXPECT_EQ(AllReduceCount(module), 1);\n}\nTEST_F(AllReduceSimplifierTest, SimpleChain) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n p2 = f32[8] parameter(2)\n p3 = f32[8] parameter(3)\n ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum\n ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum\n ar2 = f32[8] all-reduce(p2), replica_groups={}, to_apply=sum\n ar3 = f32[8] all-reduce(p3), replica_groups={}, to_apply=sum\n add0 = f32[8] add(ar0, ar1)\n add1 = f32[8] add(add0, ar2)\n ROOT add2 = f32[8] add(add1, ar3)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, true));\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n m::AllReduce(m::Add(\n m::Add(m::Add(m::Parameter(0), m::Parameter(1)), m::Parameter(2)),\n m::Parameter(3))));\n EXPECT_EQ(AllReduceCount(module), 1);\n}\nTEST_F(AllReduceSimplifierTest, SimpleTree) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] 
parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n p2 = f32[8] parameter(2)\n p3 = f32[8] parameter(3)\n ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum\n ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum\n ar2 = f32[8] all-reduce(p2), replica_groups={}, to_apply=sum\n ar3 = f32[8] all-reduce(p3), replica_groups={}, to_apply=sum\n add0 = f32[8] add(ar0, ar1)\n add1 = f32[8] add(ar2, ar3)\n ROOT add2 = f32[8] add(add0, add1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, true));\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n m::AllReduce(m::Add(m::Add(m::Parameter(0), m::Parameter(1)),\n m::Add(m::Parameter(2), m::Parameter(3)))));\n EXPECT_EQ(AllReduceCount(module), 1);\n}\nTEST_F(AllReduceSimplifierTest, MismatchOp0) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nmax {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT r = f32[] maximum(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum\n ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=max\n ROOT add = f32[8] add(ar0, ar1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, false));\n}\nTEST_F(AllReduceSimplifierTest, MismatchOp1) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nmax {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT r = f32[] maximum(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=max\n ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=max\n ROOT add = f32[8] add(ar0, ar1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, 
false));\n}\nTEST_F(AllReduceSimplifierTest, MismatchReplicaGroups) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n ar0 = f32[8] all-reduce(p0), replica_groups={{0}}, to_apply=sum\n ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum\n ROOT add = f32[8] add(ar0, ar1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, false));\n}\nTEST_F(AllReduceSimplifierTest, MismatchHasChannelId) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n ar0 = f32[8] all-reduce(p0), replica_groups={}, channel_id=3, to_apply=sum\n ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum\n ROOT add = f32[8] add(ar0, ar1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, false));\n}\nTEST_F(AllReduceSimplifierTest, MismatchUseGlobalDeviceId) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n ar0 = f32[8] all-reduce(p0), replica_groups={{0, 1}}, channel_id=3, use_global_device_ids=true, to_apply=sum\n ar1 = f32[8] all-reduce(p1), replica_groups={{0, 1}}, channel_id=4, to_apply=sum\n ROOT add = f32[8] add(ar0, ar1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, false));\n}\nTEST_F(AllReduceSimplifierTest, NotSingleUser) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum\n ar1 = f32[8] all-reduce(p1), 
replica_groups={}, to_apply=sum\n add = f32[8] add(ar0, ar1)\n ROOT t = (f32[8], f32[8]) tuple(ar0, add)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, false));\n}\nTEST_F(AllReduceSimplifierTest, DoubleUse) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum\n add = f32[8] add(ar0, ar0)\n ROOT c = f32[8] copy(add)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, true));\n}\nTEST_F(AllReduceSimplifierTest, PaddedUse) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum\n ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum\n %constant.1 = f32[] constant(0)\n pad = f32[12]{0} pad(ar0, constant.1), padding=0_4\n pad.1 = f32[12]{0} pad(ar1, constant.1), padding=0_4\n ROOT add = f32[12] add(pad, pad.1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, true));\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n m::AllReduce(m::Add(m::Pad(m::Parameter(0), _),\n m::Pad(m::Parameter(1), _))));\n EXPECT_EQ(AllReduceCount(module), 1);\n}\nTEST_F(AllReduceSimplifierTest, PaddedUseInvalidReduceValue) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum\n ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum\n %constant.1 = f32[] constant(-1.0)\n pad = f32[12]{0} pad(ar0, constant.1), padding=0_4\n pad.1 = f32[12]{0} pad(ar1, 
constant.1), padding=0_4\n ROOT add = f32[12] add(pad, pad.1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, false));\n EXPECT_EQ(AllReduceCount(module), 2);\n}\nTEST_F(AllReduceSimplifierTest, PaddedUseNotProfitable) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum\n ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum\n %constant.1 = f32[] constant(0)\n pad = f32[17]{0} pad(ar0, constant.1), padding=0_9\n pad.1 = f32[17]{0} pad(ar1, constant.1), padding=0_9\n ROOT add = f32[17] add(pad, pad.1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, false));\n EXPECT_EQ(AllReduceCount(module), 2);\n}\nTEST_F(AllReduceSimplifierTest, PaddedUseDoubleUseNotProfitable) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum\n %constant.1 = f32[] constant(0)\n pad = f32[9]{0} pad(ar0, constant.1), padding=0_1\n ROOT add = f32[9] add(pad, pad)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, false));\n EXPECT_EQ(AllReduceCount(module), 1);\n}\nTEST_F(AllReduceSimplifierTest, ReshapeUse) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[1,8] parameter(0)\n p1 = f32[1,8] parameter(1)\n ar0 = f32[1,8] all-reduce(p0), replica_groups={}, to_apply=sum\n ar1 = f32[1,8] all-reduce(p1), replica_groups={}, to_apply=sum\n rshp0 = f32[8]{0} reshape(ar0)\n rshp1 = f32[8]{0} reshape(ar1)\n ROOT add = f32[8] add(rshp0, rshp1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto 
module,\n RunPass(hlo_string, true));\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n m::AllReduce(m::Add(m::Reshape(m::Parameter(0)),\n m::Reshape(m::Parameter(1)))));\n EXPECT_EQ(AllReduceCount(module), 1);\n}\nTEST_F(AllReduceSimplifierTest, SliceUse) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum\n ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum\n rshp0 = f32[4]{0} slice(ar0), slice={[0:4]}\n rshp1 = f32[4]{0} slice(ar1), slice={[0:4]}\n ROOT add = f32[4] add(rshp0, rshp1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, true));\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n m::AllReduce(m::Add(m::Slice(m::Parameter(0)),\n m::Slice(m::Parameter(1)))));\n EXPECT_EQ(AllReduceCount(module), 1);\n}\nTEST_F(AllReduceSimplifierTest, ChainWithConvert) {\n absl::string_view hlo_string = R\"(\nHloModule m\nadd.1 {\n x.47 = bf16[] parameter(0)\n y.47 = bf16[] parameter(1)\n ROOT add.2532 = bf16[] add(x.47, y.47)\n}\nENTRY main {\n p0 = bf16[8] parameter(0)\n p1 = bf16[8] parameter(1)\n p2 = bf16[8] parameter(2)\n p3 = bf16[8] parameter(3)\n ar0 = bf16[8] all-reduce(p0), replica_groups={}, to_apply=add.1\n ar1 = bf16[8] all-reduce(p1), replica_groups={}, to_apply=add.1\n ar2 = bf16[8] all-reduce(p2), replica_groups={}, to_apply=add.1\n ar3 = bf16[8] all-reduce(p3), replica_groups={}, to_apply=add.1\n convert0 = f32[8] convert(ar0)\n convert1 = f32[8] convert(ar1)\n add0 = f32[8] add(convert0, convert1)\n convert2 = f32[8] convert(ar2)\n add1 = f32[8] add(add0, convert2)\n convert3 = f32[8] convert(ar3)\n add2 = f32[8] add(add1, convert3)\n ROOT convert4 = bf16[8] convert(add2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, true,\n true));\n 
SCOPED_TRACE(module->ToString());\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n m::Convert(m::AllReduce(m::Add(m::Add(m::Add(m::Convert(m::Parameter(0)),\n m::Convert(m::Parameter(1))),\n m::Convert(m::Parameter(2))),\n m::Convert(m::Parameter(3))))));\n EXPECT_EQ(AllReduceCount(module), 1);\n EXPECT_THAT(\n module->entry_computation()->root_instruction()->operand(0)->shape(),\n GmockMatch(::xla::match::Shape().WithElementType(F32)));\n}\nTEST_F(AllReduceSimplifierTest, AllreduceWithConvertIncompatibleType) {\n absl::string_view hlo_string = R\"(\nHloModule m\nadd.1 {\n x.47 = bf16[] parameter(0)\n y.47 = bf16[] parameter(1)\n ROOT add.2532 = bf16[] add(x.47, y.47)\n}\nmax.1 {\n x.48 = bf16[] parameter(0)\n y.48 = bf16[] parameter(1)\n ROOT max.2533 = bf16[] maximum(x.48, y.48)\n}\nmin.1 {\n x.49 = bf16[] parameter(0)\n y.49 = bf16[] parameter(1)\n ROOT min.2534 = bf16[] minimum(x.49, y.49)\n}\nmul.1 {\n x.50 = bf16[] parameter(0)\n y.50 = bf16[] parameter(1)\n ROOT mul.2535 = bf16[] multiply(x.50, y.50)\n}\nENTRY main {\n p0 = bf16[8] parameter(0)\n p1 = bf16[8] parameter(1)\n p2 = bf16[8] parameter(2)\n p3 = bf16[8] parameter(3)\n ar0 = bf16[8] all-reduce(p0), replica_groups={}, to_apply=add.1\n ar1 = bf16[8] all-reduce(p1), replica_groups={}, to_apply=max.1\n ar2 = bf16[8] all-reduce(p2), replica_groups={}, to_apply=min.1\n ar3 = bf16[8] all-reduce(p3), replica_groups={}, to_apply=mul.1\n convert0 = f32[8] convert(ar0)\n convert1 = f32[8] convert(ar1)\n add0 = f32[8] add(convert0, convert1)\n convert2 = f32[8] convert(ar2)\n add1 = f32[8] add(add0, convert2)\n convert3 = f32[8] convert(ar3)\n add2 = f32[8] add(add1, convert3)\n ROOT convert4 = bf16[8] convert(add2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, false));\n SCOPED_TRACE(module->ToString());\n}\nTEST_F(AllReduceSimplifierTest, AllreduceWithLossyConvert) {\n absl::string_view hlo_string = R\"(\nHloModule m\nadd.1 {\n x.47 = bf16[] parameter(0)\n y.47 = 
bf16[] parameter(1)\n ROOT add.2532 = bf16[] add(x.47, y.47)\n}\nENTRY main {\n p0 = bf16[8] parameter(0)\n p1 = bf16[8] parameter(1)\n p2 = bf16[8] parameter(2)\n p3 = bf16[8] parameter(3)\n ar0 = bf16[8] all-reduce(p0), replica_groups={}, to_apply=add.1\n ar1 = bf16[8] all-reduce(p1), replica_groups={}, to_apply=add.1\n ar2 = bf16[8] all-reduce(p2), replica_groups={}, to_apply=add.1\n ar3 = bf16[8] all-reduce(p3), replica_groups={}, to_apply=add.1\n convert0 = u32[8] convert(ar0)\n convert1 = u32[8] convert(ar1)\n add0 = u32[8] add(convert0, convert1)\n convert2 = u32[8] convert(ar2)\n add1 = u32[8] add(add0, convert2)\n convert3 = u32[8] convert(ar3)\n add2 = u32[8] add(add1, convert3)\n ROOT convert4 = bf16[8] convert(add2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, false));\n SCOPED_TRACE(module->ToString());\n}\nTEST_F(AllReduceSimplifierTest, AllReduceDynamicSlicePattern) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[1,8] parameter(0)\n p1 = f32[1,8] parameter(1)\n p2 = f32[1,8] parameter(2)\n p3 = s32[] parameter(3)\n cst = s32[] constant(0)\n ar0 = f32[1,8] all-reduce(p0), replica_groups={}, to_apply=sum\n ar1 = f32[1,8] all-reduce(p1), replica_groups={}, to_apply=sum\n ar2 = f32[1,8] all-reduce(p2), replica_groups={}, to_apply=sum\n dyn0 = f32[1,4] dynamic-slice(ar0, cst, p3), dynamic_slice_sizes={1,4}\n dyn1 = f32[1,4] dynamic-slice(ar1, cst, p3), dynamic_slice_sizes={1,4}\n dyn2 = f32[1,4] dynamic-slice(ar2, cst, p3), dynamic_slice_sizes={1,4}\n add = f32[1,4] add(dyn0, dyn1)\n ROOT add1 = f32[1,4] add(add, dyn2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, true));\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n m::DynamicSlice(\n m::AllReduce(m::Add(m::Add(m::Parameter(0), m::Parameter(1)),\n m::Parameter(2))),\n m::Constant(), m::Parameter(3)));\n 
XLA_VLOG_LINES(1, module->ToString());\n EXPECT_EQ(AllReduceCount(module), 1);\n}\nTEST_F(AllReduceSimplifierTest, AllReduceDynamicSlicePatternSameOperand) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[1,8] parameter(0)\n p1 = f32[1,8] parameter(1)\n p2 = s32[] parameter(2)\n cst = s32[] constant(0)\n ar0 = f32[1,8] all-reduce(p0), replica_groups={}, to_apply=sum\n ar2 = f32[1,8] all-reduce(p1), replica_groups={}, to_apply=sum\n dyn0 = f32[1,4] dynamic-slice(ar0, cst, p2), dynamic_slice_sizes={1,4}\n dyn2 = f32[1,4] dynamic-slice(ar2, cst, p2), dynamic_slice_sizes={1,4}\n add = f32[1,4] add(dyn0, dyn0)\n ROOT add1 = f32[1,4] add(add, dyn2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, true));\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n m::DynamicSlice(\n m::AllReduce(m::Add(m::Add(m::Parameter(0), m::Parameter(0)),\n m::Parameter(1))),\n m::Constant(), m::Parameter(2)));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_EQ(AllReduceCount(module), 1);\n}\nTEST_F(AllReduceSimplifierTest, AllReduceDynamicSliceDifferentSlices) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[1,8] parameter(0)\n p1 = f32[1,8] parameter(1)\n p2 = f32[1,16] parameter(2)\n p3 = s32[] parameter(3)\n cst = s32[] constant(0)\n ar0 = f32[1,8] all-reduce(p0), replica_groups={}, to_apply=sum\n ar1 = f32[1,8] all-reduce(p1), replica_groups={}, to_apply=sum\n ar2 = f32[1,16] all-reduce(p2), replica_groups={}, to_apply=sum\n dyn0 = f32[1,4] dynamic-slice(ar0, cst, p3), dynamic_slice_sizes={1,4}\n dyn1 = f32[1,4] dynamic-slice(ar1, cst, p3), dynamic_slice_sizes={1,4}\n dyn2 = f32[1,4] dynamic-slice(ar2, cst, p3), dynamic_slice_sizes={1,4}\n add = f32[1,4] add(dyn0, dyn1)\n ROOT add1 = f32[1,4] add(add, 
dyn2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunPass(hlo_string, true));\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n m::Add(m::DynamicSlice(),\n m::DynamicSlice(m::AllReduce(), m::Constant(), m::Parameter(3))));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_EQ(AllReduceCount(module), 2);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_reassociate.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_reassociate_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1133,"cells":{"ID":{"kind":"string","value":"1b0d1084-8d5e-4768-bdf2-88e35ad4abd6"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"batch_dot_simplification"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/batch_dot_simplification.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/batch_dot_simplification_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/batch_dot_simplification.h\"\n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/span.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/hlo_creation_utils.h\"\n#include \"xla/shape.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include 
\"tsl/platform/statusor.h\"\nnamespace xla {\nabsl::StatusOr\nBatchDotSimplification::ElideDegenerateBatchDimensionFromBatchDot(\n HloInstruction* batch_dot) {\n if (Cast(batch_dot)->sparse_operands()) {\n return false;\n }\n const auto& is_iota = [](absl::Span dims) {\n for (int64_t i = 0; i < dims.size(); ++i) {\n if (dims[i] != i) {\n return false;\n }\n }\n return true;\n };\n if (!absl::c_equal(\n batch_dot->dot_dimension_numbers().lhs_batch_dimensions(),\n batch_dot->dot_dimension_numbers().rhs_batch_dimensions()) ||\n !is_iota(batch_dot->dot_dimension_numbers().lhs_batch_dimensions())) {\n return false;\n }\n const DotDimensionNumbers& dim_numbers = batch_dot->dot_dimension_numbers();\n HloInstruction *lhs = batch_dot->mutable_operand(0),\n *rhs = batch_dot->mutable_operand(1);\n const Shape& lhs_shape = lhs->shape();\n if (dim_numbers.lhs_contracting_dimensions_size() != 1) {\n return false;\n }\n std::vector degenerate_dims;\n for (int64_t batch_dim : dim_numbers.lhs_batch_dimensions()) {\n if (lhs_shape.dimensions(batch_dim) == 1) {\n degenerate_dims.push_back(batch_dim);\n }\n }\n if (degenerate_dims.empty()) {\n return false;\n }\n TF_ASSIGN_OR_RETURN(HloInstruction * new_lhs,\n ElideDegenerateDims(lhs, degenerate_dims));\n TF_ASSIGN_OR_RETURN(HloInstruction * new_rhs,\n ElideDegenerateDims(rhs, degenerate_dims));\n DotDimensionNumbers new_dim_numbers = dim_numbers;\n new_dim_numbers.clear_lhs_batch_dimensions();\n new_dim_numbers.clear_rhs_batch_dimensions();\n for (int64_t i = 0, e = dim_numbers.lhs_batch_dimensions_size() -\n degenerate_dims.size();\n i < e; i++) {\n new_dim_numbers.add_lhs_batch_dimensions(i);\n new_dim_numbers.add_rhs_batch_dimensions(i);\n }\n new_dim_numbers.set_lhs_contracting_dimensions(\n 0,\n new_dim_numbers.lhs_contracting_dimensions(0) - degenerate_dims.size());\n new_dim_numbers.set_rhs_contracting_dimensions(\n 0,\n new_dim_numbers.rhs_contracting_dimensions(0) - degenerate_dims.size());\n TF_ASSIGN_OR_RETURN(\n 
HloInstruction * new_dot,\n MakeDotHlo(new_lhs, new_rhs, new_dim_numbers,\n batch_dot->precision_config(),\n batch_dot->shape().element_type()));\n TF_ASSIGN_OR_RETURN(HloInstruction * new_dot_reshaped,\n MakeReshapeHlo(batch_dot->shape(), new_dot));\n VLOG(2) << \"Replaced \" << batch_dot->ToString() << \" with \"\n << new_dot->ToString();\n TF_RETURN_IF_ERROR(\n batch_dot->parent()->ReplaceInstruction(batch_dot, new_dot_reshaped));\n return true;\n}\nabsl::StatusOr BatchDotSimplification::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n bool changed = false;\n std::vector dot_instrs;\n for (HloComputation* computation :\n module->MakeNonfusionComputations(execution_threads)) {\n absl::c_copy_if(computation->instructions(), std::back_inserter(dot_instrs),\n [](HloInstruction* instr) {\n return instr->opcode() == HloOpcode::kDot;\n });\n }\n for (HloInstruction* dot_instr : dot_instrs) {\n TF_ASSIGN_OR_RETURN(bool elided_batch_dim_from_one,\n ElideDegenerateBatchDimensionFromBatchDot(dot_instr));\n changed |= elided_batch_dim_from_one;\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/batch_dot_simplification.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/test.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nnamespace op = xla::testing::opcode_matchers;\nclass BatchDotSimplificationTest : public HloTestBase {};\nTEST_F(BatchDotSimplificationTest,\n ElideSingleDegenerateBatchDotDim_VectorVector) {\n const std::string hlo_text = R\"(\nHloModule BatchDot\nmain {\n a = f32[1,3] parameter(0)\n b = f32[1,3] parameter(1)\n ROOT dot = f32[1] dot(a, b), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_contracting_dims={1}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr m,\n 
ParseAndReturnVerifiedModule(hlo_text));\n BatchDotSimplification pass;\n ASSERT_TRUE(pass.Run(m.get()).value());\n HloInstruction* root = m->entry_computation()->root_instruction();\n EXPECT_THAT(root,\n op::Reshape(op::Dot(\n op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),\n 0, 0)));\n}\nTEST_F(BatchDotSimplificationTest,\n ElideSingleDegenerateBatchDotDim_MatrixVector) {\n const std::string hlo_text = R\"(\nHloModule BatchDot\nmain {\n a = f32[1,9,3] parameter(0)\n b = f32[1,3] parameter(1)\n ROOT dot = f32[1,9] dot(a, b), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr m,\n ParseAndReturnVerifiedModule(hlo_text));\n BatchDotSimplification pass;\n ASSERT_TRUE(pass.Run(m.get()).value());\n HloInstruction* root = m->entry_computation()->root_instruction();\n EXPECT_THAT(root,\n op::Reshape(op::Dot(\n op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),\n 1, 0)));\n}\nTEST_F(BatchDotSimplificationTest,\n ElideSingleDegenerateBatchDotDim_MatrixMatrix) {\n const std::string hlo_text = R\"(\nHloModule BatchDot\nmain {\n a = f32[1,9,3] parameter(0)\n b = f32[1,3,7] parameter(1)\n ROOT dot = f32[1,9,7] dot(a, b), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr m,\n ParseAndReturnVerifiedModule(hlo_text));\n BatchDotSimplification pass;\n ASSERT_TRUE(pass.Run(m.get()).value());\n HloInstruction* root = m->entry_computation()->root_instruction();\n EXPECT_THAT(root,\n op::Reshape(op::Dot(\n op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),\n 1, 0)));\n}\nTEST_F(BatchDotSimplificationTest,\n ElideMultipleDegenerateBatchDotDims_VectorVector) {\n const std::string hlo_text = R\"(\nHloModule BatchDot\nmain {\n a = f32[9,1,7,1,3] parameter(0)\n b = f32[9,1,7,1,3] parameter(1)\n ROOT dot = f32[9,1,7,1] dot(a, b), lhs_batch_dims={0,1,2,3}, 
rhs_batch_dims={0,1,2,3}, lhs_contracting_dims={4}, rhs_contracting_dims={4}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr m,\n ParseAndReturnVerifiedModule(hlo_text));\n BatchDotSimplification pass;\n ASSERT_TRUE(pass.Run(m.get()).value());\n HloInstruction* root = m->entry_computation()->root_instruction();\n EXPECT_THAT(root,\n op::Reshape(op::Dot(\n op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),\n 2, 2)));\n}\nTEST_F(BatchDotSimplificationTest,\n ElideMultipleDegenerateBatchDotDims_VectorMatrix) {\n const std::string hlo_text = R\"(\nHloModule BatchDot\nmain {\n a = f32[9,1,7,1,3] parameter(0)\n b = f32[9,1,7,1,20,3] parameter(1)\n ROOT dot = f32[9,1,7,1,20] dot(a, b), lhs_batch_dims={0,1,2,3}, rhs_batch_dims={0,1,2,3}, lhs_contracting_dims={4}, rhs_contracting_dims={5}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr m,\n ParseAndReturnVerifiedModule(hlo_text));\n BatchDotSimplification pass;\n ASSERT_TRUE(pass.Run(m.get()).value());\n HloInstruction* root = m->entry_computation()->root_instruction();\n EXPECT_THAT(root,\n op::Reshape(op::Dot(\n op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),\n 2, 3)));\n}\nTEST_F(BatchDotSimplificationTest,\n ElideMultipleDegenerateBatchDotDims_MatrixMatrix) {\n const std::string hlo_text = R\"(\nHloModule BatchDot\nmain {\n a = f32[9,1,7,1,19,3] parameter(0)\n b = f32[9,1,7,1,3,20] parameter(1)\n ROOT dot = f32[9,1,7,1,19,20] dot(a, b), lhs_batch_dims={0,1,2,3}, rhs_batch_dims={0,1,2,3}, lhs_contracting_dims={5}, rhs_contracting_dims={4}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr m,\n ParseAndReturnVerifiedModule(hlo_text));\n BatchDotSimplification pass;\n ASSERT_TRUE(pass.Run(m.get()).value());\n HloInstruction* root = m->entry_computation()->root_instruction();\n EXPECT_THAT(root,\n op::Reshape(op::Dot(\n op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)),\n 3, 2)));\n}\nTEST_F(BatchDotSimplificationTest,\n ElideMultipleDegenerateBatchDotDimsNonContracting) {\n const 
char* hlo_text = R\"(\nHloModule BatchDot\nmain {\n a = f32[1,101] parameter(0)\n b = f32[1,101] parameter(1)\n ROOT dot = f32[1,101,101] dot(a,b), lhs_batch_dims={0},\n lhs_contracting_dims={},\n rhs_batch_dims={0},\n rhs_contracting_dims={}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr m,\n ParseAndReturnVerifiedModule(hlo_text));\n BatchDotSimplification pass;\n ASSERT_FALSE(pass.Run(m.get()).value());\n}\nTEST_F(BatchDotSimplificationTest,\n ElideMultipleDegenerateBatchDotDimsMultipleContracting) {\n const char* hlo_text = R\"(\nHloModule BatchDot\nmain {\n lhs = f32[1,5,17,10,13] parameter(0)\n rhs = f32[1,9,10,13,6,5] parameter(1)\n ROOT dot = f32[10,1,17,9,6] dot(lhs,rhs), lhs_batch_dims={3,0},\n rhs_batch_dims={2,0},\n lhs_contracting_dims={1,4},\n rhs_contracting_dims={5,3}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr m,\n ParseAndReturnVerifiedModule(hlo_text));\n BatchDotSimplification pass;\n ASSERT_FALSE(pass.Run(m.get()).value());\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/batch_dot_simplification.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/batch_dot_simplification_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1134,"cells":{"ID":{"kind":"string","value":"51a7fed9-c52a-4155-b34f-5a1bf3110f66"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"hlo_liveness_analysis"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/hlo_liveness_analysis.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/hlo_liveness_analysis_test.cc"},"Code":{"kind":"string","value":"#include 
\"xla/service/hlo_liveness_analysis.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/functional/function_ref.h\"\n#include \"absl/log/check.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/call_graph.h\"\n#include \"xla/shape_tree.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/types.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/logging.h\"\nnamespace xla {\nnamespace {\nusing Worklist = std::deque;\nusing Workset = absl::flat_hash_set;\nvoid AddToWorklist(const HloInstruction* instruction, Worklist* worklist,\n Workset* workset) {\n if (workset->insert(instruction).second) {\n worklist->push_back(instruction);\n VLOG(3) << \"ADD instruction: \" << instruction->name();\n }\n}\nusing VisitorFunction = absl::FunctionRef;\nvoid ForEachLiveIndex(const ShapeTree& index_tree, VisitorFunction func) {\n index_tree.ForEachElement([&](const ShapeIndex& shape_index, bool live) {\n if (live) {\n func(shape_index);\n }\n });\n}\nvoid MarkLiveAtIndex(const HloInstruction* instruction,\n const ShapeIndex& shape_index,\n HloLivenessAnalysis::HloIndexMap* live_index_map,\n Worklist* worklist, Workset* workset) {\n std::unique_ptr>& liveness = (*live_index_map)[instruction];\n if (liveness == nullptr) {\n liveness = std::make_unique>(instruction->shape(),\n false);\n }\n bool& alive = *liveness->mutable_element(shape_index);\n if (!alive) {\n AddToWorklist(instruction, worklist, workset);\n alive = true;\n VLOG(3) << \"MARK instruction: \" << instruction->name()\n << \" shape_index: \" << shape_index;\n }\n}\nvoid MarkLiveAtAllIndices(const HloInstruction* instruction,\n HloLivenessAnalysis::HloIndexMap* live_index_map,\n Worklist* worklist, Workset* workset) {\n bool add_to_worklist = 
false;\n std::unique_ptr>& liveness = (*live_index_map)[instruction];\n if (liveness == nullptr) {\n liveness = std::make_unique>(instruction->shape(),\n true);\n add_to_worklist = true;\n } else {\n for (auto& entry : *liveness) {\n if (!entry.second) {\n add_to_worklist = true;\n entry.second = true;\n VLOG(3) << \"MARK instruction: \" << instruction->name()\n << \" shape_index: \" << entry.first;\n }\n }\n }\n if (add_to_worklist) {\n AddToWorklist(instruction, worklist, workset);\n }\n}\nvoid PropagateLivenessThroughTuple(\n const HloInstruction* instruction,\n HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,\n Workset* workset) {\n CHECK_EQ(instruction->opcode(), HloOpcode::kTuple);\n const ShapeTree& index_tree = *live_index_map->at(instruction);\n ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {\n const size_t size = shape_index.size();\n if (size == 0) {\n return;\n }\n const int64_t operand_index = shape_index[0];\n if (operand_index >= instruction->operand_count()) {\n return;\n }\n MarkLiveAtIndex(instruction->operand(operand_index), {}, live_index_map,\n worklist, workset);\n ShapeIndex operand_shape_index(size - 1);\n for (int i = 1; i < size; ++i) {\n operand_shape_index[i - 1] = shape_index[i];\n }\n MarkLiveAtIndex(instruction->operand(operand_index), operand_shape_index,\n live_index_map, worklist, workset);\n });\n}\nvoid PropagateLivenessThroughGTE(\n const HloInstruction* instruction,\n HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,\n Workset* workset) {\n CHECK_EQ(instruction->opcode(), HloOpcode::kGetTupleElement);\n MarkLiveAtIndex(instruction->operand(0), {}, live_index_map, worklist,\n workset);\n const ShapeTree& index_tree = *live_index_map->at(instruction);\n ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {\n ShapeIndex operand_shape_index(shape_index);\n operand_shape_index.push_front(instruction->tuple_index());\n MarkLiveAtIndex(instruction->operand(0), 
operand_shape_index,\n live_index_map, worklist, workset);\n });\n}\nvoid PropagateLivenessThroughWhile(\n const HloInstruction* instruction,\n HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,\n Workset* workset) {\n CHECK_EQ(instruction->opcode(), HloOpcode::kWhile);\n const ShapeTree& index_tree = *live_index_map->at(instruction);\n ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {\n MarkLiveAtIndex(instruction->while_body()->root_instruction(), shape_index,\n live_index_map, worklist, workset);\n MarkLiveAtIndex(instruction->operand(0), shape_index, live_index_map,\n worklist, workset);\n });\n MarkLiveAtIndex(instruction->while_condition()->root_instruction(), {},\n live_index_map, worklist, workset);\n}\nvoid PropagateLivenessToParameterCallers(\n const HloInstruction* instruction,\n HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,\n Workset* workset, CallGraph* call_graph) {\n CHECK_EQ(instruction->opcode(), HloOpcode::kParameter);\n const CallGraphNode& call_graph_node =\n call_graph->GetNode(instruction->parent());\n if (call_graph_node.context() == CallContext::kControlFlow) {\n for (const CallSite& callsite : call_graph_node.caller_callsites()) {\n if (callsite.instruction()->opcode() == HloOpcode::kWhile) {\n auto* xla_while = callsite.instruction();\n const ShapeTree& index_tree = *live_index_map->at(instruction);\n ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {\n MarkLiveAtIndex(xla_while, shape_index, live_index_map, worklist,\n workset);\n MarkLiveAtIndex(xla_while->while_body()->root_instruction(),\n shape_index, live_index_map, worklist, workset);\n MarkLiveAtIndex(xla_while->operand(0), shape_index, live_index_map,\n worklist, workset);\n });\n }\n }\n }\n}\nvoid PropagateLivenessThroughControlFlow(\n const HloInstruction* instruction,\n HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,\n Workset* workset, CallGraph* call_graph) {\n const 
CallGraphNode& call_graph_node =\n call_graph->GetNode(instruction->parent());\n if (call_graph_node.context() == CallContext::kControlFlow) {\n for (const CallSite& callsite : call_graph_node.caller_callsites()) {\n HloInstruction* caller = callsite.instruction();\n if (caller->opcode() == HloOpcode::kWhile) {\n MarkLiveAtIndex(caller->while_condition()->root_instruction(), {},\n live_index_map, worklist, workset);\n } else if (caller->opcode() == HloOpcode::kConditional) {\n MarkLiveAtIndex(caller->operand(0), {}, live_index_map, worklist,\n workset);\n MarkLiveAtIndex(caller, {}, live_index_map, worklist, workset);\n const HloComputation* callee_comp = instruction->parent();\n int64_t operand_index = 1;\n for (auto* caller_comp : caller->called_computations()) {\n if (callee_comp == caller_comp) {\n MarkLiveAtIndex(caller->operand(operand_index), {}, live_index_map,\n worklist, workset);\n if (instruction->opcode() == HloOpcode::kParameter) {\n const ShapeTree& index_tree =\n *live_index_map->at(instruction);\n ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {\n MarkLiveAtIndex(caller->operand(operand_index), shape_index,\n live_index_map, worklist, workset);\n });\n }\n break;\n }\n ++operand_index;\n }\n }\n }\n }\n}\n} \nHloLivenessAnalysis::HloLivenessAnalysis(const HloModule& module)\n : module_(module), call_graph_(CallGraph::Build(&module)) {}\nvoid HloLivenessAnalysis::RunAnalysis() {\n Worklist worklist;\n Workset workset;\n MarkLiveAtAllIndices(module_.entry_computation()->root_instruction(),\n &live_index_map_, &worklist, &workset);\n for (auto* computation : module_.computations()) {\n for (auto* instruction : computation->instructions()) {\n if (instruction->HasSideEffectNoRecurse()) {\n MarkLiveAtAllIndices(instruction, &live_index_map_, &worklist,\n &workset);\n }\n }\n }\n while (!worklist.empty()) {\n const HloInstruction* instruction = worklist.front();\n worklist.pop_front();\n workset.erase(workset.find(instruction));\n 
VLOG(1) << \"VISIT instruction: \" << instruction->name();\n if (instruction->opcode() == HloOpcode::kTuple) {\n PropagateLivenessThroughTuple(instruction, &live_index_map_, &worklist,\n &workset);\n } else if (instruction->opcode() == HloOpcode::kGetTupleElement) {\n PropagateLivenessThroughGTE(instruction, &live_index_map_, &worklist,\n &workset);\n } else if (instruction->opcode() == HloOpcode::kWhile) {\n PropagateLivenessThroughWhile(instruction, &live_index_map_, &worklist,\n &workset);\n } else if (instruction->opcode() == HloOpcode::kParameter) {\n PropagateLivenessToParameterCallers(instruction, &live_index_map_,\n &worklist, &workset,\n call_graph_.get());\n } else {\n for (auto* called_computation : instruction->called_computations()) {\n MarkLiveAtAllIndices(called_computation->root_instruction(),\n &live_index_map_, &worklist, &workset);\n }\n for (HloInstruction* operand : instruction->operands()) {\n MarkLiveAtAllIndices(operand, &live_index_map_, &worklist, &workset);\n }\n }\n PropagateLivenessThroughControlFlow(instruction, &live_index_map_,\n &worklist, &workset, call_graph_.get());\n }\n}\nbool HloLivenessAnalysis::IsLive(const HloInstruction* instruction,\n const ShapeIndex& shape_index) const {\n auto it = live_index_map_.find(instruction);\n return (it != live_index_map_.end()) && it->second->element(shape_index);\n}\nabsl::StatusOr> HloLivenessAnalysis::Run(\n const HloModule& module) {\n VLOG(1) << \"HloLivenessAnalysis::Run on module \" << module.name();\n XLA_VLOG_LINES(2, module.ToString());\n auto liveness_analysis = absl::WrapUnique(new HloLivenessAnalysis(module));\n liveness_analysis->RunAnalysis();\n return std::move(liveness_analysis);\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/hlo_liveness_analysis.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/literal.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/status_macros.h\"\n#include 
\"xla/test.h\"\n#include \"xla/test_helpers.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"tsl/platform/logging.h\"\n#include \"tsl/platform/test.h\"\nnamespace xla {\nnamespace {\nclass HloLivenessAnalysisTest : public HloTestBase {\n protected:\n HloLivenessAnalysisTest() {}\n const HloLivenessAnalysis& RunLiveness(HloModule* module) {\n liveness_ = HloLivenessAnalysis::Run(*module).value();\n return *liveness_;\n }\n HloInstruction* GetInstruction(HloModule* module, const std::string& name) {\n HloInstruction* to_return = nullptr;\n for (auto* comp : module->computations()) {\n for (auto* inst : comp->instructions()) {\n if (inst->name() == name) {\n to_return = inst;\n break;\n }\n }\n }\n return CHECK_NOTNULL(to_return);\n }\n std::unique_ptr liveness_;\n};\nTEST_F(HloLivenessAnalysisTest, AddAtEntryRoot) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule SimpleModule\n ENTRY SimpleComputation {\n constant.1 = s32[] constant(0)\n constant.2 = s32[] constant(1)\n ROOT add = s32[] add(constant.1, constant.2)\n })\")\n .value();\n const HloLivenessAnalysis& liveness = RunLiveness(module.get());\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"add\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"constant.1\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"constant.2\"), {}));\n}\nTEST_F(HloLivenessAnalysisTest, DeadAdd) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule SimpleModule\n ENTRY SimpleComputation {\n constant.1 = s32[] constant(0)\n constant.2 = s32[] constant(1)\n add.1 = s32[] add(constant.1, constant.2)\n ROOT add.2 = s32[] add(constant.1, constant.2)\n })\")\n .value();\n const HloLivenessAnalysis& liveness = RunLiveness(module.get());\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"add.2\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"constant.1\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), 
\"constant.2\"), {}));\n EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), \"add.1\"), {}));\n}\nTEST_F(HloLivenessAnalysisTest, TupleAtEntryRoot) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule SimpleModule\n ENTRY SimpleComputation {\n constant.1 = s32[] constant(0)\n constant.2 = s32[] constant(1)\n ROOT tuple.1 = (s32[], s32[]) tuple(constant.1, constant.2)\n })\")\n .value();\n const HloLivenessAnalysis& liveness = RunLiveness(module.get());\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {0}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {1}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"constant.1\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"constant.2\"), {}));\n}\nTEST_F(HloLivenessAnalysisTest, NestedTupleAtEntryRoot) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule SimpleModule\n ENTRY SimpleComputation {\n constant.1 = s32[] constant(1)\n constant.2 = s32[] constant(2)\n constant.3 = s32[] constant(3)\n tuple.1 = (s32[], s32[]) tuple(constant.2, constant.3)\n ROOT tuple.2 = (s32[], (s32[], s32[])) tuple(constant.1, tuple.1)\n })\")\n .value();\n const HloLivenessAnalysis& liveness = RunLiveness(module.get());\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {0}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {1}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.2\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.2\"), {0}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.2\"), {1}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.2\"), {1, 0}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.2\"), {1, 1}));\n 
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"constant.1\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"constant.2\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"constant.3\"), {}));\n}\nTEST_F(HloLivenessAnalysisTest, GteOfTuple) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule SimpleModule\n ENTRY SimpleComputation {\n constant.1 = s32[] constant(0)\n constant.2 = s32[] constant(1)\n tuple.1 = (s32[], s32[]) tuple(constant.1, constant.2)\n ROOT get-tuple-element.1 = s32[] get-tuple-element(tuple.1), index=0\n })\")\n .value();\n const HloLivenessAnalysis& liveness = RunLiveness(module.get());\n EXPECT_TRUE(\n liveness.IsLive(GetInstruction(module.get(), \"get-tuple-element.1\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {0}));\n EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {1}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"constant.1\"), {}));\n EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), \"constant.2\"), {}));\n}\nTEST_F(HloLivenessAnalysisTest, GteOfNestedTuple) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule SimpleModule\n ENTRY SimpleComputation {\n constant.1 = s32[] constant(0)\n constant.2 = s32[] constant(1)\n constant.3 = s32[] constant(2)\n tuple.1 = (s32[], s32[]) tuple(constant.2, constant.3)\n tuple.2 = (s32[], (s32[], s32[])) tuple(constant.1, tuple.1)\n ROOT get-tuple-element.1 = (s32[], s32[]) get-tuple-element(tuple.2), index=1\n })\")\n .value();\n const HloLivenessAnalysis& liveness = RunLiveness(module.get());\n EXPECT_TRUE(\n liveness.IsLive(GetInstruction(module.get(), \"get-tuple-element.1\"), {}));\n EXPECT_TRUE(liveness.IsLive(\n GetInstruction(module.get(), \"get-tuple-element.1\"), {0}));\n EXPECT_TRUE(liveness.IsLive(\n GetInstruction(module.get(), \"get-tuple-element.1\"), 
{1}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.2\"), {}));\n EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), \"tuple.2\"), {0}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.2\"), {1}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.2\"), {1, 0}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.2\"), {1, 1}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {0}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {1}));\n EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), \"constant.1\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"constant.2\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"constant.3\"), {}));\n}\nTEST_F(HloLivenessAnalysisTest, GteOfGteOfNestedTuple) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule SimpleModule\n ENTRY SimpleComputation {\n constant.1 = s32[] constant(0)\n constant.2 = s32[] constant(1)\n constant.3 = s32[] constant(2)\n tuple.1 = (s32[], s32[]) tuple(constant.2, constant.3)\n tuple.2 = (s32[], (s32[], s32[])) tuple(constant.1, tuple.1)\n get-tuple-element.1 = (s32[], s32[]) get-tuple-element(tuple.2), index=1\n ROOT get-tuple-element.2 = s32[] get-tuple-element(get-tuple-element.1), index=0\n })\")\n .value();\n const HloLivenessAnalysis& liveness = RunLiveness(module.get());\n EXPECT_TRUE(\n liveness.IsLive(GetInstruction(module.get(), \"get-tuple-element.2\"), {}));\n EXPECT_TRUE(\n liveness.IsLive(GetInstruction(module.get(), \"get-tuple-element.1\"), {}));\n EXPECT_TRUE(liveness.IsLive(\n GetInstruction(module.get(), \"get-tuple-element.1\"), {0}));\n EXPECT_FALSE(liveness.IsLive(\n GetInstruction(module.get(), \"get-tuple-element.1\"), {1}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.2\"), {}));\n 
EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), \"tuple.2\"), {0}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.2\"), {1}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.2\"), {1, 0}));\n EXPECT_FALSE(\n liveness.IsLive(GetInstruction(module.get(), \"tuple.2\"), {1, 1}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {0}));\n EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {1}));\n EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), \"constant.1\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"constant.2\"), {}));\n EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), \"constant.3\"), {}));\n}\nTEST_F(HloLivenessAnalysisTest, WhileWithDeadTupleElement) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule SimpleLoop\n SimpleLoop.body {\n loop_var.1 = (s32[], s32[3]{0}) parameter(0)\n get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0\n constant.1 = s32[] constant(1)\n add.0 = s32[] add(get-tuple-element.1, constant.1)\n get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1\n multiply.0 = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)\n ROOT tuple.0 = (s32[], s32[3]{0}) tuple(add.0, multiply.0)\n }\n SimpleLoop.condition {\n loop_var.2 = (s32[], s32[3]{0}) parameter(0)\n get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0\n constant.2 = s32[] constant(5)\n ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT\n }\n ENTRY SimpleLoop {\n constant.3 = s32[] constant(0)\n constant.4 = s32[3]{0} constant({0, 1, 2})\n tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)\n while.0 = (s32[], s32[3]{0}) while(tuple.1), condition=\n SimpleLoop.condition, body=SimpleLoop.body\n ROOT get-tuple-element.4 = s32[] get-tuple-element(while.0), index=0\n })\")\n 
.value();\n const HloLivenessAnalysis& liveness = RunLiveness(module.get());\n EXPECT_TRUE(\n liveness.IsLive(GetInstruction(module.get(), \"get-tuple-element.4\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"while.0\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"while.0\"), {0}));\n EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), \"while.0\"), {1}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {0}));\n EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {1}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"constant.3\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.0\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.0\"), {0}));\n EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), \"tuple.0\"), {1}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"add.0\"), {}));\n EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), \"multiply.0\"), {}));\n}\nTEST_F(HloLivenessAnalysisTest, WhileCondPropagatesLiveness) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule SimpleLoop\n add_S32 {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n }\n SimpleLoop.body {\n loop_var.1 = (s32[], s32[3]{0}) parameter(0)\n get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0\n constant.1 = s32[] constant(1)\n add.0 = s32[] add(get-tuple-element.1, constant.1)\n get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1\n multiply.0 = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)\n ROOT tuple.0 = (s32[], s32[3]{0}) tuple(add.0, multiply.0)\n }\n SimpleLoop.condition {\n loop_var.2 = (s32[], s32[3]{0}) parameter(0)\n get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0\n get-tuple-element.4 = s32[3]{0} 
get-tuple-element(loop_var.2), index=1\n zero = s32[] constant(0)\n reduce = s32[] reduce(get-tuple-element.4, zero), dimensions={0}, to_apply=add_S32\n add.1 = s32[] add(get-tuple-element.3, reduce)\n constant.2 = s32[] constant(5)\n ROOT less-than = pred[] compare(add.1, constant.2), direction=LT\n }\n ENTRY SimpleLoop {\n constant.3 = s32[] constant(0)\n constant.4 = s32[3]{0} constant({0, 1, 2})\n tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)\n while.0 = (s32[], s32[3]{0}) while(tuple.1), condition=\n SimpleLoop.condition, body=SimpleLoop.body\n ROOT get-tuple-element.5 = s32[] get-tuple-element(while.0), index=0\n })\")\n .value();\n const HloLivenessAnalysis& liveness = RunLiveness(module.get());\n EXPECT_TRUE(\n liveness.IsLive(GetInstruction(module.get(), \"get-tuple-element.5\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"while.0\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"while.0\"), {0}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"while.0\"), {1}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {0}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {1}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"constant.3\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"constant.4\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.0\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.0\"), {0}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.0\"), {1}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"add.1\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"multiply.0\"), {}));\n}\nTEST_F(HloLivenessAnalysisTest, WhileWithLiveTupleElements) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule SimpleLoop\n 
SimpleLoop.body {\n loop_var.1 = (s32[], s32[], s32[]) parameter(0)\n get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0\n get-tuple-element.2 = s32[] get-tuple-element(loop_var.1), index=1\n add.1 = s32[] add(get-tuple-element.1, get-tuple-element.2)\n get-tuple-element.3 = s32[] get-tuple-element(loop_var.1), index=2\n multiply.1 = s32[] multiply(get-tuple-element.3, get-tuple-element.3)\n ROOT tuple.1 = (s32[], s32[], s32[]) tuple(add.1, get-tuple-element.3, multiply.1)\n }\n SimpleLoop.condition {\n loop_var.2 = (s32[], s32[], s32[]) parameter(0)\n get-tuple-element.4 = s32[] get-tuple-element(loop_var.2), index=0\n constant.1 = s32[] constant(5)\n ROOT less-than = pred[] compare(get-tuple-element.4, constant.1), direction=LT\n }\n ENTRY SimpleLoop {\n constant.2 = s32[] constant(0)\n constant.3 = s32[] constant(1)\n constant.4 = s32[] constant(2)\n tuple.2 = (s32[], s32[], s32[]) tuple(constant.2, constant.3, constant.4)\n while.1 = (s32[], s32[], s32[]) while(tuple.2), condition=\n SimpleLoop.condition, body=SimpleLoop.body\n ROOT get-tuple-element.5 = s32[] get-tuple-element(while.1), index=0\n })\")\n .value();\n const HloLivenessAnalysis& liveness = RunLiveness(module.get());\n EXPECT_TRUE(\n liveness.IsLive(GetInstruction(module.get(), \"get-tuple-element.5\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"while.1\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"while.1\"), {0}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"while.1\"), {1}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"while.1\"), {2}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.2\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.2\"), {0}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.2\"), {1}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.2\"), {2}));\n 
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {0}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {1}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.1\"), {2}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"loop_var.1\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"loop_var.1\"), {0}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"loop_var.1\"), {1}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"loop_var.1\"), {2}));\n}\nTEST_F(HloLivenessAnalysisTest, WhileWithOutfeed) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule OutfeedLoop\n WhileBody {\n body_param = (s32[]) parameter(0)\n token0 = token[] after-all()\n constant.2 = s32[] constant(2)\n outfeed_tuple = (s32[]) outfeed(constant.2, token0)\n get-tuple-element.1 = s32[] get-tuple-element(body_param), index=0\n constant.1 = s32[] constant(1)\n add = s32[] add(get-tuple-element.1, constant.1)\n ROOT tuple = (s32[]) tuple(add)\n }\n WhileCondition {\n cond_param = (s32[]) parameter(0)\n get-tuple-element.3 = s32[] get-tuple-element(cond_param), index=0\n constant.2 = s32[] constant(10)\n ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT\n }\n ENTRY SimpleLoop {\n constant.3 = s32[] constant(0)\n tuple.1 = (s32[]) tuple(constant.3)\n while = (s32[]) while(tuple.1), condition=WhileCondition,\n body=WhileBody\n ROOT rtuple = () tuple()\n })\")\n .value();\n const HloLivenessAnalysis& liveness = RunLiveness(module.get());\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"add\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"constant.3\"), {}));\n}\nTEST_F(HloLivenessAnalysisTest, NestedWhileWithOutfeed) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule OutfeedLoop\n InnerWhileBody {\n body_param = (s32[]) 
parameter(0)\n token0 = token[] after-all()\n constant.2 = s32[] constant(2)\n outfeed_tuple = (s32[]) outfeed(constant.2, token0)\n get-tuple-element.1 = s32[] get-tuple-element(body_param), index=0\n constant.1 = s32[] constant(1)\n add = s32[] add(get-tuple-element.1, constant.1)\n ROOT tuple = (s32[]) tuple(add)\n }\n InnerWhileCondition {\n cond_param = (s32[]) parameter(0)\n get-tuple-element.3 = s32[] get-tuple-element(cond_param), index=0\n constant.2 = s32[] constant(10)\n ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT\n }\n OuterWhileCondition {\n cond_param.2 = (s32[]) parameter(0)\n get-tuple-element.5 = s32[] get-tuple-element(cond_param.2), index=0\n constant.5 = s32[] constant(5)\n ROOT less-than.2 = pred[] compare(get-tuple-element.5, constant.5), direction=LT\n }\n OuterWhileBody {\n body_param.2 = (s32[]) parameter(0)\n get-tuple-element.8 = s32[] get-tuple-element(body_param.2), index=0\n constant.6 = s32[] constant(0)\n tuple.2 = (s32[]) tuple(constant.6)\n inner_while = (s32[]) while(tuple.2), condition=InnerWhileCondition,\n body=InnerWhileBody\n constant.7 = s32[] constant(1)\n add.2 = s32[] add(get-tuple-element.8, constant.7)\n ROOT rtuple = (s32[]) tuple(add.2)\n }\n ENTRY SimpleLoop {\n constant.3 = s32[] constant(0)\n tuple.1 = (s32[]) tuple(constant.3)\n while = (s32[]) while(tuple.1), condition=OuterWhileCondition,\n body=OuterWhileBody\n ROOT rtuple = () tuple()\n })\")\n .value();\n const HloLivenessAnalysis& liveness = RunLiveness(module.get());\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"add\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"add.2\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"constant.3\"), {}));\n}\nTEST_F(HloLivenessAnalysisTest, PropagateLivenessFromConditionalComputation) {\n auto module = ParseAndReturnVerifiedModule(R\"(\nHloModule main.67\n%region_0.10 (Arg_0.11: (s32[], s32[], f32[1024,3], s32[1])) -> (s32[], 
s32[], f32[1024,3], s32[1]) {\n %Arg_0.11 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) parameter(0)\n %get-tuple-element.17 = s32[] get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.11), index=0, metadata={op_name=\"while\"}\n %constant.13 = s32[] constant(1)\n %add.25 = s32[] add(s32[] %get-tuple-element.17, s32[] %constant.13), metadata={op_name=\"while/add_1\"}\n %get-tuple-element.18 = s32[] get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.11), index=1, metadata={op_name=\"while\"}\n %add.22 = s32[] add(s32[] %get-tuple-element.18, s32[] %constant.13), metadata={op_name=\"while/add\"}\n %get-tuple-element.19 = f32[1024,3]{1,0} get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.11), index=2, metadata={op_name=\"while\"}\n %constant.16 = f32[] constant(0)\n %constant.15 = f32[] constant(1)\n %rng.21 = f32[3]{0} rng(f32[] %constant.16, f32[] %constant.15), distribution=rng_uniform, metadata={op_name=\"while/random_uniform/RandomUniform\"}\n %reshape.23 = f32[1,3]{1,0} reshape(f32[3]{0} %rng.21), metadata={op_name=\"while/TensorArrayV2Write/TensorListSetItem\"}\n %constant.12 = s32[] constant(0)\n %dynamic-update-slice.24 = f32[1024,3]{1,0} dynamic-update-slice(f32[1024,3]{1,0} %get-tuple-element.19, f32[1,3]{1,0} %reshape.23, s32[] %get-tuple-element.18, s32[] %constant.12), metadata={op_name=\"while/TensorArrayV2Write/TensorListSetItem\"}\n %get-tuple-element.20 = s32[1]{0} get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.11), index=3, metadata={op_name=\"while\"}\n ROOT %tuple.26 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) tuple(s32[] %add.25, s32[] %add.22, f32[1024,3]{1,0} %dynamic-update-slice.24, s32[1]{0} %get-tuple-element.20), metadata={op_name=\"while\"}\n}\n%region_1.27 (Arg_0.28: (s32[], s32[], f32[1024,3], s32[1])) -> pred[] {\n %Arg_0.28 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) parameter(0)\n %get-tuple-element.30 = s32[] get-tuple-element((s32[], s32[], 
f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.28), index=1, metadata={op_name=\"while\"}\n %constant.29 = s32[] constant(1024)\n ROOT %compare.31 = pred[] compare(s32[] %get-tuple-element.30, s32[] %constant.29), direction=LT, metadata={op_name=\"while/Less\"}\n}\n%region_2.42 (Arg_0.43: (f32[3,32,32,3], token[])) -> (pred[], token[]) {\n %constant.44 = pred[] constant(true)\n %Arg_0.43 = (f32[3,32,32,3]{3,2,1,0}, token[]) parameter(0)\n %get-tuple-element.52 = f32[3,32,32,3]{3,2,1,0} get-tuple-element((f32[3,32,32,3]{3,2,1,0}, token[]) %Arg_0.43), index=0, metadata={op_name=\"image_sample/write_summary/summary_cond\"}\n %constant.49 = f32[] constant(255.5)\n %broadcast.50 = f32[3,32,32,3]{3,2,1,0} broadcast(f32[] %constant.49), dimensions={}, metadata={op_name=\"image_sample/write_summary/summary_cond/convert_image/Mul\"}\n %multiply.53 = f32[3,32,32,3]{3,2,1,0} multiply(f32[3,32,32,3]{3,2,1,0} %get-tuple-element.52, f32[3,32,32,3]{3,2,1,0} %broadcast.50), metadata={op_name=\"image_sample/write_summary/summary_cond/convert_image/Mul\"}\n %constant.47 = f32[] constant(0)\n %broadcast.48 = f32[3,32,32,3]{3,2,1,0} broadcast(f32[] %constant.47), dimensions={}, metadata={op_name=\"image_sample/write_summary/summary_cond/convert_image/Maximum\"}\n %maximum.54 = f32[3,32,32,3]{3,2,1,0} maximum(f32[3,32,32,3]{3,2,1,0} %multiply.53, f32[3,32,32,3]{3,2,1,0} %broadcast.48), metadata={op_name=\"image_sample/write_summary/summary_cond/convert_image/Maximum\"}\n %constant.45 = f32[] constant(255)\n %broadcast.46 = f32[3,32,32,3]{3,2,1,0} broadcast(f32[] %constant.45), dimensions={}, metadata={op_name=\"image_sample/write_summary/summary_cond/convert_image/Minimum\"}\n %minimum.55 = f32[3,32,32,3]{3,2,1,0} minimum(f32[3,32,32,3]{3,2,1,0} %maximum.54, f32[3,32,32,3]{3,2,1,0} %broadcast.46), metadata={op_name=\"image_sample/write_summary/summary_cond/convert_image/Minimum\"}\n %convert.56 = u8[3,32,32,3]{3,2,1,0} convert(f32[3,32,32,3]{3,2,1,0} %minimum.55), 
metadata={op_name=\"image_sample/write_summary/summary_cond/convert_image\"}\n %get-tuple-element.51 = token[] get-tuple-element((f32[3,32,32,3]{3,2,1,0}, token[]) %Arg_0.43), index=1, metadata={op_name=\"image_sample/write_summary/summary_cond\"}\n %send.57 = (u8[3,32,32,3]{3,2,1,0}, u32[], token[]) send(u8[3,32,32,3]{3,2,1,0} %convert.56, token[] %get-tuple-element.51), channel_id=2, is_host_transfer=true, frontend_attributes={_xla_host_transfer_rendezvous=\"host_compute_channel_0_args_dtoh_0\"}, metadata={op_name=\"image_sample/write_summary/summary_cond/encode_each_image/TensorArrayUnstack/TensorListFromTensor\"}\n %send-done.58 = token[] send-done((u8[3,32,32,3]{3,2,1,0}, u32[], token[]) %send.57), channel_id=2, is_host_transfer=true, frontend_attributes={_xla_host_transfer_rendezvous=\"host_compute_channel_0_args_dtoh_0\"}, metadata={op_name=\"image_sample/write_summary/summary_cond/encode_each_image/TensorArrayUnstack/TensorListFromTensor\"}\n ROOT %tuple.59 = (pred[], token[]) tuple(pred[] %constant.44, token[] %send-done.58), metadata={op_name=\"image_sample/write_summary/summary_cond\"}\n}\n%region_3.60 (Arg_0.61: (f32[3,32,32,3], token[])) -> (pred[], token[]) {\n %constant.62 = pred[] constant(false)\n %Arg_0.61 = (f32[3,32,32,3]{3,2,1,0}, token[]) parameter(0)\n %get-tuple-element.63 = token[] get-tuple-element((f32[3,32,32,3]{3,2,1,0}, token[]) %Arg_0.61), index=1, metadata={op_name=\"image_sample/write_summary/summary_cond\"}\n ROOT %tuple.64 = (pred[], token[]) tuple(pred[] %constant.62, token[] %get-tuple-element.63), metadata={op_name=\"image_sample/write_summary/summary_cond\"}\n}\nENTRY %main.67 (arg_tuple.1: (s32[])) -> () {\n %arg_tuple.1 = (s32[]{:T(256)}) parameter(0)\n %get-tuple-element.2 = s32[]{:T(256)} get-tuple-element((s32[]{:T(256)}) %arg_tuple.1), index=0\n %constant.3 = s32[] constant(0)\n %compare.8 = pred[]{:T(256)} compare(s32[]{:T(256)} %get-tuple-element.2, s32[] %constant.3), direction=EQ, 
metadata={op_name=\"image_sample/write_summary/Equal\"}\n %constant.5 = f32[] constant(0)\n %broadcast.6 = f32[1024,3]{1,0} broadcast(f32[] %constant.5), dimensions={}, metadata={op_name=\"tokens_accumulator\"}\n %constant.4 = s32[1]{0} constant({1024})\n %tuple.9 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) tuple(s32[] %constant.3, s32[] %constant.3, f32[1024,3]{1,0} %broadcast.6, s32[1]{0} %constant.4), metadata={op_name=\"while\"}\n %while.32 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) while((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %tuple.9), condition=%region_1.27, body=%region_0.10, metadata={op_name=\"while\"}\n %get-tuple-element.33 = f32[1024,3]{1,0} get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %while.32), index=2, metadata={op_name=\"while\"}\n %transpose.34 = f32[3,1024]{0,1} transpose(f32[1024,3]{1,0} %get-tuple-element.33), dimensions={1,0}, metadata={op_name=\"transpose.transpose/perm\"}\n %reshape.35 = f32[3,32,32,1]{3,2,1,0} reshape(f32[3,1024]{0,1} %transpose.34), metadata={op_name=\"Reshape\"}\n %broadcast.36 = f32[3,32,32,1]{3,2,1,0} broadcast(f32[3,32,32,1]{3,2,1,0} %reshape.35), dimensions={0,1,2,3}, metadata={op_name=\"Tile\"}\n %reshape.37 = f32[3,32,32]{2,1,0} reshape(f32[3,32,32,1]{3,2,1,0} %broadcast.36), metadata={op_name=\"Tile\"}\n %broadcast.38 = f32[3,32,32,3]{3,2,1,0} broadcast(f32[3,32,32]{2,1,0} %reshape.37), dimensions={0,1,2}, metadata={op_name=\"Tile\"}\n %after-all.7 = token[] after-all(), metadata={op_name=\"image_sample/write_summary/summary_cond\"}\n %send.39 = (pred[]{:T(256)}, u32[], token[]) send(pred[]{:T(256)} %compare.8, token[] %after-all.7), channel_id=1, is_host_transfer=true, frontend_attributes={_xla_host_transfer_rendezvous=\"if_predicate_channel_1_dtoh_0\"}, metadata={op_name=\"image_sample/write_summary/summary_cond\"}\n %send-done.40 = token[] send-done((pred[]{:T(256)}, u32[], token[]) %send.39), channel_id=1, is_host_transfer=true, 
frontend_attributes={_xla_host_transfer_rendezvous=\"if_predicate_channel_1_dtoh_0\"}, metadata={op_name=\"image_sample/write_summary/summary_cond\"}\n %tuple.41 = (f32[3,32,32,3]{3,2,1,0}, token[]) tuple(f32[3,32,32,3]{3,2,1,0} %broadcast.38, token[] %send-done.40), metadata={op_name=\"image_sample/write_summary/summary_cond\"}\n %conditional.65 = (pred[], token[]) conditional(pred[]{:T(256)} %compare.8, (f32[3,32,32,3]{3,2,1,0}, token[]) %tuple.41, (f32[3,32,32,3]{3,2,1,0}, token[]) %tuple.41), true_computation=%region_2.42, false_computation=%region_3.60, metadata={op_name=\"image_sample/write_summary/summary_cond\"}\n ROOT %tuple.66 = () tuple()\n}\n)\")\n .value();\n const HloLivenessAnalysis& liveness = RunLiveness(module.get());\n EXPECT_TRUE(\n liveness.IsLive(GetInstruction(module.get(), \"conditional.65\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"tuple.41\"), {}));\n EXPECT_TRUE(liveness.IsLive(\n GetInstruction(module.get(), \"get-tuple-element.33\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"while.32\"), {}));\n EXPECT_TRUE(liveness.IsLive(\n GetInstruction(module.get(), \"dynamic-update-slice.24\"), {}));\n EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), \"send.57\"), {}));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_liveness_analysis.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_liveness_analysis_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1135,"cells":{"ID":{"kind":"string","value":"02bc84dc-502e-49cb-bc34-f1006c412337"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"defuser"},"File Path in 
Repository":{"kind":"string","value":"third_party/xla/xla/service/defuser.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/defuser_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/defuser.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/container/flat_hash_map.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/call_graph.h\"\n#include \"xla/status_macros.h\"\n#include \"xla/types.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/logging.h\"\n#include \"tsl/platform/status.h\"\nnamespace xla {\nabsl::StatusOr Defuser::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n VLOG(1) << \"Defusing module \" << module->name();\n XLA_VLOG_LINES(2, \"Before defusion:\\n\" + module->ToString());\n bool changed = false;\n std::unique_ptr call_graph = CallGraph::Build(module);\n TF_RETURN_IF_ERROR(call_graph->VisitNodes(\n [&](const CallGraphNode& call_graph_node) -> absl::Status {\n if (call_graph_node.computation()->IsFusionComputation()) {\n TF_RET_CHECK(call_graph_node.caller_callsites().size() == 1);\n HloInstruction* fusion_instruction =\n call_graph_node.caller_callsites()[0].instruction();\n TF_RETURN_IF_ERROR(fusion_instruction->Defuse());\n changed = true;\n }\n return absl::OkStatus();\n },\n true));\n XLA_VLOG_LINES(2, \"After defusion:\\n\" + module->ToString());\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/defuser.h\"\n#include \n#include \n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/literal.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/shape.h\"\n#include 
\"xla/shape_util.h\"\n#include \"xla/tests/hlo_test_base.h\"\nnamespace op = xla::testing::opcode_matchers;\nnamespace xla {\nnamespace {\nclass DefuserTest : public HloTestBase {\n protected:\n int FusionCount(const HloModule* m) {\n int count = 0;\n for (HloComputation* computation : m->computations()) {\n if (computation->IsFusionComputation()) {\n count++;\n }\n }\n return count;\n }\n Defuser defuser_;\n const Shape shape_ = ShapeUtil::MakeShape(F32, {2, 2});\n};\nTEST_F(DefuserTest, NoFusionInstruction) {\n auto m = CreateNewVerifiedModule();\n auto builder = HloComputation::Builder(TestName());\n auto param0 =\n builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, \"p0\"));\n auto param1 =\n builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, \"p1\"));\n builder.AddInstruction(\n HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));\n m->AddEntryComputation(builder.Build());\n EXPECT_EQ(0, FusionCount(m.get()));\n EXPECT_FALSE(defuser_.Run(m.get()).value());\n}\nTEST_F(DefuserTest, TrivialFusionInstructionAsRoot) {\n auto m = CreateNewVerifiedModule();\n auto builder = HloComputation::Builder(TestName());\n auto param0 =\n builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, \"p0\"));\n auto param1 =\n builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, \"p1\"));\n auto add = builder.AddInstruction(\n HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));\n auto computation = m->AddEntryComputation(builder.Build());\n computation->CreateFusionInstruction({add},\n HloInstruction::FusionKind::kLoop);\n EXPECT_THAT(computation->root_instruction(), op::Fusion());\n EXPECT_EQ(1, FusionCount(m.get()));\n EXPECT_TRUE(defuser_.Run(m.get()).value());\n EXPECT_EQ(0, FusionCount(m.get()));\n EXPECT_THAT(computation->root_instruction(),\n op::Add(op::Parameter(), op::Parameter()));\n}\nTEST_F(DefuserTest, TrivialFusionInstructionNotAsRoot) {\n auto m = CreateNewVerifiedModule();\n 
auto builder = HloComputation::Builder(TestName());\n auto param0 =\n builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, \"p0\"));\n auto param1 =\n builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, \"p1\"));\n auto add = builder.AddInstruction(\n HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));\n builder.AddInstruction(\n HloInstruction::CreateUnary(shape_, HloOpcode::kNegate, add));\n auto computation = m->AddEntryComputation(builder.Build());\n computation->CreateFusionInstruction({add},\n HloInstruction::FusionKind::kLoop);\n EXPECT_THAT(computation->root_instruction(), op::Negate(op::Fusion()));\n EXPECT_EQ(1, FusionCount(m.get()));\n EXPECT_TRUE(defuser_.Run(m.get()).value());\n EXPECT_EQ(0, FusionCount(m.get()));\n EXPECT_THAT(computation->root_instruction(),\n op::Negate(op::Add(op::Parameter(), op::Parameter())));\n}\nTEST_F(DefuserTest, NonTrivialFusionInstruction) {\n auto m = CreateNewVerifiedModule();\n auto builder = HloComputation::Builder(TestName());\n auto param0 =\n builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, \"p0\"));\n auto param1 =\n builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, \"p1\"));\n auto param3 =\n builder.AddInstruction(HloInstruction::CreateParameter(2, shape_, \"p2\"));\n auto add = builder.AddInstruction(\n HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));\n auto negate = builder.AddInstruction(\n HloInstruction::CreateUnary(shape_, HloOpcode::kNegate, add));\n auto sub = builder.AddInstruction(\n HloInstruction::CreateBinary(shape_, HloOpcode::kSubtract, add, negate));\n auto mul = builder.AddInstruction(\n HloInstruction::CreateBinary(shape_, HloOpcode::kMultiply, sub, param3));\n auto div = builder.AddInstruction(\n HloInstruction::CreateBinary(shape_, HloOpcode::kDivide, mul, param3));\n auto constant = builder.AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR2({{1.0, 2.0}, {3.0, 4.0}})));\n 
auto add2 = builder.AddInstruction(\n HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, constant, div));\n auto computation = m->AddEntryComputation(builder.Build());\n computation->CreateFusionInstruction(\n {add2, constant, div, mul, sub, negate, add},\n HloInstruction::FusionKind::kLoop);\n EXPECT_THAT(computation->root_instruction(), op::Fusion());\n EXPECT_EQ(1, FusionCount(m.get()));\n EXPECT_TRUE(defuser_.Run(m.get()).value());\n EXPECT_EQ(0, FusionCount(m.get()));\n EXPECT_THAT(computation->root_instruction(),\n op::Add(op::Constant(), op::Divide()));\n}\nTEST_F(DefuserTest, MultipleFusionInstructions) {\n auto m = CreateNewVerifiedModule();\n auto builder = HloComputation::Builder(TestName());\n auto param0 =\n builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, \"p0\"));\n auto param1 =\n builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, \"p1\"));\n auto param3 =\n builder.AddInstruction(HloInstruction::CreateParameter(2, shape_, \"p2\"));\n auto add = builder.AddInstruction(\n HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));\n auto negate = builder.AddInstruction(\n HloInstruction::CreateUnary(shape_, HloOpcode::kNegate, add));\n auto sub = builder.AddInstruction(\n HloInstruction::CreateBinary(shape_, HloOpcode::kSubtract, add, negate));\n auto mul = builder.AddInstruction(\n HloInstruction::CreateBinary(shape_, HloOpcode::kMultiply, sub, param3));\n auto div = builder.AddInstruction(\n HloInstruction::CreateBinary(shape_, HloOpcode::kDivide, mul, param3));\n auto constant = builder.AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR2({{1.0, 2.0}, {3.0, 4.0}})));\n auto add2 = builder.AddInstruction(\n HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, constant, div));\n auto computation = m->AddEntryComputation(builder.Build());\n computation->CreateFusionInstruction({add2, constant, div, mul},\n HloInstruction::FusionKind::kLoop);\n computation->CreateFusionInstruction({sub, 
negate, add},\n HloInstruction::FusionKind::kLoop);\n EXPECT_THAT(computation->root_instruction(), op::Fusion());\n EXPECT_EQ(2, FusionCount(m.get()));\n EXPECT_TRUE(defuser_.Run(m.get()).value());\n EXPECT_EQ(0, FusionCount(m.get()));\n EXPECT_THAT(computation->root_instruction(),\n op::Add(op::Constant(), op::Divide()));\n}\nTEST_F(DefuserTest, NestedFusionInstructions) {\n auto m = CreateNewVerifiedModule();\n auto builder = HloComputation::Builder(TestName());\n auto param0 =\n builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, \"p0\"));\n auto param1 =\n builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, \"p1\"));\n auto add = builder.AddInstruction(\n HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));\n auto negate = builder.AddInstruction(\n HloInstruction::CreateUnary(shape_, HloOpcode::kNegate, add));\n auto computation = m->AddEntryComputation(builder.Build());\n auto outer_fusion = computation->CreateFusionInstruction(\n {negate, add}, HloInstruction::FusionKind::kLoop);\n HloInstruction* fused_negate = outer_fusion->fused_expression_root();\n ASSERT_EQ(fused_negate->opcode(), HloOpcode::kNegate);\n outer_fusion->fused_instructions_computation()->CreateFusionInstruction(\n {fused_negate}, HloInstruction::FusionKind::kLoop);\n EXPECT_THAT(computation->root_instruction(), op::Fusion());\n EXPECT_EQ(2, FusionCount(m.get()));\n EXPECT_TRUE(defuser_.Run(m.get()).value());\n EXPECT_EQ(0, FusionCount(m.get()));\n EXPECT_THAT(computation->root_instruction(), op::Negate(op::Add()));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/defuser.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/defuser_test.cc"},"Commit 
Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1136,"cells":{"ID":{"kind":"string","value":"46bc99e4-502f-4dcb-8ff3-7a075b7f1f98"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"elemental_ir_emitter"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/elemental_ir_emitter.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/elemental_ir_emitter_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/elemental_ir_emitter.h\"\n#include \n#include \n#include \n#include \"absl/log/check.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/span.h\"\n#include \"llvm/IR/Attributes.h\"\n#include \"llvm/IR/BasicBlock.h\"\n#include \"llvm/IR/Constants.h\"\n#include \"llvm/IR/DerivedTypes.h\"\n#include \"llvm/IR/IRBuilder.h\"\n#include \"llvm/IR/Instructions.h\"\n#include \"llvm/IR/Intrinsics.h\"\n#include \"llvm/IR/Module.h\"\n#include \"llvm/IR/Type.h\"\n#include \"llvm/Support/ModRef.h\"\n#include \"llvm/TargetParser/Triple.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/layout.h\"\n#include \"xla/service/elemental_ir_emitter.h\"\n#include \"xla/service/gpu/backend_configs.pb.h\"\n#include \"xla/service/gpu/ir_emitter_context.h\"\n#include \"xla/service/gpu/ir_emitter_nested.h\"\n#include \"xla/service/gpu/target_util.h\"\n#include \"xla/service/llvm_ir/ir_array.h\"\n#include \"xla/service/llvm_ir/llvm_util.h\"\n#include \"xla/service/llvm_ir/math_ops.h\"\n#include \"xla/stream_executor/device_description.h\"\n#include \"xla/util.h\"\n#include \"xla/xla_data.pb.h\"\nnamespace xla {\nnamespace gpu {\nGpuElementalIrEmitter::GpuElementalIrEmitter(\n IrEmitterContext& ir_emitter_context, llvm::IRBuilder<>* b)\n : 
ElementalIrEmitter(ir_emitter_context.llvm_module(), b),\n ir_emitter_context_(ir_emitter_context) {}\nabsl::StatusOr GpuElementalIrEmitter::EmitDeviceMathCall(\n TargetDeviceFunctionID funcid, absl::Span operands,\n absl::Span input_types, PrimitiveType output_type,\n absl::string_view name) {\n bool cast_result_to_fp16 = false;\n std::vector converted_operands(operands.begin(),\n operands.end());\n std::vector converted_input_types(input_types.begin(),\n input_types.end());\n switch (output_type) {\n case F16:\n cast_result_to_fp16 = true;\n for (int64_t i = 0; i < operands.size(); ++i) {\n if (input_types[i] == F16) {\n converted_operands[i] =\n FPCast(converted_operands[i], b()->getFloatTy());\n converted_input_types[i] = F32;\n }\n }\n output_type = F32;\n [[fallthrough]];\n case F32:\n break;\n case F64:\n break;\n default:\n return Unimplemented(\"Bad type for device math call: %s\",\n PrimitiveType_Name(output_type));\n }\n const std::string& munged_callee = ObtainDeviceFunctionName(\n funcid, output_type,\n llvm::Triple(b()->GetInsertBlock()->getModule()->getTargetTriple()));\n llvm::Value* result = EmitMathCall(munged_callee, converted_operands,\n converted_input_types, output_type, name)\n .value();\n if (cast_result_to_fp16) {\n result = FPCast(result, b()->getHalfTy());\n }\n return result;\n}\nabsl::StatusOr GpuElementalIrEmitter::EmitMathCall(\n const std::string& callee_name, absl::Span operands,\n absl::Span input_types, PrimitiveType output_type,\n absl::string_view name) {\n for (PrimitiveType input_type : input_types) {\n if (output_type != input_type) {\n return Unimplemented(\"Input type != output type: %s != %s\",\n PrimitiveType_Name(input_type),\n PrimitiveType_Name(output_type));\n }\n }\n return EmitDeviceFunctionCall(callee_name, operands, input_types, output_type,\n llvm::AttrBuilder(b()->getContext())\n .addMemoryAttr(llvm::MemoryEffects::none())\n .addAttribute(llvm::Attribute::NoUnwind),\n b(), name);\n}\nllvm_ir::IrArray::Index 
GpuElementalIrEmitter::GetSourceIndexOfBitcast(\n const llvm_ir::IrArray::Index& index, const HloInstruction* hlo) {\n Shape shape = hlo->shape();\n Shape operand_shape = hlo->operand(0)->shape();\n auto gpu_config = hlo->backend_config();\n CHECK_OK(gpu_config);\n const BitcastBackendConfig& bitcast_config =\n gpu_config.value().bitcast_backend_config();\n if (!bitcast_config.result_layout().minor_to_major().empty()) {\n *shape.mutable_layout() =\n xla::Layout::CreateFromProto(bitcast_config.result_layout());\n }\n if (!bitcast_config.source_layout().minor_to_major().empty()) {\n *operand_shape.mutable_layout() =\n xla::Layout::CreateFromProto(bitcast_config.source_layout());\n }\n return index.SourceIndexOfBitcast(shape, operand_shape, b());\n}\nabsl::StatusOr GpuElementalIrEmitter::EmitFloatBinaryOp(\n const HloInstruction* op, llvm::Value* lhs_value, llvm::Value* rhs_value) {\n PrimitiveType lhs_input_type = op->operand(0)->shape().element_type();\n PrimitiveType rhs_input_type = op->operand(1)->shape().element_type();\n PrimitiveType output_type = op->shape().element_type();\n HloOpcode opcode = op->opcode();\n if (ir_emitter_context_.debug_options().xla_gpu_enable_fast_min_max() &&\n (opcode == HloOpcode::kMaximum || opcode == HloOpcode::kMinimum)) {\n return llvm_ir::EmitCallToIntrinsic(\n opcode == HloOpcode::kMaximum ? 
llvm::Intrinsic::maxnum\n : llvm::Intrinsic::minnum,\n {lhs_value, rhs_value}, {lhs_value->getType()}, b());\n }\n switch (op->opcode()) {\n case HloOpcode::kRemainder: {\n return EmitDeviceMathCall(TargetDeviceFunctionID::kFmod,\n {lhs_value, rhs_value},\n {lhs_input_type, rhs_input_type}, output_type);\n }\n case HloOpcode::kPower: {\n return EmitPowerOp(op, lhs_value, rhs_value);\n }\n default:\n return ElementalIrEmitter::EmitFloatBinaryOp(op, lhs_value, rhs_value);\n }\n}\nabsl::StatusOr GpuElementalIrEmitter::EmitPowerOp(\n const HloInstruction* op, llvm::Value* lhs_value, llvm::Value* rhs_value) {\n CHECK_EQ(op->opcode(), HloOpcode::kPower);\n PrimitiveType lhs_input_type = op->operand(0)->shape().element_type();\n PrimitiveType rhs_input_type = op->operand(1)->shape().element_type();\n PrimitiveType output_type = op->shape().element_type();\n return EmitDeviceMathCall(TargetDeviceFunctionID::kPow,\n {lhs_value, rhs_value},\n {lhs_input_type, rhs_input_type}, output_type);\n}\nabsl::StatusOr GpuElementalIrEmitter::EmitLog(\n PrimitiveType prim_type, llvm::Value* value) {\n return EmitDeviceMathCall(TargetDeviceFunctionID::kLog, {value}, {prim_type},\n prim_type);\n}\nabsl::StatusOr GpuElementalIrEmitter::EmitLog1p(\n PrimitiveType prim_type, llvm::Value* value) {\n return EmitDeviceMathCall(TargetDeviceFunctionID::kLog1p, {value},\n {prim_type}, prim_type);\n}\nabsl::StatusOr GpuElementalIrEmitter::EmitSin(\n PrimitiveType prim_type, llvm::Value* value) {\n return EmitDeviceMathCall(TargetDeviceFunctionID::kSin, {value}, {prim_type},\n prim_type);\n}\nabsl::StatusOr GpuElementalIrEmitter::EmitCos(\n PrimitiveType prim_type, llvm::Value* value) {\n return EmitDeviceMathCall(TargetDeviceFunctionID::kCos, {value}, {prim_type},\n prim_type);\n}\nabsl::StatusOr GpuElementalIrEmitter::EmitTan(\n PrimitiveType prim_type, llvm::Value* value) {\n return EmitDeviceMathCall(TargetDeviceFunctionID::kTan, {value}, {prim_type},\n prim_type);\n}\nabsl::StatusOr 
GpuElementalIrEmitter::EmitExp(\n PrimitiveType prim_type, llvm::Value* value, absl::string_view ) {\n return EmitDeviceMathCall(TargetDeviceFunctionID::kExp, {value}, {prim_type},\n prim_type);\n}\nabsl::StatusOr GpuElementalIrEmitter::EmitExpm1(\n PrimitiveType prim_type, llvm::Value* value) {\n return EmitDeviceMathCall(TargetDeviceFunctionID::kExpm1, {value},\n {prim_type}, prim_type);\n}\nabsl::StatusOr GpuElementalIrEmitter::EmitPow(\n PrimitiveType prim_type, llvm::Value* lhs, llvm::Value* rhs,\n absl::string_view name) {\n return EmitDeviceMathCall(TargetDeviceFunctionID::kPow, {lhs, rhs},\n {prim_type, prim_type}, prim_type, name);\n}\nabsl::StatusOr GpuElementalIrEmitter::EmitSqrt(\n PrimitiveType prim_type, llvm::Value* value) {\n return EmitDeviceMathCall(TargetDeviceFunctionID::kSqrt, {value}, {prim_type},\n prim_type);\n}\nabsl::StatusOr GpuElementalIrEmitter::EmitRsqrt(\n PrimitiveType prim_type, llvm::Value* value) {\n return EmitDeviceMathCall(TargetDeviceFunctionID::kRsqrt, {value},\n {prim_type}, prim_type);\n}\nabsl::StatusOr GpuElementalIrEmitter::EmitAtan2(\n PrimitiveType prim_type, llvm::Value* lhs, llvm::Value* rhs,\n absl::string_view name) {\n return EmitDeviceMathCall(TargetDeviceFunctionID::kAtan2, {lhs, rhs},\n {prim_type, prim_type}, prim_type, name);\n}\nabsl::StatusOr GpuElementalIrEmitter::EmitTanh(\n PrimitiveType prim_type, llvm::Value* value) {\n if (prim_type == F64) {\n return EmitDeviceMathCall(TargetDeviceFunctionID::kTanh, {value},\n {prim_type}, prim_type);\n }\n llvm::Type* type = prim_type == F16 ? 
b()->getFloatTy() : value->getType();\n llvm::Value* input = FPCast(value, type);\n constexpr double kMaxValue = 20.0;\n auto max_value = llvm::ConstantFP::get(type, kMaxValue);\n llvm::Value* abs_value =\n llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::fabs, {input}, {type}, b());\n llvm::Value* fast_tanh = llvm_ir::EmitFastTanh(b(), input);\n auto one = llvm::ConstantFP::get(type, 1.0);\n auto one_with_sign = llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::copysign,\n {one, input}, {type}, b());\n return FPCast(Select(FCmpULT(abs_value, max_value), fast_tanh, one_with_sign),\n value->getType(), \"tanh\");\n}\nabsl::StatusOr GpuElementalIrEmitter::EmitErf(\n PrimitiveType prim_type, llvm::Value* value) {\n if (prim_type == F64) {\n return EmitDeviceMathCall(TargetDeviceFunctionID::kErf, {value},\n {prim_type}, prim_type);\n }\n llvm::Type* type = prim_type == F16 ? b()->getFloatTy() : value->getType();\n if (type == b()->getFloatTy()) {\n llvm::Value* x = FPCast(value, type);\n auto* result = llvm_ir::EmitErfF32(b(), x);\n return FPCast(result, value->getType());\n }\n return Unimplemented(\"erf\");\n}\nabsl::StatusOr GpuElementalIrEmitter::EmitComplexAbs(\n PrimitiveType prim_type, llvm::Value* value) {\n return EmitDeviceMathCall(TargetDeviceFunctionID::kHypot,\n {EmitExtractReal(value), EmitExtractImag(value)},\n {prim_type, prim_type}, prim_type);\n}\nabsl::StatusOr GpuElementalIrEmitter::EmitCbrt(\n PrimitiveType prim_type, llvm::Value* value) {\n return EmitDeviceMathCall(TargetDeviceFunctionID::kCbrt, {value}, {prim_type},\n prim_type);\n}\nabsl::StatusOr>\nGpuElementalIrEmitter::EmitThreadLocalCall(\n const HloComputation& callee, absl::Span parameters,\n absl::string_view, bool ) {\n return CallNestedComputationWithScalars(b(), ir_emitter_context_, callee,\n parameters);\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/elemental_ir_emitter.h\"\n#include \n#include \n#include \n#include \n#include \n#include 
\n#include \n#include \"absl/strings/str_replace.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/span.h\"\n#include \"llvm/IR/IRBuilder.h\"\n#include \"llvm/IR/LLVMContext.h\"\n#include \"llvm/IR/Module.h\"\n#include \"xla/error_spec.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/literal.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/service/hlo_module_config.h\"\n#include \"xla/service/llvm_ir/ir_array.h\"\n#include \"xla/test.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tests/test_macros.h\"\n#include \"xla/types.h\"\n#include \"tsl/platform/ml_dtypes.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nusing std::nullopt;\nclass ElementalIrEmitterExecutionTest : public HloTestBase {\n protected:\n void RunTest(const std::string& hlo_text, absl::Span args) {\n HloModuleConfig config;\n config.set_debug_options(GetDebugOptionsForTest());\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text, config));\n EXPECT_TRUE(RunAndCompareNoHloPasses(std::move(module), args, nullopt));\n }\n void RunTypeConversionTest(absl::string_view hlo_text) {\n HloModuleConfig config;\n auto debug_options = GetDebugOptionsForTest();\n debug_options.set_xla_cpu_fast_math_honor_nans(true);\n debug_options.set_xla_cpu_fast_math_honor_infs(true);\n config.set_debug_options(debug_options);\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text, config));\n EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{(0.)}));\n }\n};\nclass ElementalIrEmitterExecutionTestWithoutFastMinMax\n : public ElementalIrEmitterExecutionTest {\n protected:\n DebugOptions GetDebugOptionsForTest() override {\n DebugOptions debug_options =\n ElementalIrEmitterExecutionTest::GetDebugOptionsForTest();\n debug_options.set_xla_cpu_enable_fast_min_max(false);\n debug_options.set_xla_gpu_enable_fast_min_max(false);\n return 
debug_options;\n }\n};\ntemplate \nclass ElementalIrEmitterExecutionTypedTest\n : public ElementalIrEmitterExecutionTest {\n protected:\n const std::string& TypeName() {\n return primitive_util::LowercasePrimitiveTypeName(\n primitive_util::NativeToPrimitiveType());\n }\n};\nusing FloatTypes =\n ::testing::Types;\nTYPED_TEST_SUITE(ElementalIrEmitterExecutionTypedTest, FloatTypes);\nXLA_TEST_F(ElementalIrEmitterExecutionTest, DotFusion) {\n const std::string hlo_text = R\"(\nHloModule FusedDot\nfused_computation {\n arg0 = s32[1,2,1]{2,1,0} parameter(0)\n reshape.lhs = s32[2,1]{1,0} reshape(arg0)\n arg1 = s32[1,2,1]{2,1,0} parameter(1)\n reshape.rhs = s32[2,1]{1,0} reshape(arg1)\n ROOT dot = s32[1,1]{1,0} dot(reshape.lhs, reshape.rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}\n}\nENTRY main {\n entry_arg0 = s32[1,2,1]{2,1,0} parameter(0)\n entry_arg1 = s32[1,2,1]{2,1,0} parameter(1)\n ROOT fusion = s32[1,1]{1,0} fusion(entry_arg0, entry_arg1), kind=kLoop, calls=fused_computation\n}\n)\";\n Literal lhs = LiteralUtil::CreateR3({{{1}, {2}}});\n Literal rhs = LiteralUtil::CreateR3({{{3}, {4}}});\n RunTest(hlo_text, {&lhs, &rhs});\n}\nXLA_TEST_F(ElementalIrEmitterExecutionTest, ScalarDotFusion) {\n const char* hlo_text = R\"(\nHloModule ScalarDotFusion\nfused_computation {\n arg0 = s32[2,2]{1,0} parameter(0)\n reshape.lhs = s32[4]{0} reshape(arg0)\n arg1 = s32[2,2]{1,0} parameter(1)\n reshape.rhs = s32[4]{0} reshape(arg1)\n ROOT dot = s32[] dot(reshape.lhs, reshape.rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}\n}\nENTRY main {\n entry_arg0 = s32[2,2]{1,0} parameter(0)\n entry_arg1 = s32[2,2]{1,0} parameter(1)\n ROOT fusion = s32[] fusion(entry_arg0, entry_arg1), kind=kLoop, calls=fused_computation\n}\n)\";\n Literal lhs = LiteralUtil::CreateR2({{1, 2}, {3, 4}});\n Literal rhs = LiteralUtil::CreateR2({{10, 20}, {30, 40}});\n RunTest(hlo_text, {&lhs, &rhs});\n}\nXLA_TEST_F(ElementalIrEmitterExecutionTest, BatchDot) {\n const char* hlo_text = 
R\"(\nHloModule BatchDot\nfused_computation.1 {\n param_0 = f64[1,1,8]{2,1,0} parameter(0)\n r.1 = f64[2,4]{1,0} reshape(param_0)\n param_1 = f64[1,2,2,2,1]{4,3,2,1,0} parameter(1)\n r.2 = f64[2,4,1]{2,1,0} reshape(param_1)\n ROOT dot = f64[2,1]{1,0} dot(r.1, r.2), lhs_batch_dims={0},\n lhs_contracting_dims={1},\n rhs_batch_dims={0},\n rhs_contracting_dims={1}\n}\nENTRY resampler_Resampler.49 {\n p0 = f64[1,1,8]{2,1,0} parameter(0)\n p1 = f64[1,2,2,2,1]{4,3,2,1,0} parameter(1)\n ROOT f = f64[2,1]{1,0} fusion(p0, p1), kind=kLoop, calls=fused_computation.1\n}\n)\";\n HloModuleConfig config;\n auto debug_options = GetDebugOptionsForTest();\n debug_options.add_xla_disable_hlo_passes(\"layout-assignment\");\n config.set_debug_options(debug_options);\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text, config));\n EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{4e-3, 4e-3}));\n}\nXLA_TEST_F(ElementalIrEmitterExecutionTest,\n DivideComplexNumbersWithInfiniteNormRhs) {\n constexpr char hlo_text[] = R\"(\n HloModule DivideComplexNumbers\n ENTRY DivideComplexNumbers {\n constant.1 = c64[8]{0} constant({\n (1, 1), (1, inf), (1, inf), (nan, 1),\n (inf, inf), (inf, nan), (nan, nan), (1, 2)})\n real = f32[8]{0} constant({nan, nan, inf, inf, inf, 1, inf, 3})\n imag = f32[8]{0} constant({inf, inf, inf, inf, 1, inf, inf, 4})\n complex.2 = c64[8]{0} complex(real, imag)\n ROOT divide.1 = c64[8]{0} divide(constant.1, complex.2)\n }\n )\";\n HloModuleConfig config;\n auto debug_options = GetDebugOptionsForTest();\n debug_options.set_xla_cpu_fast_math_honor_nans(true);\n debug_options.set_xla_cpu_fast_math_honor_infs(true);\n config.set_debug_options(debug_options);\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text, config));\n EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{(0.)}));\n}\nXLA_TEST_F(ElementalIrEmitterExecutionTest,\n DivideComplexNumbersWithFiniteNormRhs) {\n constexpr char 
hlo_text[] = R\"(\n HloModule DivideComplexNumbers\n ENTRY DivideComplexNumbers {\n constant.1 = c64[5]{0} constant({\n (1, inf), (inf, 1), (inf, nan), (inf, inf), (nan, inf)})\n real = f32[5]{0} constant({1, 1, 1, 1, 1})\n imag = f32[5]{0} constant({1, 1, 1, 1, 1})\n complex.2 = c64[5]{0} complex(real, imag)\n ROOT divide.1 = c64[5]{0} divide(constant.1, complex.2)\n }\n )\";\n HloModuleConfig config;\n auto debug_options = GetDebugOptionsForTest();\n debug_options.set_xla_cpu_fast_math_honor_nans(true);\n debug_options.set_xla_cpu_fast_math_honor_infs(true);\n config.set_debug_options(debug_options);\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text, config));\n EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{(0.)}));\n}\nXLA_TEST_F(ElementalIrEmitterExecutionTest,\n DivideComplexNumbersWithZeroNormRhs) {\n constexpr char hlo_text[] = R\"(\n HloModule DivideComplexNumbers\n ENTRY DivideComplexNumbers {\n constant.1 = c64[9]{0} constant({\n (1, 1), (1, nan), (1, inf), (inf, inf), (inf, 1),\n (inf, nan), (nan, 1), (nan, inf), (nan, nan)})\n real = f32[9]{0} constant({0, 0, 0, 0, 0, 0, 0, 0, 0})\n imag = f32[9]{0} constant({0, 0, 0, 0, 0, 0, 0, 0, 0})\n complex.2 = c64[9]{0} complex(real, imag)\n ROOT divide.1 = c64[9]{0} divide(constant.1, complex.2)\n }\n )\";\n HloModuleConfig config;\n auto debug_options = GetDebugOptionsForTest();\n debug_options.set_xla_cpu_fast_math_honor_nans(true);\n debug_options.set_xla_cpu_fast_math_honor_infs(true);\n config.set_debug_options(debug_options);\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text, config));\n EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{(0.)}));\n}\nTYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertFloatsToFloat) {\n auto tname = this->TypeName();\n if (std::is_same() ||\n std::is_same() ||\n std::is_same() ||\n std::is_same()) {\n GTEST_SKIP() << \"Skipping test for type \" << tname;\n }\n const auto 
hlo_text = absl::StrReplaceAll(R\"(\n HloModule m\n ENTRY main {\n f16_ = f16[] parameter(0)\n f32_ = f32[] parameter(1)\n f64_ = f64[] parameter(2)\n bf16_ = bf16[] parameter(3)\n converted_f16 = ${tname}[] convert(f16_)\n converted_f32 = ${tname}[] convert(f32_)\n converted_f64 = ${tname}[] convert(f64_)\n converted_bf16 = ${tname}[] convert(bf16_)\n ROOT tuple = (${tname}[], ${tname}[], ${tname}[], ${tname}[]) tuple(\n converted_f16, converted_f32, converted_f64, converted_bf16)\n }\n )\",\n {{\"${tname}\", tname}});\n ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text);\n}\nTYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertSignedToFloat) {\n auto tname = this->TypeName();\n const auto hlo_text = absl::StrReplaceAll(R\"(\n HloModule m\n ENTRY main {\n s8_ = s8[] parameter(0)\n s16_ = s16[] parameter(1)\n s32_ = s32[] parameter(2)\n s64_ = s64[] parameter(3)\n converted_s8 = ${tname}[] convert(s8_)\n converted_s16 = ${tname}[] convert(s16_)\n converted_s32 = ${tname}[] convert(s32_)\n converted_s64 = ${tname}[] convert(s64_)\n ROOT tuple = (${tname}[], ${tname}[], ${tname}[], ${tname}[]) tuple(\n converted_s8, converted_s16, converted_s32, converted_s64)\n }\n )\",\n {{\"${tname}\", tname}});\n ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text);\n}\nTYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertUnsignedToFloat) {\n auto tname = this->TypeName();\n const auto hlo_text = absl::StrReplaceAll(R\"(\n HloModule m\n ENTRY main {\n u8_ = u8[] parameter(0)\n u16_ = u16[] parameter(1)\n u32_ = u32[] parameter(2)\n u64_ = u64[] parameter(3)\n converted_u8 = ${tname}[] convert(u8_)\n converted_u16 = ${tname}[] convert(u16_)\n converted_u32 = ${tname}[] convert(u32_)\n converted_u64 = ${tname}[] convert(u64_)\n ROOT tuple = (${tname}[], ${tname}[], ${tname}[], ${tname}[]) tuple(\n converted_u8, converted_u16, converted_u32, converted_u64)\n }\n )\",\n {{\"${tname}\", tname}});\n 
ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text);\n}\nTYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertFloatToFloats) {\n auto tname = this->TypeName();\n const auto hlo_text = absl::StrReplaceAll(R\"(\n HloModule m\n ENTRY main {\n to_f16 = ${tname}[] parameter(0)\n to_f32 = ${tname}[] parameter(1)\n to_f64 = ${tname}[] parameter(2)\n to_bf16 = ${tname}[] parameter(3)\n f16_ = f16[] convert(to_f16)\n f32_ = f32[] convert(to_f32)\n f64_ = f64[] convert(to_f64)\n bf16_ = bf16[] convert(to_f64)\n ROOT tuple = (f16[], f32[], f64[], bf16[]) tuple(f16_, f32_, f64_, bf16_)\n }\n )\",\n {{\"${tname}\", tname}});\n ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text);\n}\nTYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertFloatToSigned) {\n auto tname = this->TypeName();\n const auto hlo_text = absl::StrReplaceAll(R\"(\n HloModule m\n ENTRY main {\n to_s8 = ${tname}[] parameter(0)\n to_s16 = ${tname}[] parameter(1)\n to_s32 = ${tname}[] parameter(2)\n to_s64 = ${tname}[] parameter(3)\n s8_ = s8[] convert(to_s8)\n s16_ = s16[] convert(to_s16)\n s32_ = s32[] convert(to_s32)\n s64_ = s64[] convert(to_s64)\n ROOT tuple = (s8[], s16[], s32[], s64[]) tuple(s8_, s16_, s32_, s64_)\n }\n )\",\n {{\"${tname}\", tname}});\n ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text);\n}\nTYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertFloatToUnsigned) {\n auto tname = this->TypeName();\n const auto hlo_text = absl::StrReplaceAll(R\"(\n HloModule m\n ENTRY main {\n to_u8 = ${tname}[] parameter(0)\n to_u16 = ${tname}[] parameter(1)\n to_u32 = ${tname}[] parameter(2)\n to_u64 = ${tname}[] parameter(3)\n u8_ = u8[] convert(to_u8)\n u16_ = u16[] convert(to_u16)\n u32_ = u32[] convert(to_u32)\n u64_ = u64[] convert(to_u64)\n ROOT tuple = (u8[], u16[], u32[], u64[]) tuple(u8_, u16_, u32_, u64_)\n }\n )\",\n {{\"${tname}\", tname}});\n 
ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text);\n}\nTYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertFloatToComplex) {\n auto tname = this->TypeName();\n const auto hlo_text = absl::StrReplaceAll(R\"(\n HloModule m\n ENTRY main {\n to_c64 = ${tname}[] parameter(0)\n to_c128 = ${tname}[] parameter(1)\n c64_ = c64[] convert(to_c64)\n c128_ = c128[] convert(to_c128)\n ROOT tuple = (c64[], c128[]) tuple(c64_, c128_)\n }\n )\",\n {{\"${tname}\", tname}});\n ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text);\n}\nTYPED_TEST(ElementalIrEmitterExecutionTypedTest, CompareFloat) {\n auto tname = this->TypeName();\n if (std::is_same()) {\n GTEST_SKIP() << \"Skipping test for type \" << tname;\n }\n const auto hlo_text = absl::StrReplaceAll(R\"(\n HloModule m\n ENTRY main {\n p0 = ${tname}[4] parameter(0)\n p1 = ${tname}[4] parameter(1)\n ROOT cmp = pred[4] compare(p0, p1), direction=LT\n})\",\n {{\"${tname}\", tname}});\n Literal lhs = LiteralUtil::CreateR1(\n {TypeParam(1.), TypeParam(2.), TypeParam(3.), TypeParam(4.)});\n Literal rhs = LiteralUtil::CreateR1(\n {TypeParam(4.), TypeParam(4.), TypeParam(2.), TypeParam(1.)});\n ElementalIrEmitterExecutionTest::RunTest(hlo_text, {&lhs, &rhs});\n}\nTYPED_TEST(ElementalIrEmitterExecutionTypedTest, IotaFloat) {\n auto tname = this->TypeName();\n if (std::is_same() ||\n std::is_same() ||\n std::is_same() ||\n std::is_same() ||\n std::is_same()) {\n GTEST_SKIP() << \"Skipping test for type \" << tname;\n }\n const auto hlo_text = absl::StrReplaceAll(R\"(\n HloModule m\n ENTRY main {\n ROOT iota_ = ${tname}[4] iota(), iota_dimension=0\n }\n )\",\n {{\"${tname}\", tname}});\n ElementalIrEmitterExecutionTest::RunTest(hlo_text, {});\n}\nTYPED_TEST(ElementalIrEmitterExecutionTypedTest, BatchDotFloat) {\n auto tname = this->TypeName();\n const auto hlo_text = absl::StrReplaceAll(R\"(\n HloModule matmul\n ENTRY main {\n x = ${tname}[8,16] parameter(0)\n y = ${tname}[8,16,32] parameter(1)\n ROOT dot = 
${tname}[8,32] dot(x, y), lhs_batch_dims={0},\n rhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_contracting_dims={1}\n }\n )\",\n {{\"${tname}\", tname}});\n HloModuleConfig config;\n DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();\n config.set_debug_options(debug_options);\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n HloTestBase::ParseAndReturnVerifiedModule(hlo_text, config));\n EXPECT_TRUE(\n HloTestBase::RunAndCompare(std::move(module), ErrorSpec{1e-5, 1e-5}));\n}\nXLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,\n MinimumHandlesNaNsOnTheLeft) {\n constexpr absl::string_view kHloText = R\"(\nHloModule t\nENTRY e {\n neg1 = f32[] constant(-1)\n neg1s = f32[5,5] broadcast(neg1), dimensions={}\n nans = f32[5,5] sqrt(neg1s)\n ROOT min = f32[5,5] minimum(nans, neg1s)\n})\";\n EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));\n}\nXLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,\n DISABLED_MinimumHandlesNaNsOnTheRight) {\n constexpr absl::string_view kHloText = R\"(\nHloModule t\nENTRY e {\n neg1 = f32[] constant(-1)\n neg1s = f32[5,5] broadcast(neg1), dimensions={}\n nans = f32[5,5] sqrt(neg1s)\n ROOT min = f32[5,5] minimum(neg1s, nans)\n})\";\n EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));\n}\nXLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,\n MaximumHandlesNaNsOnTheLeft) {\n constexpr absl::string_view kHloText = R\"(\nHloModule t\nENTRY e {\n neg1 = f32[] constant(-1)\n neg1s = f32[5,5] broadcast(neg1), dimensions={}\n nans = f32[5,5] sqrt(neg1s)\n ROOT max = f32[5,5] maximum(nans, neg1s)\n})\";\n EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));\n}\nXLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,\n MaximumHandlesNaNsOnTheRight) {\n constexpr absl::string_view kHloText = R\"(\nHloModule t\nENTRY e {\n neg1 = f32[] constant(-1)\n neg1s = f32[5,5] broadcast(neg1), dimensions={}\n nans = f32[5,5] sqrt(neg1s)\n ROOT max = f32[5,5] maximum(neg1s, 
nans)\n})\";\n EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));\n}\nXLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,\n MinimumReturnsLHS) {\n constexpr absl::string_view kHloText = R\"(\nHloModule t\nENTRY e {\n zero = f32[] constant(0)\n zeros = f32[5,5] broadcast(zero), dimensions={}\n one = f32[] constant(1)\n ones = f32[5,5] broadcast(one), dimensions={}\n ROOT min = f32[5,5] minimum(zeros, ones)\n})\";\n EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3,\n 1e-3}));\n}\nXLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,\n MinimumReturnsRHS) {\n constexpr absl::string_view kHloText = R\"(\nHloModule t\nENTRY e {\n zero = f32[] constant(0)\n zeros = f32[5,5] broadcast(zero), dimensions={}\n one = f32[] constant(1)\n ones = f32[5,5] broadcast(one), dimensions={}\n ROOT min = f32[5,5] minimum(ones, zeros)\n})\";\n EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3,\n 1e-3}));\n}\nXLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,\n MaximumReturnsLHS) {\n constexpr absl::string_view kHloText = R\"(\nHloModule t\nENTRY e {\n zero = f32[] constant(0)\n zeros = f32[5,5] broadcast(zero), dimensions={}\n one = f32[] constant(1)\n ones = f32[5,5] broadcast(one), dimensions={}\n ROOT max = f32[5,5] maximum(ones, zeros)\n})\";\n EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3,\n 1e-3}));\n}\nXLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,\n MaximumReturnsRHS) {\n constexpr absl::string_view kHloText = R\"(\nHloModule t\nENTRY e {\n zero = f32[] constant(0)\n zeros = f32[5,5] broadcast(zero), dimensions={}\n one = f32[] constant(1)\n ones = f32[5,5] broadcast(one), dimensions={}\n ROOT max = f32[5,5] maximum(zeros, ones)\n})\";\n EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3,\n 1e-3}));\n}\nclass ElementalIrEmitterInternalTest : public HloTestBase {};\nXLA_TEST_F(ElementalIrEmitterInternalTest, SparseDotIsUnsupported) {\n constexpr absl::string_view kHloText = R\"(\nHloModule test\nENTRY main {\n lhs = 
f16[5,16] parameter(0)\n rhs = f16[32,10] parameter(1)\n meta = u16[5,2] parameter(2)\n ROOT dot = f32[5,10] dot(lhs, rhs, meta),\n lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloText));\n HloInstruction* root = module->entry_computation()->root_instruction();\n llvm::LLVMContext llvm_context;\n llvm::Module llvm_module(\"\", llvm_context);\n llvm::IRBuilder<> builder(llvm_context);\n ElementalIrEmitterForTests emitter(&llvm_module, &builder);\n llvm_ir::IrArray::Index test_index{builder.getInt64Ty()};\n auto result = emitter.TestElementalDot(root, test_index);\n EXPECT_FALSE(result.ok());\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/elemental_ir_emitter.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/elemental_ir_emitter_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1137,"cells":{"ID":{"kind":"string","value":"570c48f7-236d-4c3e-8e7d-44cba5bf1ea5"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"buffer_assignment"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/buffer_assignment.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/buffer_assignment_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/buffer_assignment.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/btree_map.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include 
\"absl/container/flat_hash_set.h\"\n#include \"absl/log/check.h\"\n#include \"absl/memory/memory.h\"\n#include \"absl/status/status.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_format.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_op_metadata.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_live_range.h\"\n#include \"xla/map_util.h\"\n#include \"xla/service/buffer_value.h\"\n#include \"xla/service/buffer_value_containers.h\"\n#include \"xla/service/heap_simulator/heap_simulator.h\"\n#include \"xla/service/hlo.pb.h\"\n#include \"xla/service/hlo_alias_analysis.h\"\n#include \"xla/service/hlo_buffer.h\"\n#include \"xla/service/hlo_value.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/status_macros.h\"\n#include \"xla/types.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/numbers.h\"\nnamespace xla {\nnamespace {\nusing absl::flat_hash_map;\nusing absl::flat_hash_set;\nusing absl::StrAppend;\nusing absl::StrAppendFormat;\nusing memory_space_assignment::PresetAssignments;\nusing ::tsl::strings::HumanReadableNumBytes;\nabsl::flat_hash_map BuildIdToHloInstructionMap(\n const HloModule* module) {\n absl::flat_hash_map id_to_hlo_instruction;\n for (const HloComputation* computation : module->computations()) {\n for (const HloInstruction* instruction : computation->instructions()) {\n id_to_hlo_instruction[instruction->unique_id()] = instruction;\n }\n }\n return id_to_hlo_instruction;\n}\nabsl::StatusOr>\nBuildIdToLogicalBufferMap(\n const BufferAssignmentProto& proto,\n const absl::flat_hash_map&\n id_to_hlo_instruction,\n const std::unique_ptr& alias_analysis) {\n absl::flat_hash_map id_to_logical_buffer;\n for (const LogicalBufferProto& logical_buffer_proto :\n proto.logical_buffers()) {\n TF_RET_CHECK(logical_buffer_proto.has_defined_at())\n << \"Expected logical buffer to have location information in the proto.\";\n 
TF_RET_CHECK(id_to_hlo_instruction.contains(\n logical_buffer_proto.defined_at().instruction_id()))\n << \"Expected hlo instruction \"\n << \"with the id '\" << logical_buffer_proto.defined_at().instruction_id()\n << \"' in the proto to also exist in the \"\n \"HLO module.\";\n const HloInstruction* hlo_instruction = id_to_hlo_instruction.at(\n logical_buffer_proto.defined_at().instruction_id());\n std::vector shape_idx_vals;\n absl::c_copy(logical_buffer_proto.defined_at().shape_index(),\n std::back_inserter(shape_idx_vals));\n ShapeIndex proto_shape_index(shape_idx_vals);\n auto& logical_buffer = alias_analysis->dataflow_analysis().GetUniqueValueAt(\n hlo_instruction, proto_shape_index);\n logical_buffer.set_color(logical_buffer_proto.color());\n id_to_logical_buffer[logical_buffer_proto.id()] = &logical_buffer;\n }\n return id_to_logical_buffer;\n}\n} \nabsl::Status GatherComputationsByAllocationType(\n const HloModule* module,\n std::vector* thread_local_computations,\n std::vector* global_computations) {\n std::deque> worklist;\n worklist.push_back(std::make_pair(module->entry_computation(),\n false));\n flat_hash_set thread_local_set;\n flat_hash_set global_set;\n while (!worklist.empty()) {\n auto worklist_front = worklist.front();\n worklist.pop_front();\n const HloComputation* computation = worklist_front.first;\n bool is_thread_local = worklist_front.second;\n bool in_thread_local_set = thread_local_set.contains(computation);\n bool in_global_set = global_set.contains(computation);\n if ((is_thread_local && in_thread_local_set) ||\n (!is_thread_local && in_global_set)) {\n continue;\n }\n if ((is_thread_local && in_global_set) ||\n (!is_thread_local && in_thread_local_set)) {\n return InvalidArgument(\n \"computation %s has conflicting allocation requirements (global \"\n \"and thread-local)\",\n computation->name());\n }\n if (is_thread_local) {\n thread_local_set.insert(computation);\n } else {\n global_set.insert(computation);\n }\n for (auto* 
instruction : computation->instructions()) {\n for (HloComputation* subcomputation :\n instruction->called_computations()) {\n switch (instruction->opcode()) {\n case HloOpcode::kCall:\n case HloOpcode::kConditional:\n case HloOpcode::kWhile:\n case HloOpcode::kAsyncStart:\n case HloOpcode::kAsyncUpdate:\n case HloOpcode::kAsyncDone:\n if (is_thread_local) {\n return InvalidArgument(\n \"computation %s cannot contain call/while op because it \"\n \"requires thread-local buffer allocations\",\n computation->name());\n }\n worklist.push_back(std::make_pair(subcomputation,\n false)); \n break;\n case HloOpcode::kCustomCall:\n case HloOpcode::kAllReduce:\n case HloOpcode::kReduceScatter:\n case HloOpcode::kAllReduceStart:\n case HloOpcode::kMap:\n case HloOpcode::kReduce:\n case HloOpcode::kReduceWindow:\n case HloOpcode::kScatter:\n case HloOpcode::kSelectAndScatter:\n case HloOpcode::kSort:\n case HloOpcode::kFusion:\n worklist.push_back(std::make_pair(subcomputation,\n true)); \n break;\n default:\n return Internal(\"Unexpected calling opcode: %s\",\n HloOpcodeString(instruction->opcode()));\n }\n }\n }\n }\n for (auto* computation : module->MakeComputationPostOrder()) {\n if (thread_local_set.contains(computation)) {\n thread_local_computations->push_back(computation);\n } else if (global_set.contains(computation)) {\n global_computations->push_back(computation);\n }\n }\n return absl::OkStatus();\n}\nstd::string BufferAllocation::Slice::ToString() const {\n return absl::StrCat(\"{index:\", allocation_ == nullptr ? 
-1 : index(),\n \", offset:\", offset_, \", size:\", size_, \"}\");\n}\nBufferAllocation::Slice BufferAllocation::GetSlice(\n const HloValue& buffer) const {\n const OffsetSize os = FindOrDie(assigned_buffers_, &buffer);\n return Slice(this, os.offset, os.size);\n}\nvoid BufferAllocation::AddAssignment(const HloValue& buffer, int64_t offset,\n int64_t size) {\n VLOG(4) << \"Adding the following buffer to allocation #\" << index()\n << absl::StrFormat(\" (size=%d, offset=%d) %s\", size, offset,\n buffer.ToShortString());\n CHECK(!assigned_buffers_.contains(&buffer))\n << \"LogicalBuffer \" << buffer << \" already assigned to allocation \"\n << index_;\n CHECK_LE(offset, size_) << \"LogicalBuffer \" << buffer\n << \" offset out of range\";\n CHECK_LE(offset + size, size_)\n << \"LogicalBuffer \" << buffer\n << \" size out of range at offset: \" << offset << \" with size: \" << size;\n if (!(IsPreallocatedTempBuffer() && color() != 0)) {\n CHECK_EQ(buffer.color(), color())\n << \"Buffer color \" << buffer.color() << \" for buffer \" << buffer\n << \" does not match allocation color \" << color() << \".\";\n }\n OffsetSize offset_size;\n offset_size.offset = offset;\n offset_size.size = size;\n assigned_buffers_.emplace(&buffer, offset_size);\n for (HloPosition position : buffer.positions()) {\n Shape* shape = ShapeUtil::GetMutableSubshape(\n position.instruction->mutable_shape(), position.index);\n if (shape->has_layout()) {\n shape->mutable_layout()->set_memory_space(buffer.color());\n }\n }\n}\nBufferAllocationProto BufferAllocation::ToProto() const {\n BufferAllocationProto proto;\n proto.set_index(index_);\n proto.set_size(size_);\n proto.set_is_thread_local(is_thread_local_);\n proto.set_is_tuple(is_tuple_);\n proto.set_color(color_);\n if (is_entry_computation_parameter_) {\n proto.set_is_entry_computation_parameter(true);\n for (int64_t idx : param_shape_index()) {\n proto.add_parameter_shape_index(idx);\n }\n proto.set_parameter_number(parameter_number_);\n 
}\n proto.set_is_constant(is_constant_);\n proto.set_maybe_live_out(maybe_live_out_);\n for (const auto& buffer_offset_size : assigned_buffers_) {\n BufferAllocationProto::Assigned* proto_assigned = proto.add_assigned();\n proto_assigned->set_logical_buffer_id(buffer_offset_size.first->id());\n proto_assigned->set_offset(buffer_offset_size.second.offset);\n proto_assigned->set_size(buffer_offset_size.second.size);\n }\n absl::c_sort(*proto.mutable_assigned(),\n [](const BufferAllocationProto::Assigned& assign1,\n const BufferAllocationProto::Assigned& assign2) {\n return assign1.logical_buffer_id() <\n assign2.logical_buffer_id();\n });\n return proto;\n}\nstatic bool CompareHloValuesById(const HloValue* a, const HloValue* b) {\n return a->id() < b->id();\n}\nstatic const HloInstruction* GetEntryParameterInstruction(\n const BufferAllocation& alloc) {\n for (const auto& p : alloc.assigned_buffers()) {\n const HloValue* value = p.first;\n const HloInstruction* instr = value->instruction();\n if (instr->opcode() == HloOpcode::kParameter &&\n instr->parent() == instr->GetModule()->entry_computation()) {\n return instr;\n }\n }\n return nullptr;\n}\nstatic const HloInstruction* GetOutputInstruction(\n const BufferAllocation& alloc) {\n for (const auto& p : alloc.assigned_buffers()) {\n const HloValue* value = p.first;\n for (const HloPosition& position : value->positions()) {\n const HloInstruction* instr = position.instruction;\n if (position.index.empty() &&\n instr->parent()->root_instruction() == instr &&\n instr->parent()->IsEntryComputation()) {\n return instr;\n }\n }\n }\n return nullptr;\n}\nstd::string BufferAllocation::ToShortString() const {\n std::string output;\n StrAppendFormat(&output, \"allocation %d: size %d\", index_, size());\n if (color() != 0) {\n StrAppend(&output, \", color \", color());\n }\n if (is_entry_computation_parameter()) {\n const HloInstruction* param = GetEntryParameterInstruction(*this);\n StrAppend(&output, \", parameter \", 
parameter_number(), \", shape |\",\n param ? param->shape().ToString(false)\n : \"\",\n \"| at ShapeIndex \", param_shape_index().ToString());\n }\n if (const HloInstruction* instr = GetOutputInstruction(*this)) {\n StrAppend(&output, \", output shape is |\",\n instr->shape().ToString(false), \"|\");\n }\n if (is_constant()) {\n StrAppend(&output, \", constant\");\n }\n if (is_thread_local()) {\n StrAppend(&output, \", thread-local\");\n }\n if (maybe_live_out()) {\n StrAppend(&output, \", maybe-live-out\");\n }\n if (IsPreallocatedTempBuffer()) {\n StrAppend(&output, \", preallocated-temp\");\n }\n StrAppend(&output, \":\\n\");\n return output;\n}\nstd::string BufferAllocation::ToString() const {\n std::string output = ToShortString();\n std::vector sorted_buffers;\n for (const auto& buffer_offset_size : assigned_buffers_) {\n sorted_buffers.push_back(buffer_offset_size.first);\n }\n absl::c_sort(sorted_buffers, &CompareHloValuesById);\n for (const HloValue* buffer : sorted_buffers) {\n const OffsetSize& offset_size = FindOrDie(assigned_buffers_, buffer);\n StrAppend(&output,\n absl::StrFormat(\n \" value: %s (size=%d,offset=%d): %s\\n\",\n buffer->ToShortString(), offset_size.size, offset_size.offset,\n ShapeUtil::HumanStringWithLayout(buffer->shape())));\n }\n return output;\n}\nstd::ostream& operator<<(std::ostream& out, const BufferAllocation& buffer) {\n out << buffer.ToString();\n return out;\n}\nstd::ostream& operator<<(std::ostream& out, const BufferAllocation::Slice& s) {\n out << s.ToString();\n return out;\n}\nbool BufferAssignment::HasAllocation(const HloValue& value) const {\n return allocation_index_for_value_.contains(&value);\n}\nbool BufferAssignment::HasAllocation(HloValue::Id value_id) const {\n return HasAllocation(dataflow_analysis().GetValue(value_id));\n}\nbool BufferAssignment::HasAllocation(const HloBuffer& buffer) const {\n return allocation_index_for_value_.contains(buffer.values()[0]);\n}\nconst BufferAllocation& 
BufferAssignment::GetAssignedAllocation(\n const HloValue& value) const {\n CHECK(HasAllocation(value));\n return GetAllocation(allocation_index_for_value_.at(&value));\n}\nconst BufferAllocation& BufferAssignment::GetAssignedAllocation(\n const HloBuffer& hlo_buffer) const {\n return GetAssignedAllocation(*hlo_buffer.values()[0]);\n}\nBufferAllocation* BufferAssignment::GetMutableAssignedAllocation(\n const HloBuffer& buffer) {\n return const_cast(&GetAssignedAllocation(buffer));\n}\nstd::set BufferAssignment::GetAllSlices(\n const HloInstruction* instruction, const ShapeIndex& index) const {\n std::set result;\n for (const HloValue* value :\n dataflow_analysis().GetValueSet(instruction, index).values()) {\n if (HasAllocation(*value)) {\n result.insert(GetAssignedAllocation(*value).GetSlice(*value));\n }\n }\n return result;\n}\nconst BufferAllocation& BufferAssignment::GetAllocation(\n BufferAllocation::Index index) const {\n CHECK_GE(index, 0);\n CHECK_LT(index, allocations_.size());\n return allocations_[index];\n}\nconst BufferAllocation* BufferAssignment::GetInstructionAllocation(\n const HloInstruction* hlo, const ShapeIndex& shape_index) const {\n const HloValue* value =\n dataflow_analysis().GetValueSet(hlo, shape_index).values()[0];\n if (!HasAllocation(*value)) {\n return nullptr;\n }\n const BufferAllocation& instruction_allocation =\n GetAssignedAllocation(*value);\n return &instruction_allocation;\n}\nBufferAllocation* BufferAssignment::GetMutableAllocation(\n BufferAllocation::Index index) {\n return const_cast(&GetAllocation(index));\n}\nbool BufferAssignment::HasAllocationAt(const HloInstruction* instruction,\n const ShapeIndex& index) const {\n return absl::c_any_of(\n dataflow_analysis().GetValueSet(instruction, index).values(),\n IsKeyIn(allocation_index_for_value_));\n}\nbool BufferAssignment::HasTopLevelAllocation(\n const HloInstruction* instruction) const {\n return HasAllocationAt(instruction, {});\n}\nabsl::StatusOr 
BufferAssignment::GetUniqueSlice(\n const HloInstruction* instruction, const ShapeIndex& index) const {\n VLOG(3) << \"Trying to find unique slice for \" << instruction->name() << \" [\"\n << index << \"]\";\n BufferAllocation::Slice result;\n for (const HloValue* value :\n dataflow_analysis().GetValueSet(instruction, index).values()) {\n VLOG(3) << \"Examining value \" << *value;\n if (HasAllocation(*value)) {\n VLOG(3) << \"Has allocation\";\n const BufferAllocation::Slice slice =\n GetAssignedAllocation(*value).GetSlice(*value);\n if (result.allocation() == nullptr) {\n result = slice;\n } else if (result != slice) {\n return FailedPrecondition(\n \"BufferAllocation::Slice for instruction %s at index %s cannot \"\n \"be determined at compile-time.\",\n instruction->name(), index.ToString());\n }\n } else {\n VLOG(3) << \"No allocation\";\n }\n }\n if (result.allocation() == nullptr) {\n return FailedPrecondition(\n \"BufferAllocation::Slice not assigned for instruction %s at index %s\",\n instruction->name(), index.ToString());\n }\n return result;\n}\nabsl::StatusOr\nBufferAssignment::GetUniqueTopLevelSlice(\n const HloInstruction* instruction) const {\n return GetUniqueSlice(instruction, {});\n}\nbool BufferAssignment::SharesSliceAtIndex(\n const HloInstruction* hlo_a, const ShapeIndex& shape_index_a,\n const HloInstruction* hlo_b, const ShapeIndex& shape_index_b) const {\n return GetUniqueSlice(hlo_a, shape_index_a).value() ==\n GetUniqueSlice(hlo_b, shape_index_b).value();\n}\nbool BufferAssignment::HaveDisjointSlices(const HloInstruction* hlo_a,\n const HloInstruction* hlo_b) const {\n using SliceSet = flat_hash_set;\n auto collect_slices = [&](const HloInstruction* instr) -> SliceSet {\n SliceSet slices;\n absl::Status status = ShapeUtil::ForEachSubshapeWithStatus(\n instr->shape(),\n [&](const Shape& ,\n const ShapeIndex& index) -> absl::Status {\n auto shape_slices = GetAllSlices(instr, index);\n if (shape_slices.empty()) {\n return InvalidArgument(\"No 
slices assigned to part of instr.\");\n }\n slices.insert(shape_slices.begin(), shape_slices.end());\n return absl::OkStatus();\n });\n if (!status.ok()) {\n return {};\n }\n return slices;\n };\n SliceSet slices_a = collect_slices(hlo_a);\n SliceSet slices_b = collect_slices(hlo_b);\n return !slices_a.empty() && !slices_b.empty() &&\n absl::c_none_of(slices_a, [&](const BufferAllocation::Slice& slice) {\n return slices_b.contains(slice);\n });\n}\nabsl::StatusOr\nBufferAssignment::GetUniqueTopLevelOutputSlice() const {\n return GetUniqueTopLevelSlice(\n module_->entry_computation()->root_instruction());\n}\nBufferAllocation* BufferAssignment::NewEmptyAllocation(\n int64_t size, LogicalBuffer::Color color) {\n BufferAllocation::Index index = allocations_.size();\n allocations_.emplace_back(index, size, color);\n BufferAllocation* allocation = &allocations_.back();\n return allocation;\n}\nBufferAllocation* BufferAssignment::NewAllocation(const HloBuffer& buffer,\n int64_t size) {\n BufferAllocation* allocation = NewEmptyAllocation(size, buffer.color());\n AddAssignment(allocation, buffer, 0, size);\n allocation->peak_buffers_.push_back(buffer.values()[0]);\n return allocation;\n}\nvoid BufferAssignment::AddAssignment(BufferAllocation* allocation,\n const HloBuffer& buffer, int64_t offset,\n int64_t size) {\n CHECK(allocation->is_reusable() || allocation->assigned_buffers().empty())\n << \"Non-reusable allocation already assigned a buffer: \"\n << allocation->ToString();\n for (const HloValue* buffer_value : buffer.values()) {\n CHECK(!allocation_index_for_value_.contains(buffer_value))\n << \"BufferValue \" << buffer_value << \" already has an allocation.\";\n allocation->AddAssignment(*buffer_value, offset, size);\n allocation_index_for_value_[buffer_value] = allocation->index();\n }\n if (alias_analysis().BufferLivesOut(buffer)) {\n VLOG(3) << \"HloBuffer lives out: \" << buffer.ToString();\n VLOG(3) << \"Set maybe live out: \" << allocation->ToString();\n 
allocation->set_maybe_live_out(true);\n }\n}\nvoid BufferAssignment::AddAssignment(BufferAllocation* allocation,\n const HloValue& value, int64_t offset,\n int64_t size) {\n allocation->AddAssignment(value, offset, size);\n allocation_index_for_value_[&value] = allocation->index();\n const HloValue& hlo_value =\n *CHECK_NOTNULL(dynamic_cast(&value));\n if (alias_analysis().ValueLivesOut(hlo_value)) {\n VLOG(3) << \"HloValue lives out: \" << hlo_value.ToString();\n VLOG(3) << \"Set maybe live out: \" << allocation->ToString();\n allocation->set_maybe_live_out(true);\n }\n}\nvoid BufferAssignment::CombineTempAllocations(\n const absl::flat_hash_set& private_stack_colors,\n std::optional temp_buffer_color) {\n VLOG(1) << \"CombineTempAllocations()\";\n std::deque combined_allocations;\n flat_hash_map combined_allocation_map;\n const auto first_temp_it =\n std::partition(allocations_.begin(), allocations_.end(),\n [](const BufferAllocation& allocation) {\n return !allocation.IsPreallocatedTempBuffer();\n });\n if (first_temp_it != allocations_.end()) {\n for (auto it = first_temp_it; it != allocations_.end(); ++it) {\n BufferAllocation& temp_allocation = *it;\n BufferValue::Color color = temp_allocation.color();\n auto combined_it = combined_allocation_map.find(color);\n if (combined_it == combined_allocation_map.end()) {\n VLOG(1) << \"Combined temp allocation for color \" << color\n << \" is: \" << temp_allocation;\n combined_allocations.emplace_back(temp_allocation);\n combined_allocation_map.emplace(color, &combined_allocations.back());\n continue;\n }\n if (combined_it->second->size() + it->size() >=\n multiheap_size_constraint_per_heap_) {\n VLOG(1) << \"Due to size constraint, reset temp allocation for color \"\n << color << \" to: \" << temp_allocation;\n combined_allocations.emplace_back(temp_allocation);\n combined_allocation_map.emplace(color, &combined_allocations.back());\n continue;\n }\n BufferAllocation* combined_allocation = combined_it->second;\n 
VLOG(1) << \"Combined allocation absorbing temp allocation: \"\n << temp_allocation;\n int64_t alignment = color_alignment_(color);\n int64_t base;\n bool is_private_stack = private_stack_colors.contains(color);\n if (is_private_stack) {\n base = 0;\n combined_allocation->set_size(std::max(base, temp_allocation.size()));\n } else {\n base = RoundUpTo(combined_allocation->size(), alignment);\n combined_allocation->set_size(base + temp_allocation.size());\n }\n for (const auto& buffer_offset_size : temp_allocation.assigned_buffers_) {\n const HloValue* value = buffer_offset_size.first;\n const int64_t offset = buffer_offset_size.second.offset;\n const int64_t size = buffer_offset_size.second.size;\n combined_allocation->AddAssignment(*value, base + offset, size);\n }\n if (!temp_allocation.HeapTraces().empty()) {\n CHECK_EQ(temp_allocation.HeapTraces().size(), 1);\n combined_allocation->AddHeapTrace(temp_allocation.HeapTraces().front());\n }\n if (is_private_stack) {\n if (temp_allocation.size() == combined_allocation->size()) {\n combined_allocation->peak_buffers_ = temp_allocation.peak_buffers_;\n }\n } else {\n combined_allocation->peak_buffers_.insert(\n combined_allocation->peak_buffers_.end(),\n temp_allocation.peak_buffers_.begin(),\n temp_allocation.peak_buffers_.end());\n }\n if (temp_buffer_color.has_value()) {\n if (combined_allocation->color() == 0) {\n combined_allocation->set_color(temp_buffer_color.value());\n }\n }\n }\n allocations_.erase(first_temp_it, allocations_.end());\n for (BufferAllocation& combined : combined_allocations) {\n temp_allocation_total_size_ += combined.size();\n allocations_.push_back(std::move(combined));\n }\n }\n allocation_index_for_value_.erase(allocation_index_for_value_.begin(),\n allocation_index_for_value_.end());\n for (size_t index = 0; index < allocations_.size(); ++index) {\n BufferAllocation* allocation = &allocations_[index];\n allocation->set_index(index);\n std::vector sorted_values;\n 
sorted_values.reserve(allocation->assigned_buffers_.size());\n for (const auto& buffer_offset_size : allocation->assigned_buffers_) {\n const HloValue* value = buffer_offset_size.first;\n sorted_values.emplace(sorted_values.end(), value);\n }\n absl::c_sort(sorted_values, &CompareHloValuesById);\n for (const HloValue* value : sorted_values) {\n allocation_index_for_value_[value] = index;\n }\n }\n}\nabsl::Status BufferAssignment::ComputeSummaryStats() {\n for (auto& allocation : Allocations()) {\n if (allocation.is_entry_computation_parameter()) {\n stats_.parameter_allocation_count++;\n stats_.parameter_allocation_bytes += allocation.size();\n }\n if (allocation.is_constant()) {\n stats_.constant_allocation_count++;\n stats_.constant_allocation_bytes += allocation.size();\n }\n if (allocation.maybe_live_out()) {\n stats_.maybe_live_out_allocation_count++;\n stats_.maybe_live_out_allocation_bytes += allocation.size();\n }\n if (allocation.IsPreallocatedTempBuffer()) {\n stats_.preallocated_temp_allocation_count++;\n stats_.preallocated_temp_allocation_bytes += allocation.size();\n }\n stats_.total_allocation_count++;\n stats_.total_allocation_bytes += allocation.size();\n }\n HloSchedule schedule(module_);\n bool schedule_complete = true;\n for (const auto& computation : module_->computations()) {\n if (!computation->IsFusionComputation()) {\n const HloInstructionSequence* sequence =\n hlo_ordering().SequentialOrder(*computation);\n if (sequence == nullptr) {\n schedule_complete = false;\n } else {\n schedule.set_sequence(computation, *sequence);\n }\n }\n }\n if (schedule_complete) {\n TF_RETURN_IF_ERROR(schedule.Verify());\n TF_ASSIGN_OR_RETURN(\n const int64_t min_size,\n HeapSimulator::MinimumMemoryForModule(schedule, buffer_size_));\n stats_.total_fragmentation_bytes = stats_.total_allocation_bytes - min_size;\n }\n return absl::OkStatus();\n}\nstd::string BufferAssignment::Stats::ToString() const {\n std::string s;\n StrAppendFormat(&s, \"BufferAssignment 
stats:\\n\");\n StrAppendFormat(&s, \" parameter allocation: %10s\\n\",\n HumanReadableNumBytes(parameter_allocation_bytes));\n StrAppendFormat(&s, \" constant allocation: %10s\\n\",\n HumanReadableNumBytes(constant_allocation_bytes));\n StrAppendFormat(&s, \" maybe_live_out allocation: %10s\\n\",\n HumanReadableNumBytes(maybe_live_out_allocation_bytes));\n StrAppendFormat(&s, \" preallocated temp allocation: %10s\\n\",\n HumanReadableNumBytes(preallocated_temp_allocation_bytes));\n if (preallocated_temp_fragmentation_bytes >= 0) {\n const double percent = 100. * preallocated_temp_fragmentation_bytes /\n preallocated_temp_allocation_bytes;\n StrAppendFormat(\n &s, \" preallocated temp fragmentation: %10s (%.2f%%)\\n\",\n HumanReadableNumBytes(preallocated_temp_fragmentation_bytes), percent);\n }\n StrAppendFormat(&s, \" total allocation: %10s\\n\",\n HumanReadableNumBytes(total_allocation_bytes));\n if (total_fragmentation_bytes >= 0) {\n const double percent =\n 100. * total_fragmentation_bytes / total_allocation_bytes;\n StrAppendFormat(&s, \" total fragmentation: %10s (%.2f%%)\\n\",\n HumanReadableNumBytes(total_fragmentation_bytes), percent);\n }\n return s;\n}\nstd::string BufferAssignment::ToString() const {\n std::string output;\n absl::StrAppend(&output, \"BufferAssignment:\\n\");\n std::vector used_values;\n int64_t total_size = 0;\n for (auto& allocation : allocations_) {\n total_size += allocation.size();\n absl::StrAppend(&output, allocation.ToString());\n for (const auto& p : allocation.assigned_buffers()) {\n used_values.push_back(p.first);\n }\n }\n absl::StrAppend(&output, \"\\nTotal bytes used: \", total_size, \" (\",\n HumanReadableNumBytes(total_size), \")\\n\");\n absl::StrAppend(&output, \"\\nUsed values:\\n\");\n absl::c_sort(used_values, &CompareHloValuesById);\n for (const HloValue* value : used_values) {\n absl::StrAppend(&output, value->ToString());\n }\n return output;\n}\nstd::vector> TopKPeakBuffers(\n uint64_t k, const std::vector 
allocations) {\n absl::btree_multimap topk;\n for (const BufferAllocation& allocation : allocations) {\n for (const HloValue* value : allocation.PeakMemoryLogicalBuffers()) {\n int64_t size = allocation.assigned_buffers().at(value).size;\n if (topk.size() < k) {\n topk.insert({size, value});\n } else {\n auto it = topk.begin();\n if (size > it->first) {\n topk.erase(it);\n topk.insert({size, value});\n }\n }\n }\n }\n std::vector> topk_descending;\n topk_descending.reserve(topk.size());\n absl::c_reverse_copy(topk, std::back_inserter(topk_descending));\n return topk_descending;\n}\nstd::string BufferAssignment::ToVerboseString(\n size_t max_buffers_to_show) const {\n std::string output =\n absl::StrCat(\"BufferAssignment OOM Debugging.\\n\", stats_.ToString());\n std::vector> peak_buffers =\n TopKPeakBuffers(max_buffers_to_show, allocations_);\n std::vector buf_strs;\n for (size_t i = 0; i < std::min(max_buffers_to_show, peak_buffers.size());\n ++i) {\n const HloValue* value = peak_buffers[i].second;\n const HloInstruction* instr = value->instruction();\n int64_t size = peak_buffers[i].first;\n buf_strs.push_back(absl::StrCat(\"\\n\\tBuffer \", i + 1, \":\\n\\t\\tSize: \",\n xla::HumanReadableNumBytes(size)));\n if (!instr->metadata().op_name().empty()) {\n buf_strs.push_back(absl::StrCat(\n \"\\n\\t\\tOperator: \", xla::OpMetadataToString(instr->metadata())));\n }\n if (instr->opcode() == HloOpcode::kParameter &&\n (instr->parent() == instr->GetModule()->entry_computation())) {\n buf_strs.push_back(absl::StrCat(\n \"\\n\\t\\tEntry Parameter Subshape: \",\n ShapeUtil::GetSubshape(instr->shape(), value->index()).ToString()));\n } else {\n buf_strs.push_back(\n absl::StrCat(\"\\n\\t\\tXLA Label: \", HloOpcodeString(instr->opcode()),\n \"\\n\\t\\tShape: \", value->shape().ToString()));\n }\n buf_strs.push_back(\"\\n\\t\\t==========================\\n\");\n }\n absl::StrAppend(&output, \"Peak buffers:\", absl::StrJoin(buf_strs, \"\"));\n return output;\n}\nstd::string 
BufferAssignment::BufferInfoString() const {\n std::string binfo;\n absl::StrAppend(&binfo,\n \"buffer_id,buffer_name,offset,size,\"\n \"definition_time,end_time,num_uses,use_times,use_names\\n\");\n const HloLiveRange& live_ranges = hlo_live_range();\n const auto& instruction_schedule = live_ranges.instruction_schedule();\n const auto& buffer_live_ranges = live_ranges.buffer_live_ranges();\n std::vector> buffers;\n for (const BufferAllocation& allocation : allocations_) {\n absl::c_copy(allocation.assigned_buffers(), std::back_inserter(buffers));\n }\n absl::c_sort(\n buffers,\n [](const std::pair& b1,\n const std::pair& b2) {\n return b1.first->id() < b2.first->id();\n });\n for (const auto& buffer_pair : buffers) {\n const HloValue& buffer = *buffer_pair.first;\n const BufferAllocation::OffsetSize& offset_size = buffer_pair.second;\n if (!buffer_live_ranges.contains(&buffer)) {\n continue;\n }\n std::vector> uses;\n uses.reserve(buffer.GetUses().size());\n for (const HloUse& use : buffer.GetUses()) {\n uses.emplace_back(instruction_schedule.at(use.instruction),\n use.ToString());\n }\n absl::c_sort(uses);\n std::vector use_positions;\n std::vector use_names;\n use_positions.reserve(uses.size());\n use_names.reserve(uses.size());\n for (const auto& use : uses) {\n use_positions.push_back(use.first);\n use_names.push_back(use.second);\n }\n const int64_t definition_time =\n instruction_schedule.at(buffer.defining_position().instruction);\n const int64_t end_t = buffer_live_ranges.at(&buffer).end;\n absl::StrAppend(&binfo, buffer.id(), \",\");\n absl::StrAppend(&binfo, \"\\\"\", buffer.ToShortString(), \"\\\",\");\n absl::StrAppend(&binfo, offset_size.offset, \",\");\n absl::StrAppend(&binfo, offset_size.size, \",\");\n absl::StrAppend(&binfo, definition_time, \",\");\n absl::StrAppend(&binfo, end_t, \",\");\n absl::StrAppend(&binfo, use_positions.size(), \",\");\n absl::StrAppend(&binfo, \"\\\"\", absl::StrJoin(use_positions, \";\"), \"\\\",\");\n 
absl::StrAppend(&binfo, \"\\\"\", absl::StrJoin(use_names, \";\"), \"\\\"\");\n absl::StrAppend(&binfo, \"\\n\");\n }\n return binfo;\n}\nBufferAssignmentProto BufferAssignment::ToProto() const {\n BufferAssignmentProto proto;\n const HloDataflowAnalysis& dataflow = this->dataflow_analysis();\n for (BufferValue::Id id = 0; id < dataflow.values().size(); id++) {\n auto& value = dataflow.values().at(id);\n if (HasAllocation(*value)) {\n LogicalBufferProto proto_buffer = value->ToProto(buffer_size_);\n proto.add_logical_buffers()->Swap(&proto_buffer);\n for (const HloValue* alias :\n alias_analysis().GetBufferContainingValue(*value).values()) {\n if (alias->instruction() == value->instruction() &&\n alias->index() == value->index()) {\n continue; \n }\n BufferAssignmentProto::BufferAlias* proto_alias =\n proto.add_buffer_aliases();\n LogicalBufferProto::Location proto_alias_location =\n BufferValue::ToLocationProto(*alias->instruction(), alias->index());\n proto_alias->set_source_buffer_id(value->id());\n proto_alias->mutable_location()->Swap(&proto_alias_location);\n }\n }\n }\n for (const BufferAllocation& allocation : Allocations()) {\n BufferAllocationProto proto_allocation = allocation.ToProto();\n proto.add_buffer_allocations()->Swap(&proto_allocation);\n for (const HeapSimulatorTrace& heap_trace : allocation.HeapTraces()) {\n *proto.add_heap_simulator_traces() = heap_trace;\n }\n }\n return proto;\n}\nabsl::StatusOr> BufferAssignment::FromProto(\n const BufferAssignmentProto& proto, const HloModule* module,\n BufferValue::SizeFunction buffer_size,\n HloDataflowAnalysis::CanShareBuffer can_share_buffer) {\n TF_ASSIGN_OR_RETURN(std::unique_ptr alias_analysis,\n HloAliasAnalysis::Run(module, can_share_buffer));\n auto id_to_hlo_instruction = BuildIdToHloInstructionMap(module);\n absl::flat_hash_map id_to_logical_buffer;\n TF_ASSIGN_OR_RETURN(\n id_to_logical_buffer,\n BuildIdToLogicalBufferMap(proto, id_to_hlo_instruction, alias_analysis));\n std::unique_ptr 
buffer_assignment =\n absl::WrapUnique(new BufferAssignment(\n module, nullptr, std::move(buffer_size),\n nullptr, std::move(alias_analysis),\n nullptr));\n for (const auto& alloc_proto : proto.buffer_allocations()) {\n BufferAllocation* allocation = buffer_assignment->NewEmptyAllocation(\n alloc_proto.size(), alloc_proto.color());\n CHECK(allocation->index() == alloc_proto.index())\n << \"Expected allocations in BufferAssignment proto to be sorted by \"\n \"index.\";\n allocation->set_is_thread_local(alloc_proto.is_thread_local());\n allocation->set_is_tuple(alloc_proto.is_tuple());\n allocation->set_constant(alloc_proto.is_constant());\n if (alloc_proto.is_entry_computation_parameter()) {\n std::vector shape_idx_vals;\n absl::c_copy(alloc_proto.parameter_shape_index(),\n std::back_inserter(shape_idx_vals));\n ShapeIndex shape_index(shape_idx_vals);\n allocation->set_entry_computation_parameter(\n alloc_proto.parameter_number(), shape_index, false);\n }\n for (const auto& assignee : alloc_proto.assigned()) {\n HloValue::Id logical_buffer_id = assignee.logical_buffer_id();\n const auto& buffer_val = id_to_logical_buffer[logical_buffer_id];\n buffer_assignment->AddAssignment(allocation, *buffer_val,\n assignee.offset(), assignee.size());\n }\n CHECK_EQ(allocation->maybe_live_out(), alloc_proto.maybe_live_out())\n << \"Dataflow analysis differs from proto.\";\n }\n TF_RET_CHECK(proto.logical_buffers_size() ==\n buffer_assignment->allocation_index_for_value_.size());\n for (auto& logical_buffer_proto : proto.logical_buffers()) {\n TF_RET_CHECK(buffer_assignment->HasAllocation(\n *id_to_logical_buffer[logical_buffer_proto.id()]));\n }\n return buffer_assignment;\n}\nabsl::StatusOr> BufferAssigner::Run(\n const HloModule* module, std::unique_ptr hlo_ordering,\n BufferValue::SizeFunction buffer_size,\n LogicalBuffer::AlignmentFunction color_alignment,\n bool allocate_buffers_for_constants, BufferAssigner::Colorer colorer,\n std::optional must_not_live_out,\n 
HloDataflowAnalysis::CanShareBuffer can_share_buffer,\n std::unique_ptr preset_assignments,\n const PrivateStacks& private_stacks,\n GlobalDecreasingSizeBestFitHeap::BufferIntervalCompare\n heap_buffer_interval_compare,\n std::optional isolation_options,\n std::optional temp_buffer_color) {\n BufferAssigner assigner(allocate_buffers_for_constants, std::move(colorer),\n must_not_live_out, std::move(preset_assignments));\n return assigner.CreateAssignment(\n module, std::move(hlo_ordering), std::move(buffer_size),\n std::move(color_alignment), std::move(can_share_buffer), private_stacks,\n heap_buffer_interval_compare, isolation_options, temp_buffer_color);\n}\nbool BufferAssigner::LiveRangeInterferes(const HloValue* buffer1,\n const HloValue* buffer2,\n BufferAssignment* assignment) {\n CHECK((assignment->hlo_live_range().total_order_scheduled()));\n const HloLiveRange& hlo_live_range = assignment->hlo_live_range();\n const auto& buffer_live_ranges = hlo_live_range.buffer_live_ranges();\n auto live_range_it1 = buffer_live_ranges.find(buffer1);\n CHECK(live_range_it1 != buffer_live_ranges.end())\n << \"Buffer doesn't have a proper live range:\" << buffer1->ToString();\n auto live_range_it2 = buffer_live_ranges.find(buffer2);\n CHECK(live_range_it2 != buffer_live_ranges.end())\n << \"Buffer doesn't have a proper live range:\" << buffer2->ToString();\n auto can_share_as_operand =\n [&assignment](const HloValue* user_value, const HloValue* operand_value,\n const HloLiveRange::TimeBound& operand_live_range) {\n HloPosition operand_end_position = operand_live_range.end_position;\n return user_value->instruction()->opcode() != HloOpcode::kCopy &&\n user_value->instruction()->IsUserOf(\n operand_end_position.instruction) &&\n assignment->dataflow_analysis().CanShareOperandBufferWithUser(\n operand_end_position.instruction, operand_end_position.index,\n user_value->instruction(), user_value->index());\n };\n const auto& live_range_1 = live_range_it1->second;\n const auto& 
live_range_2 = live_range_it2->second;\n if (!(live_range_1.start > live_range_2.end ||\n live_range_2.start > live_range_1.end)) {\n if (live_range_1.end == live_range_2.start) {\n auto operand_value = buffer1;\n auto user_value = buffer2;\n if (!can_share_as_operand(user_value, operand_value, live_range_1)) {\n VLOG(4) << \"End of live range of \" << buffer1->ToShortString()\n << \" is equal to the start of live range of \"\n << buffer2->ToShortString() << \", buffer cannot be shared.\";\n return true;\n }\n } else if (live_range_2.end == live_range_1.start) {\n auto operand_value = buffer2;\n auto user_value = buffer1;\n if (!can_share_as_operand(user_value, operand_value, live_range_2)) {\n VLOG(4) << \"End of live range of \" << buffer2->ToShortString()\n << \" is equal to the start of live range of \"\n << buffer1->ToShortString() << \", buffer cannot be shared.\";\n return true;\n }\n } else {\n VLOG(4) << \"Can't assign: assignee \" << *buffer1 << \" may interfere with \"\n << *buffer2;\n VLOG(4) << \"assigned_buffer.start: \" << live_range_1.start;\n VLOG(4) << \"assigned_buffer.end: \" << live_range_1.end;\n VLOG(4) << \"live_range_2.start\" << live_range_2.start;\n VLOG(4) << \"live_range_2.end\" << live_range_2.end;\n return true;\n }\n }\n return false;\n}\nbool BufferAssigner::MaybeAssignBuffer(BufferAllocation* allocation,\n const HloBuffer& hlo_buffer,\n BufferAssignment* assignment) {\n CHECK(!assignment->HasAllocation(hlo_buffer))\n << \"buffer \" << hlo_buffer << \" already has an allocation assigned.\";\n VLOG(4) << \"Trying to assign \" << hlo_buffer << \" size \"\n << assignment->HloBufferSize(hlo_buffer)\n << \" to allocation: \" << *allocation;\n if (hlo_buffer.color() != allocation->color()) {\n VLOG(4) << \"Can't assign: buffer has color \" << hlo_buffer.color()\n << \" and allocation has color \" << allocation->color() << \".\";\n return false;\n }\n if (assignment->HloBufferSize(hlo_buffer) > allocation->size()) {\n VLOG(4) << \"Can't 
assign: buffer is larger than allocation (\"\n << assignment->HloBufferSize(hlo_buffer) << \" > \"\n << allocation->size() << \")\";\n return false;\n }\n if (allocation->is_readonly()) {\n VLOG(4) << \"Can't assign: allocation is readonly\";\n return false;\n }\n if (must_not_live_out_.has_value()) {\n if (allocation->maybe_live_out()) {\n for (const HloValue* value : hlo_buffer.values()) {\n if ((*must_not_live_out_)(assignment->alias_analysis(),\n value->instruction(), value->index())) {\n VLOG(4) << \"Can't assign: \" << value->instruction()->ToString()\n << \" cannot live out of the module\";\n return false;\n }\n }\n }\n if (assignment->alias_analysis().BufferLivesOut(hlo_buffer)) {\n for (const auto& buffer_offset_size : allocation->assigned_buffers()) {\n const HloValue* value = buffer_offset_size.first;\n if ((*must_not_live_out_)(assignment->alias_analysis(),\n value->instruction(), value->index())) {\n VLOG(4) << \"Can't assign: \" << value->instruction()\n << \" cannot live out of the module\";\n return false;\n }\n }\n }\n }\n if (!allocation->is_reusable()) {\n VLOG(4) << \"Can't assign: allocation is not reusable\";\n return false;\n }\n for (const auto& buffer_offset_size : allocation->assigned_buffers()) {\n const HloValue& assigned_buffer =\n *CHECK_NOTNULL(dynamic_cast(buffer_offset_size.first));\n for (const HloValue* new_value : hlo_buffer.values()) {\n if (assignment->hlo_live_range().total_order_scheduled()) {\n if (LiveRangeInterferes(new_value, &assigned_buffer, assignment)) {\n VLOG(4) << \"Can't assign: assignee \" << assigned_buffer\n << \" live range interferes with \"\n << new_value->ToShortString();\n return false;\n }\n } else if (assignment->hlo_ordering().MayInterfere(\n assigned_buffer, *new_value,\n assignment->dataflow_analysis())) {\n VLOG(4) << \"Can't assign: assignee \" << assigned_buffer\n << \" may interfere with \" << new_value->ToShortString();\n return false;\n }\n if (new_value->instruction()->opcode() == 
HloOpcode::kCopy) {\n for (const HloPosition& assigned_buffer_position :\n assigned_buffer.positions()) {\n if (new_value->instruction()->IsUserOf(\n assigned_buffer_position.instruction)) {\n VLOG(4) << \"Can't assign: assignee \" << assigned_buffer\n << \" is used at copy instruction \"\n << new_value->ToShortString();\n return false;\n }\n }\n }\n }\n }\n if (assignment->alias_analysis().BufferLivesOut(hlo_buffer) &&\n allocation->size() != assignment->HloBufferSize(hlo_buffer)) {\n VLOG(4) << \"Can't assign: buffer \" << hlo_buffer\n << \"is live out and size not the same as allocation\";\n return false;\n }\n assignment->AddAssignment(allocation, hlo_buffer, 0,\n assignment->HloBufferSize(hlo_buffer));\n return true;\n} \nabsl::Status BufferAssigner::AssignSingleHloBuffer(\n const HloBuffer* hlo_buffer, bool is_thread_local,\n absl::flat_hash_map>*\n buffers_to_assign_sequentially,\n std::vector* allocation_indices,\n BufferAssignment* assignment) {\n const int64_t buffer_size = assignment->HloBufferSize(*hlo_buffer);\n for (const HloValue* value : hlo_buffer->values()) {\n if (value->instruction()->opcode() == HloOpcode::kConstant) {\n if (allocate_buffers_for_constants_) {\n BufferAllocation* allocation =\n assignment->NewAllocation(*hlo_buffer, buffer_size);\n allocation->set_constant(true);\n VLOG(3) << \"New allocation #\" << allocation->index() << \" for constant \"\n << *hlo_buffer << \" value ptr: \" << value;\n }\n VLOG(3) << \"Not allocating buffer for constant\";\n return absl::OkStatus();\n }\n const HloInstruction* instruction = value->instruction();\n const bool is_entry_parameter =\n instruction->opcode() == HloOpcode::kParameter &&\n instruction->parent() == instruction->GetModule()->entry_computation();\n if (is_entry_parameter) {\n bool parameter_has_alias =\n assignment->module().input_output_alias_config().ParameterHasAlias(\n instruction->parameter_number(), value->index());\n BufferAllocation* allocation =\n 
assignment->NewAllocation(*hlo_buffer, buffer_size);\n allocation->set_entry_computation_parameter(\n instruction->parameter_number(), value->index(), parameter_has_alias);\n if (parameter_has_alias) {\n allocation_indices->push_back(allocation->index());\n }\n VLOG(3) << \"New allocation #\" << allocation->index()\n << \" marked as entry computation parameter: \" << *hlo_buffer;\n return absl::OkStatus();\n }\n }\n if (is_thread_local) {\n BufferAllocation* allocation =\n assignment->NewAllocation(*hlo_buffer, buffer_size);\n allocation->set_is_thread_local(true);\n VLOG(3) << \"New allocation #\" << allocation->index()\n << \" for thread-local: \" << *hlo_buffer;\n return absl::OkStatus();\n }\n for (const HloValue* value : hlo_buffer->values()) {\n if (value->shape().IsTuple()) {\n BufferAllocation* allocation =\n assignment->NewAllocation(*hlo_buffer, buffer_size);\n allocation->set_is_tuple(true);\n VLOG(3) << \"New allocation #\" << allocation->index()\n << \" for tuple-shaped buffer: \" << *hlo_buffer;\n return absl::OkStatus();\n }\n if (value->IsTopLevel() && !value->IsTuple()) {\n const HloInstruction* instruction = value->instruction();\n for (auto* operand : instruction->operands()) {\n for (const auto& operand_slice :\n assignment->GetAllSlices(operand, {})) {\n BufferAllocation* allocation =\n assignment->GetMutableAllocation(operand_slice.index());\n if (MaybeAssignBuffer(allocation, *hlo_buffer, assignment)) {\n VLOG(3) << \"Reusing (operand) allocation #\" << allocation->index()\n << \" for: \" << *hlo_buffer;\n return absl::OkStatus();\n }\n }\n }\n }\n }\n for (int allocation_index = allocation_indices->size() - 1;\n allocation_index >= 0; allocation_index--) {\n BufferAllocation* allocation = assignment->GetMutableAllocation(\n allocation_indices->at(allocation_index));\n if (MaybeAssignBuffer(allocation, *hlo_buffer, assignment)) {\n VLOG(3) << \"Reusing allocation #\" << allocation->index()\n << \" for: \" << *hlo_buffer;\n return 
absl::OkStatus();\n }\n }\n if (!assignment->HasAllocation(*hlo_buffer) &&\n !assignment->alias_analysis().BufferLivesOut(*hlo_buffer)) {\n bool all_computations_have_sequential_order = true;\n for (const HloValue* hlo_value : hlo_buffer->values()) {\n HloComputation* computation = hlo_value->instruction()->parent();\n const bool has_sequential_order =\n assignment->hlo_ordering().SequentialOrder(*computation) != nullptr;\n all_computations_have_sequential_order &= has_sequential_order;\n }\n if (all_computations_have_sequential_order) {\n for (const HloValue* hlo_value : hlo_buffer->values()) {\n HloComputation* computation = hlo_value->instruction()->parent();\n (*buffers_to_assign_sequentially)[computation].insert(hlo_value);\n VLOG(3) << \"Delaying assignment of temp buffer: \" << *hlo_value;\n }\n return absl::OkStatus();\n }\n }\n if (!assignment->HasAllocation(*hlo_buffer)) {\n BufferAllocation* allocation =\n assignment->NewAllocation(*hlo_buffer, buffer_size);\n allocation_indices->push_back(allocation->index());\n VLOG(3) << \"New allocation #\" << allocation->index()\n << \" for: \" << *hlo_buffer;\n }\n TF_RET_CHECK(assignment->HasAllocation(*hlo_buffer));\n return absl::OkStatus();\n}\nabsl::Status BufferAssigner::AssignBuffersForComputations(\n const std::vector& computations,\n bool is_thread_local,\n absl::flat_hash_map>*\n buffers_to_assign_sequentially,\n BufferAssignment* assignment) {\n if (computations.empty()) {\n return absl::OkStatus();\n }\n std::vector sorted_buffers;\n absl::flat_hash_set preset_assigned_buffers;\n TF_RETURN_IF_ERROR(AssignPresetBuffers(&preset_assigned_buffers, assignment));\n const HloAliasAnalysis& alias_analysis = assignment->alias_analysis();\n for (const HloBuffer& buffer : alias_analysis.buffers()) {\n if (preset_assigned_buffers.find(&buffer) !=\n preset_assigned_buffers.end()) {\n VLOG(3) << \"Skip allocation for buffer: \" << buffer;\n continue;\n }\n TF_RET_CHECK(!buffer.values().empty());\n const 
HloComputation* comp = buffer.values()[0]->instruction()->parent();\n if (absl::c_linear_search(computations, comp)) {\n sorted_buffers.push_back(&buffer);\n }\n }\n flat_hash_map post_order_position;\n int position = 0;\n std::vector reverse_post_order_computations;\n std::unique_ptr call_graph =\n CallGraph::Build(computations[0]->parent());\n TF_RETURN_IF_ERROR(call_graph->VisitNodes([&](const CallGraphNode& node) {\n if (absl::c_linear_search(computations, node.computation())) {\n reverse_post_order_computations.push_back(node.computation());\n }\n return absl::OkStatus();\n }));\n absl::c_reverse(reverse_post_order_computations);\n for (auto* computation : reverse_post_order_computations) {\n for (auto* instruction : computation->MakeInstructionPostOrder()) {\n post_order_position.emplace(instruction, position);\n position++;\n }\n }\n HloSchedule schedule(&assignment->module());\n for (const HloComputation* computation : computations) {\n const HloInstructionSequence* instruction_sequence =\n assignment->hlo_ordering().SequentialOrder(*computation);\n const bool has_sequential_order = instruction_sequence != nullptr;\n if (has_sequential_order && buffers_to_assign_sequentially != nullptr) {\n buffers_to_assign_sequentially->emplace(computation,\n flat_hash_set());\n schedule.set_sequence(computation, *instruction_sequence);\n }\n }\n absl::c_sort(\n sorted_buffers, [&post_order_position, &alias_analysis, assignment](\n const HloBuffer* a, const HloBuffer* b) {\n const int64_t a_size = assignment->HloBufferSize(*a);\n const int64_t b_size = assignment->HloBufferSize(*b);\n if (a_size != b_size) {\n return a_size > b_size; \n }\n const bool a_live_out = alias_analysis.BufferLivesOut(*a);\n const bool b_live_out = alias_analysis.BufferLivesOut(*b);\n if (a_live_out != b_live_out) {\n return a_live_out;\n }\n auto compare = [&post_order_position](const HloValue* value1,\n const HloValue* value2) {\n return post_order_position.at(value1->instruction()) <\n 
post_order_position.at(value2->instruction());\n };\n const HloValue* a_min = *absl::c_min_element(a->values(), compare);\n const HloValue* b_min = *absl::c_min_element(b->values(), compare);\n if (post_order_position.at(a_min->instruction()) <\n post_order_position.at(b_min->instruction())) {\n return true;\n } else if (post_order_position.at(a_min->instruction()) >\n post_order_position.at(b_min->instruction())) {\n return false;\n }\n return a->id() < b->id();\n });\n std::vector allocation_indices;\n for (const HloBuffer* buffer : sorted_buffers) {\n VLOG(3) << \"=================================================\";\n VLOG(3) << \"Assigning buffer for \" << *buffer;\n TF_RETURN_IF_ERROR(AssignSingleHloBuffer(buffer, is_thread_local,\n buffers_to_assign_sequentially,\n &allocation_indices, assignment));\n }\n return absl::OkStatus();\n}\nflat_hash_map>\nBufferAssigner::SplitBuffersByColor(\n const flat_hash_set& buffers) const {\n flat_hash_map> color_map;\n for (auto buffer : buffers) {\n color_map[buffer->color()].insert(buffer);\n }\n return color_map;\n}\nabsl::flat_hash_map>\nBufferAssigner::SplitBuffersByPrivateStackComputation(\n const absl::flat_hash_set& buffers,\n absl::Span private_stack_computations,\n const CallGraph& call_graph) const {\n absl::flat_hash_map>\n computation_map;\n for (const HloValue* value : buffers) {\n bool found_computation = false;\n for (const HloComputation* computation : private_stack_computations) {\n if (call_graph.InstructionIsNestedIn(value->instruction(), computation)) {\n found_computation = true;\n computation_map[computation].insert(value);\n break;\n }\n }\n CHECK(found_computation);\n }\n return computation_map;\n}\nabsl::Status BufferAssigner::AssignPresetBuffers(\n absl::flat_hash_set* assigned_buffers,\n BufferAssignment* assignment) {\n if (!preset_assignments_) {\n return absl::OkStatus();\n }\n absl::flat_hash_map\n preset_allocations;\n for (auto& color_and_info : 
preset_assignments_->assignment_informations()) {\n LogicalBuffer::Color color(color_and_info.first);\n auto inserted = preset_allocations.emplace(\n color,\n assignment->NewEmptyAllocation(color_and_info.second.size, color));\n BufferAllocation* inserted_allocation = inserted.first->second;\n inserted_allocation->AddHeapTrace(\n color_and_info.second.heap_simulator_trace);\n VLOG(3) << \"Created preset buffer allocation \"\n << inserted_allocation->index()\n << \", color: \" << inserted_allocation->color()\n << \", size: \" << inserted_allocation->size();\n }\n const HloAliasAnalysis& alias_analysis = assignment->alias_analysis();\n for (auto& position_and_chunk : preset_assignments_->chunks()) {\n const HloPosition& defining_position = position_and_chunk.first;\n const HloBuffer& buffer = alias_analysis.GetUniqueBufferAt(\n defining_position.instruction, defining_position.index);\n for (const HloValue* value : buffer.values()) {\n VLOG(3) << \"Preset allocation for value: \" << value->ToShortString();\n const HeapSimulator::Chunk& chunk = position_and_chunk.second;\n auto preset_allocations_iter = preset_allocations.find(value->color());\n CHECK(preset_allocations_iter != preset_allocations.end())\n << \"No preset value allocation for color \" << value->color()\n << \" for \" << value->ToShortString() << \" found.\";\n preset_allocations_iter->second->AddAssignment(*value, chunk.offset,\n chunk.size);\n }\n assigned_buffers->insert(&buffer);\n }\n preset_assignments_ = {};\n return absl::OkStatus();\n}\nabsl::Status BufferAssigner::AssignBuffersWithSequentialOrdering(\n const flat_hash_map>&\n buffers_to_assign_sequentially,\n bool run_whole_module_heap_simulation, BufferAssignment* assignment,\n const PrivateStacks& private_stacks,\n GlobalDecreasingSizeBestFitHeap::BufferIntervalCompare\n heap_buffer_interval_compare,\n std::optional isolation_options) {\n const HloOrdering& hlo_ordering = assignment->hlo_ordering();\n auto get_heap_algorithm =\n [&](int64_t 
alignment) -> std::unique_ptr> {\n if (heap_buffer_interval_compare) {\n return std::make_unique(\n assignment->multiheap_size_constraint_per_heap(), alignment,\n GlobalDecreasingSizeBestFitHeap::kCustom,\n heap_buffer_interval_compare);\n }\n auto algorithms = std::make_unique<\n std::vector>>>();\n algorithms->push_back(\n std::make_unique(\n assignment->multiheap_size_constraint_per_heap(), alignment,\n GlobalDecreasingSizeBestFitHeap::kSpatial));\n algorithms->push_back(\n std::make_unique(\n assignment->multiheap_size_constraint_per_heap(), alignment,\n GlobalDecreasingSizeBestFitHeap::kTemporal));\n return std::make_unique>(\n std::move(algorithms));\n };\n if (run_whole_module_heap_simulation) {\n VLOG(1) << \"Running whole-module heap simulation\";\n HloSchedule schedule(&assignment->module());\n flat_hash_set all_buffers_to_assign;\n for (const auto& pair : buffers_to_assign_sequentially) {\n const HloComputation* computation = pair.first;\n const flat_hash_set& buffers_to_assign = pair.second;\n const HloInstructionSequence* instruction_sequence =\n hlo_ordering.SequentialOrder(*computation);\n CHECK(instruction_sequence != nullptr) << computation->name();\n schedule.set_sequence(computation, *instruction_sequence);\n all_buffers_to_assign.insert(buffers_to_assign.begin(),\n buffers_to_assign.end());\n }\n auto color_map = SplitBuffersByColor(all_buffers_to_assign);\n std::vector sorted_colors;\n sorted_colors.reserve(color_map.size());\n for (auto& single_colored_set : color_map) {\n auto color = single_colored_set.first;\n sorted_colors.emplace(sorted_colors.end(), color);\n }\n absl::c_sort(sorted_colors);\n for (auto color : sorted_colors) {\n VLOG(2) << \"Simulating heap for color \" << color;\n int64_t alignment = assignment->color_alignment_(color);\n HeapSimulator::Options options;\n options.alloc_constants = allocate_buffers_for_constants_;\n auto private_stacks_it = private_stacks.find(color);\n if (private_stacks_it != private_stacks.end()) {\n 
auto computation_map = SplitBuffersByPrivateStackComputation(\n color_map[color], private_stacks_it->second,\n assignment->alias_analysis().dataflow_analysis().call_graph());\n for (const HloComputation* private_stack_computation :\n private_stacks_it->second) {\n VLOG(2) << \"private stack computation: \"\n << private_stack_computation->name();\n auto computation_map_it =\n computation_map.find(private_stack_computation);\n CHECK(computation_map_it != computation_map.end());\n options.buffers_to_assign = &computation_map_it->second;\n const HloInstructionSequence* instruction_sequence =\n hlo_ordering.SequentialOrder(*private_stack_computation);\n TF_ASSIGN_OR_RETURN(\n HeapSimulator::Result result,\n HeapSimulator::Run(\n get_heap_algorithm(alignment), *private_stack_computation,\n *instruction_sequence, assignment->alias_analysis(),\n assignment->buffer_size_, &schedule, options));\n AssignBuffersFromHeapSimulator(result, assignment, color,\n isolation_options);\n }\n } else {\n options.buffers_to_assign = &color_map[color];\n TF_ASSIGN_OR_RETURN(\n HeapSimulator::Result result,\n HeapSimulator::Run(get_heap_algorithm(alignment),\n assignment->module(), schedule,\n assignment->alias_analysis(),\n assignment->buffer_size_, options));\n AssignBuffersFromHeapSimulator(result, assignment, color,\n isolation_options);\n }\n }\n } else {\n VLOG(1) << \"Running per-computation heap simulation\";\n for (const auto& pair : buffers_to_assign_sequentially) {\n const HloComputation* computation = pair.first;\n const flat_hash_set& buffers_to_assign = pair.second;\n const HloInstructionSequence* instruction_sequence =\n hlo_ordering.SequentialOrder(*computation);\n CHECK(instruction_sequence != nullptr) << computation->name();\n auto color_map = SplitBuffersByColor(buffers_to_assign);\n std::vector sorted_colors;\n sorted_colors.reserve(color_map.size());\n for (auto& single_colored_set : color_map) {\n auto color = single_colored_set.first;\n 
sorted_colors.emplace(sorted_colors.end(), color);\n }\n absl::c_sort(sorted_colors);\n for (auto color : sorted_colors) {\n VLOG(2) << \"Simulating heap for color \" << color;\n int64_t alignment = assignment->color_alignment_(color);\n HeapSimulator::Options options;\n options.buffers_to_assign = &color_map[color];\n TF_ASSIGN_OR_RETURN(\n HeapSimulator::Result result,\n HeapSimulator::Run(get_heap_algorithm(alignment), *computation,\n *instruction_sequence,\n assignment->alias_analysis(),\n assignment->buffer_size_, options));\n AssignBuffersFromHeapSimulator(result, assignment, color,\n isolation_options);\n }\n }\n }\n return absl::OkStatus();\n}\nnamespace {\nstd::vector ComputePeakMemoryLogicalBuffers(\n const BufferAllocation& allocation, const HeapSimulatorTrace& heap_trace) {\n absl::flat_hash_map id_to_value;\n absl::flat_hash_map buffer_sizes;\n for (const auto& pair : allocation.assigned_buffers()) {\n const HloValue* value = pair.first;\n const BufferAllocation::OffsetSize& offset_size = pair.second;\n id_to_value[value->id()] = value;\n buffer_sizes[value] = offset_size.size;\n }\n VLOG(1) << \"Compute peak memory logical buffers\";\n absl::flat_hash_map num_outstanding_shared_buffers;\n absl::flat_hash_map shared_canonical_ids;\n absl::flat_hash_map allocated_sizes;\n auto memory_delta = [&](const HeapSimulatorTrace::Event& event) -> int64_t {\n const HloValue* buffer = id_to_value.at(event.buffer_id());\n const int64_t buffer_size = buffer_sizes.at(buffer);\n if (event.kind() == HeapSimulatorTrace::Event::ALLOC) {\n num_outstanding_shared_buffers[event.buffer_id()] = 1;\n allocated_sizes[event.buffer_id()] = buffer_size;\n return buffer_size;\n } else if (event.kind() == HeapSimulatorTrace::Event::SHARE_WITH) {\n shared_canonical_ids[event.buffer_id()] = event.share_with_canonical_id();\n if (++num_outstanding_shared_buffers[event.share_with_canonical_id()] ==\n 1) {\n allocated_sizes[event.buffer_id()] = buffer_size;\n return buffer_size;\n }\n 
allocated_sizes[event.buffer_id()] = 0;\n return 0;\n } else if (event.kind() == HeapSimulatorTrace::Event::FREE) {\n auto shared_canonical_id_it =\n shared_canonical_ids.find(event.buffer_id());\n int64_t buffer_id = (shared_canonical_id_it == shared_canonical_ids.end())\n ? event.buffer_id()\n : shared_canonical_id_it->second;\n --num_outstanding_shared_buffers[buffer_id];\n return -1 * allocated_sizes[event.buffer_id()];\n }\n LOG(FATAL) << \"Unknown event kind: \" << event.kind();\n };\n int64_t max_live_size = 0;\n int64_t live_size = 0;\n for (const auto& event : heap_trace.events()) {\n if (!id_to_value.contains(event.buffer_id())) {\n continue;\n }\n live_size += memory_delta(event);\n if (max_live_size < live_size) {\n max_live_size = live_size;\n }\n }\n absl::flat_hash_set live_values;\n live_size = 0;\n num_outstanding_shared_buffers.clear();\n for (const auto& event : heap_trace.events()) {\n if (!id_to_value.contains(event.buffer_id())) {\n continue;\n }\n const HloValue* value = id_to_value.at(event.buffer_id());\n int64_t delta = memory_delta(event);\n if (delta > 0) {\n InsertOrDie(&live_values, value);\n } else if (delta < 0) {\n CHECK(ContainsKey(live_values, value));\n live_values.erase(value);\n }\n live_size += delta;\n if (live_size == max_live_size) {\n break;\n }\n }\n CHECK_EQ(live_size, max_live_size);\n std::vector live_values_vector;\n live_values_vector.insert(live_values_vector.end(), live_values.begin(),\n live_values.end());\n absl::c_sort(live_values_vector, [](const HloValue* a, const HloValue* b) {\n return a->id() < b->id();\n });\n VLOG(4) << \"Peak memory buffer:\";\n for (auto value : live_values_vector) {\n VLOG(4) << \" \" << value->ToString();\n }\n return live_values_vector;\n}\n} \nvoid BufferAssigner::IsolateHeapBuffers(\n std::optional isolation_options,\n const BufferAssignment* assignment, LogicalBuffer::Color color,\n HeapSimulator::Result& result) const {\n if (!isolation_options) {\n return;\n }\n result.heap_size 
= 0;\n for (HeapSimulator::HeapResult& heap_result : result.heap_results) {\n if (absl::c_find(isolation_options->config.isolation_colors(), color) !=\n isolation_options->config.isolation_colors().end()) {\n VLOG(1) << \"Isolating color: \" << color;\n int64_t alignment = assignment->color_alignment_(color);\n std::vector sorted_values;\n sorted_values.reserve(heap_result.chunk_map.size());\n for (const auto& [value, chunk] : heap_result.chunk_map) {\n sorted_values.push_back(value);\n }\n absl::c_sort(sorted_values, isolation_options->hlo_value_compare);\n int64_t isolation_offset =\n RoundUpTo(isolation_options->config.base_offset_bytes() +\n heap_result.heap_size +\n isolation_options->config.isolation_padding_bytes(),\n alignment);\n int64_t value_index;\n for (value_index = 0;\n value_index < std::min(static_cast(sorted_values.size()),\n isolation_options->config.isolation_fuel());\n ++value_index) {\n const HloValue* value = sorted_values[value_index];\n HeapSimulator::Chunk& chunk = heap_result.chunk_map.at(value);\n VLOG(1) << \"Isolating \" << value->ToShortString() << \" from \"\n << chunk.offset << \" to \" << isolation_offset;\n chunk.offset = isolation_offset;\n isolation_offset += RoundUpTo(\n chunk.size + isolation_options->config.isolation_padding_bytes(),\n alignment);\n }\n for (; value_index < sorted_values.size(); ++value_index) {\n const HloValue* value = sorted_values[value_index];\n HeapSimulator::Chunk& chunk = heap_result.chunk_map.at(value);\n int64_t new_offset = RoundUpTo(\n chunk.offset + isolation_options->config.base_offset_bytes(),\n alignment);\n VLOG(1) << \"Not isolating \" << value->ToShortString() << \", from \"\n << chunk.offset << \" to \" << new_offset;\n chunk.offset = new_offset;\n }\n heap_result.heap_size = isolation_offset;\n }\n result.heap_size += heap_result.heap_size;\n }\n}\nvoid BufferAssigner::AssignBuffersFromHeapSimulator(\n HeapSimulator::Result& result, BufferAssignment* assignment,\n BufferValue::Color 
color,\n std::optional isolation_options) {\n IsolateHeapBuffers(isolation_options, assignment, color, result);\n if (assignment->stats_.preallocated_temp_fragmentation_bytes == -1) {\n assignment->stats_.preallocated_temp_fragmentation_bytes =\n result.fragmentation_size;\n } else {\n assignment->stats_.preallocated_temp_fragmentation_bytes +=\n result.fragmentation_size;\n }\n VLOG(1) << \"Result size from heap simulator: \" << result.heap_size;\n for (const HeapSimulator::HeapResult& heap_result :\n result.heap_results) {\n BufferAllocation* allocation =\n assignment->NewEmptyAllocation(heap_result.heap_size, color);\n for (const auto& [value, chunk] : heap_result.chunk_map) {\n assignment->AddAssignment(allocation, *value, chunk.offset, chunk.size);\n }\n allocation->peak_buffers_ =\n ComputePeakMemoryLogicalBuffers(*allocation, result.debug_trace);\n XLA_VLOG_LINES(2, allocation->ToString());\n allocation->AddHeapTrace(result.debug_trace);\n }\n}\nabsl::StatusOr>\nBufferAssigner::CreateAssignment(\n const HloModule* module, std::unique_ptr hlo_ordering,\n BufferValue::SizeFunction buffer_size,\n LogicalBuffer::AlignmentFunction color_alignment,\n HloDataflowAnalysis::CanShareBuffer can_share_buffer,\n const PrivateStacks& private_stacks,\n GlobalDecreasingSizeBestFitHeap::BufferIntervalCompare\n heap_buffer_interval_compare,\n std::optional isolation_options,\n std::optional temp_buffer_color) {\n TF_ASSIGN_OR_RETURN(std::unique_ptr alias_analysis,\n HloAliasAnalysis::Run(module, can_share_buffer));\n HloSchedule schedule(module);\n for (const HloComputation* computation : module->computations()) {\n const HloInstructionSequence* instruction_sequence =\n hlo_ordering->SequentialOrder(*computation);\n const bool has_sequential_order = instruction_sequence != nullptr;\n if (has_sequential_order) {\n schedule.set_sequence(computation, *instruction_sequence);\n }\n }\n TF_ASSIGN_OR_RETURN(std::unique_ptr hlo_live_range,\n HloLiveRange::Run(schedule, 
*alias_analysis,\n module->entry_computation(), true));\n VLOG(1) << \"Assigning buffers to module \" << module->name();\n XLA_VLOG_LINES(3, module->ToString());\n XLA_VLOG_LINES(3, alias_analysis->ToString());\n XLA_VLOG_LINES(3, alias_analysis->dataflow_analysis().ToString());\n VLOG(1) << \"Number of buffers to assign: \"\n << alias_analysis->buffers().size();\n std::unique_ptr assignment(new BufferAssignment(\n module, std::move(hlo_ordering), std::move(buffer_size),\n std::move(color_alignment), std::move(alias_analysis),\n std::move(hlo_live_range)));\n TF_RETURN_IF_ERROR(\n colorer_(&assignment->alias_analysis(), assignment->hlo_ordering()));\n VLOG(3) << \"After coloring:\";\n XLA_VLOG_LINES(3,\n assignment->alias_analysis().dataflow_analysis().ToString());\n std::vector thread_local_computations;\n std::vector global_computations;\n TF_RETURN_IF_ERROR(GatherComputationsByAllocationType(\n module, &thread_local_computations, &global_computations));\n flat_hash_map>\n buffers_to_assign_sequentially;\n TF_RETURN_IF_ERROR(AssignBuffersForComputations(\n global_computations,\n false, &buffers_to_assign_sequentially,\n assignment.get()));\n const bool run_whole_module_heap_simulation =\n buffers_to_assign_sequentially.size() == global_computations.size();\n VLOG(2) << \"Running whole module heap simulation: \"\n << run_whole_module_heap_simulation;\n const int32_t multiheap_size_constraint_per_heap =\n module->config().debug_options().xla_multiheap_size_constraint_per_heap();\n VLOG(2) << \"Multiheap per heap size limit: \"\n << multiheap_size_constraint_per_heap;\n TF_RETURN_IF_ERROR(AssignBuffersWithSequentialOrdering(\n buffers_to_assign_sequentially, run_whole_module_heap_simulation,\n assignment.get(), private_stacks, heap_buffer_interval_compare,\n isolation_options));\n std::vector thread_local_computations_no_fusion;\n for (auto* computation : thread_local_computations) {\n TF_RET_CHECK(computation != module->entry_computation());\n if 
(computation->IsFusionComputation()) {\n continue;\n }\n thread_local_computations_no_fusion.push_back(computation);\n }\n TF_RETURN_IF_ERROR(AssignBuffersForComputations(\n thread_local_computations_no_fusion,\n true,\n nullptr, assignment.get()));\n for (const HloBuffer* buffer :\n assignment->alias_analysis().LiveOutBuffers()) {\n VLOG(3) << \"maybe_live_out LogicalBuffer: \" << *buffer;\n if (assignment->HasAllocation(*buffer)) {\n BufferAllocation* alloc =\n assignment->GetMutableAssignedAllocation(*buffer);\n alloc->set_maybe_live_out(true);\n VLOG(3) << \"maybe_live_out BufferAllocation: \" << *alloc;\n }\n }\n absl::flat_hash_set private_stack_colors;\n for (const auto& [color, computations] : private_stacks) {\n private_stack_colors.insert(color);\n }\n assignment->CombineTempAllocations(private_stack_colors, temp_buffer_color);\n XLA_VLOG_LINES(2, assignment->ToString());\n TF_RETURN_IF_ERROR(assignment->ComputeSummaryStats());\n XLA_VLOG_LINES(1, assignment->GetStats().ToString());\n VLOG(1) << \"Buffer assignment done.\";\n return std::move(assignment);\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/buffer_assignment.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/span.h\"\n#include \"xla/comparison_util.h\"\n#include \"xla/hlo/ir/dfs_hlo_visitor_with_default.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/ir/hlo_schedule.h\"\n#include \"xla/literal.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/service/buffer_value.h\"\n#include \"xla/service/call_graph.h\"\n#include 
\"xla/service/copy_insertion.h\"\n#include \"xla/service/flatten_call_graph.h\"\n#include \"xla/service/hlo.pb.h\"\n#include \"xla/service/hlo_alias_analysis.h\"\n#include \"xla/service/hlo_buffer.h\"\n#include \"xla/service/hlo_memory_scheduler.h\"\n#include \"xla/service/hlo_ordering.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/service/hlo_value.h\"\n#include \"xla/service/logical_buffer.h\"\n#include \"xla/service/memory_space_assignment/memory_space_assignment.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/test.h\"\n#include \"xla/test_helpers.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/status.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nusing memory_space_assignment::PresetAssignments;\nusing ::testing::UnorderedElementsAre;\nclass InstructionListVisitor : public DfsHloVisitorWithDefault {\n public:\n explicit InstructionListVisitor(const HloInstruction* root) : root_(root) {}\n absl::Status DefaultAction(HloInstruction* hlo) override {\n instructions_.push_back(hlo);\n VLOG(0) << \"List instruction \" << hlo->ToString();\n return absl::OkStatus();\n }\n std::vector GetInstructions() { return instructions_; }\n private:\n const HloInstruction* root_;\n std::vector instructions_;\n InstructionListVisitor(const InstructionListVisitor&) = delete;\n InstructionListVisitor& operator=(const InstructionListVisitor&) = delete;\n};\nconst std::vector GetInstructions(HloInstruction* root) {\n InstructionListVisitor main_list(root);\n TF_CHECK_OK(root->Accept(&main_list));\n return main_list.GetInstructions();\n}\nclass BufferAssignmentTest : public HloTestBase {\n protected:\n ~BufferAssignmentTest() override {}\n std::unique_ptr RunBufferAssignment(HloModule* module,\n int64_t alignment = 1) {\n return BufferAssigner::Run(\n module, std::make_unique(module),\n backend().compiler()->BufferSizeBytesFunction(),\n 
[alignment](LogicalBuffer::Color) { return alignment; },\n true)\n .value();\n }\n absl::StatusOr> ConvertToProtoAndBack(\n const BufferAssignment* buffers, const HloModule* module) {\n auto proto = buffers->ToProto();\n return BufferAssignment::FromProto(\n proto, module, backend().compiler()->BufferSizeBytesFunction(),\n nullptr);\n }\n std::unique_ptr RunBufferAssignmentWithSequentialOrdering(\n HloModule* module, int64_t alignment = 1,\n BufferAssigner::Colorer colorer = BufferAssigner::DefaultColorer(),\n const BufferAssigner::PrivateStacks& private_stacks = {},\n std::optional\n isolation_options = std::nullopt) {\n return BufferAssigner::Run(\n module,\n std::make_unique(module->schedule()),\n backend().compiler()->BufferSizeBytesFunction(),\n [alignment](LogicalBuffer::Color) { return alignment; },\n true, colorer,\n std::nullopt, nullptr,\n {}, private_stacks,\n nullptr, isolation_options)\n .value();\n }\n std::unique_ptr RunBufferAssignmentNoBuffersForConstants(\n HloModule* module, int64_t alignment = 1) {\n return BufferAssigner::Run(\n module, std::make_unique(module),\n backend().compiler()->BufferSizeBytesFunction(),\n [alignment](LogicalBuffer::Color) { return alignment; },\n false)\n .value();\n }\n std::unique_ptr RunBufferAssignmentNoBuffersReuseForAdd(\n HloModule* module, int64_t alignment = 1) {\n auto must_not_live_out = [](const HloAliasAnalysis& alias_analysis,\n const HloInstruction* instruction,\n const ShapeIndex&) {\n return instruction->opcode() == HloOpcode::kAdd;\n };\n return BufferAssigner::Run(\n module, std::make_unique(module),\n backend().compiler()->BufferSizeBytesFunction(),\n [alignment](LogicalBuffer::Color) { return alignment; },\n false,\n BufferAssigner::DefaultColorer(),\n must_not_live_out)\n .value();\n }\n std::unique_ptr RunColoredBufferAssignment(\n HloModule* module, BufferAssigner::Colorer colorer,\n int64_t alignment = 1) {\n return BufferAssigner::Run(\n module, std::make_unique(module),\n 
backend().compiler()->BufferSizeBytesFunction(),\n [alignment](LogicalBuffer::Color) { return alignment; },\n true, std::move(colorer))\n .value();\n }\n std::unique_ptr RunBufferAssignmentWithInstructionSequence(\n HloModule* module, absl::Span instruction_sequence,\n int64_t alignment = 1) {\n HloSchedule schedule(module);\n schedule.set_sequence(module->entry_computation(), instruction_sequence);\n return BufferAssigner::Run(\n module, std::make_unique(schedule),\n backend().compiler()->BufferSizeBytesFunction(),\n [alignment](LogicalBuffer::Color) { return alignment; },\n true)\n .value();\n }\n std::unique_ptr RunBufferAssignmentWithPresetAssignments(\n HloModule* module, std::unique_ptr preset_assignments,\n int64_t alignment = 1) {\n return BufferAssigner::Run(\n module, std::make_unique(module),\n backend().compiler()->BufferSizeBytesFunction(),\n [alignment](LogicalBuffer::Color) { return alignment; },\n true,\n BufferAssigner::DefaultColorer(),\n std::nullopt,\n nullptr, std::move(preset_assignments))\n .value();\n }\n std::unique_ptr RunBufferAssignmentWithIsolationOptions(\n HloModule* module, std::optional\n isolation_options = std::nullopt) {\n return BufferAssigner::Run(\n module,\n std::make_unique(module->schedule()),\n backend().compiler()->BufferSizeBytesFunction(),\n [](LogicalBuffer::Color) { return 1; },\n true,\n BufferAssigner::DefaultColorer(),\n std::nullopt, nullptr,\n {}, {},\n nullptr, isolation_options)\n .value();\n }\n std::unique_ptr BuildMapComputationPlus1(\n const std::string& name) {\n auto builder = HloComputation::Builder(name);\n auto param =\n builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, \"x\"));\n auto value = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(1.0)));\n builder.AddInstruction(\n HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, param, value));\n return builder.Build();\n }\n std::unique_ptr BuildReduceComputation(\n const std::string& name) {\n auto 
builder = HloComputation::Builder(name);\n auto param =\n builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, \"x\"));\n auto param2 =\n builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, \"y\"));\n builder.AddInstruction(\n HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, param, param2));\n return builder.Build();\n }\n std::unique_ptr BuildWhileConditionComputation(\n const std::string& name) {\n auto builder = HloComputation::Builder(name);\n auto const4 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(4)));\n auto param = builder.AddInstruction(\n HloInstruction::CreateParameter(0, t_s32_f32v4_, \"x\"));\n auto index = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(const4->shape(), param, 0));\n builder.AddInstruction(\n HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), index,\n const4, ComparisonDirection::kLt));\n return builder.Build();\n }\n std::unique_ptr BuildWhileBodyComputation(\n const std::string& name) {\n auto builder = HloComputation::Builder(name);\n auto const1 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(1)));\n auto constv = builder.AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR1({1.1f, 2.2f, 3.3f, 4.4f})));\n auto param = builder.AddInstruction(\n HloInstruction::CreateParameter(0, t_s32_f32v4_, \"x\"));\n auto indexc = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(const1->shape(), param, 0));\n auto addc = builder.AddInstruction(HloInstruction::CreateBinary(\n indexc->shape(), HloOpcode::kAdd, indexc, const1));\n auto indexv = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(constv->shape(), param, 1));\n auto addv = builder.AddInstruction(HloInstruction::CreateBinary(\n constv->shape(), HloOpcode::kAdd, indexv, constv));\n builder.AddInstruction(HloInstruction::CreateTuple({addc, addv}));\n return builder.Build();\n }\n std::unique_ptr 
BuildR0F32UnaryOpComputation(\n HloOpcode opcode, const std::string& name) {\n auto builder = HloComputation::Builder(name);\n auto param =\n builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, \"x\"));\n builder.AddInstruction(HloInstruction::CreateUnary(r0f32_, opcode, param));\n return builder.Build();\n }\n const BufferAllocation& GetAssignedInputAllocation(\n const BufferAssignment& buffers, HloInstruction* hlo) {\n LOG(INFO) << \"Checking input: \" << hlo->ToString();\n const BufferAllocation& buffer =\n *buffers.GetUniqueTopLevelSlice(hlo).value().allocation();\n EXPECT_EQ(hlo->parameter_number(), buffer.parameter_number());\n return buffer;\n }\n const BufferAllocation& GetAssignedOutputAllocation(\n const BufferAssignment& buffers, HloInstruction* hlo) {\n LOG(INFO) << \"Checking output: \" << hlo->ToString();\n const BufferAllocation& buffer = GetTopLevelAllocation(buffers, hlo);\n return buffer;\n }\n const BufferAllocation& GetAllocation(const BufferAssignment& buffers,\n const HloInstruction* hlo,\n const ShapeIndex& index) {\n return *buffers.GetUniqueSlice(hlo, index).value().allocation();\n }\n const BufferAllocation& GetTopLevelAllocation(const BufferAssignment& buffers,\n const HloInstruction* hlo) {\n return *buffers.GetUniqueTopLevelSlice(hlo).value().allocation();\n }\n int64_t ValidateBuffers(\n const std::vector& instructions,\n const BufferAssignment& buffers) {\n for (const HloInstruction* hlo : instructions) {\n if (!buffers.HasTopLevelAllocation(hlo)) {\n EXPECT_TRUE(HloOpcode::kConstant == hlo->opcode() ||\n HloOpcode::kParameter == hlo->opcode());\n continue;\n }\n }\n int64_t total_size = 0;\n for (auto& allocation : buffers.Allocations()) {\n total_size += allocation.size();\n }\n return total_size;\n }\n Shape s32_ = ShapeUtil::MakeShape(xla::S32, {});\n Shape r0f32_ = ShapeUtil::MakeShape(xla::F32, {});\n Shape f32vec4_ = ShapeUtil::MakeShape(F32, {4});\n Shape f32vec10_ = ShapeUtil::MakeShape(F32, {10});\n Shape 
f32vec100_ = ShapeUtil::MakeShape(F32, {100});\n Shape f32a100x10_ = ShapeUtil::MakeShape(F32, {100, 10});\n Shape t_s32_f32v4_ = ShapeUtil::MakeTupleShape({s32_, f32vec4_});\n Shape t_s32_f32v10_ = ShapeUtil::MakeTupleShape({s32_, f32vec10_});\n};\nstatic bool BuffersDistinct(const std::vector& a,\n const std::vector& b,\n const BufferAssignment& assignment) {\n absl::flat_hash_set a_slices;\n for (const HloInstruction* instruction : a) {\n if (assignment.HasTopLevelAllocation(instruction)) {\n a_slices.insert(assignment.GetUniqueTopLevelSlice(instruction).value());\n }\n }\n for (const HloInstruction* instruction : b) {\n if (assignment.HasTopLevelAllocation(instruction)) {\n if (a_slices.contains(\n assignment.GetUniqueTopLevelSlice(instruction).value())) {\n return false;\n }\n }\n }\n return true;\n}\nTEST_F(BufferAssignmentTest, ScalarConstant) {\n auto builder = HloComputation::Builder(TestName());\n auto const0 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(1.0)));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n {\n auto buffers = RunBufferAssignment(module.get());\n EXPECT_TRUE(buffers->HasTopLevelAllocation(const0));\n }\n {\n auto buffers = RunBufferAssignmentNoBuffersForConstants(module.get());\n EXPECT_FALSE(buffers->HasTopLevelAllocation(const0));\n }\n}\nTEST_F(BufferAssignmentTest, BufferForConst) {\n auto builder = HloComputation::Builder(TestName());\n auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR1({1.1f, 2.2f, 3.3f, 4.4f})));\n auto const1 = builder.AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR1({4.1f, 4.2f, 4.3f, 4.4f})));\n auto add = builder.AddInstruction(\n HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd, const0, const1));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n {\n auto buffers = RunBufferAssignment(module.get());\n 
EXPECT_TRUE(buffers->HasTopLevelAllocation(const0));\n EXPECT_TRUE(buffers->HasTopLevelAllocation(const1));\n GetAssignedOutputAllocation(*buffers, add);\n }\n {\n auto buffers = RunBufferAssignmentNoBuffersForConstants(module.get());\n EXPECT_FALSE(buffers->HasTopLevelAllocation(const0));\n EXPECT_FALSE(buffers->HasTopLevelAllocation(const1));\n GetAssignedOutputAllocation(*buffers, add);\n }\n}\nTEST_F(BufferAssignmentTest, HasAllocationAt) {\n auto builder = HloComputation::Builder(TestName());\n auto param0 = builder.AddInstruction(\n HloInstruction::CreateParameter(0, f32vec100_, \"param0\"));\n auto constant = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(1)));\n auto negate = builder.AddInstruction(\n HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));\n auto tuple = builder.AddInstruction(\n HloInstruction::CreateTuple({negate, param0, constant}));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto buffers = RunBufferAssignment(module.get());\n EXPECT_EQ(buffers->HasTopLevelAllocation(tuple),\n buffers->HasAllocationAt(tuple, {}));\n EXPECT_EQ(buffers->HasTopLevelAllocation(negate),\n buffers->HasAllocationAt(tuple, {0}));\n EXPECT_EQ(buffers->HasTopLevelAllocation(param0),\n buffers->HasAllocationAt(tuple, {1}));\n EXPECT_EQ(buffers->HasTopLevelAllocation(constant),\n buffers->HasAllocationAt(tuple, {2}));\n}\nTEST_F(BufferAssignmentTest, BufferForOutputConst) {\n auto builder = HloComputation::Builder(TestName());\n auto const0 = builder.AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR1({1.1f, 2.2f, 3.3f, 4.4f})));\n auto copy = builder.AddInstruction(\n HloInstruction::CreateUnary(const0->shape(), HloOpcode::kCopy, const0));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto buffers = RunBufferAssignment(module.get());\n GetAssignedOutputAllocation(*buffers, 
copy);\n}\nTEST_F(BufferAssignmentTest, Basic) {\n auto builder = HloComputation::Builder(TestName());\n auto paramscalar =\n builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, \"p\"));\n auto broadcast = builder.AddInstruction(\n HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));\n auto param0 = builder.AddInstruction(\n HloInstruction::CreateParameter(1, f32vec100_, \"p1\"));\n auto param1 = builder.AddInstruction(\n HloInstruction::CreateParameter(2, f32vec100_, \"p2\"));\n auto mul = builder.AddInstruction(HloInstruction::CreateBinary(\n f32vec100_, HloOpcode::kMultiply, broadcast, param0));\n auto add = builder.AddInstruction(\n HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));\n auto sub = builder.AddInstruction(HloInstruction::CreateBinary(\n f32vec100_, HloOpcode::kSubtract, add, param1));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto buffers_orig = RunBufferAssignment(module.get());\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr buffers,\n ConvertToProtoAndBack(buffers_orig.get(), module.get()));\n BufferAllocation paramscalar_buffer =\n GetAssignedInputAllocation(*buffers, paramscalar);\n BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);\n BufferAllocation param1_buffer = GetAssignedInputAllocation(*buffers, param1);\n EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());\n EXPECT_NE(paramscalar_buffer.index(), param1_buffer.index());\n EXPECT_NE(param0_buffer.index(), param1_buffer.index());\n const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul);\n EXPECT_NE(mul_buffer.index(), param0_buffer.index());\n const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);\n EXPECT_EQ(add_buffer.index(), mul_buffer.index());\n GetAssignedOutputAllocation(*buffers, sub);\n}\nTEST_F(BufferAssignmentTest, BasicToFromProto) {\n auto builder = HloComputation::Builder(TestName());\n auto paramscalar 
=\n builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, \"p\"));\n auto broadcast = builder.AddInstruction(\n HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));\n auto param0 = builder.AddInstruction(\n HloInstruction::CreateParameter(1, f32vec100_, \"p1\"));\n auto param1 = builder.AddInstruction(\n HloInstruction::CreateParameter(2, f32vec100_, \"p2\"));\n auto mul = builder.AddInstruction(HloInstruction::CreateBinary(\n f32vec100_, HloOpcode::kMultiply, broadcast, param0));\n auto add = builder.AddInstruction(\n HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));\n builder.AddInstruction(HloInstruction::CreateBinary(\n f32vec100_, HloOpcode::kSubtract, add, param1));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto buffers_orig = RunBufferAssignment(module.get());\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr buffers_from_proto,\n ConvertToProtoAndBack(buffers_orig.get(), module.get()));\n const HloDataflowAnalysis& dataflow_orig = buffers_orig->dataflow_analysis();\n const HloDataflowAnalysis& dataflow_proto =\n buffers_from_proto->dataflow_analysis();\n EXPECT_EQ(buffers_orig->Allocations().size(),\n buffers_from_proto->Allocations().size());\n for (BufferValue::Id id = 0; id < dataflow_orig.values().size(); id++) {\n auto& orig_value = dataflow_orig.values().at(id);\n if (buffers_orig->HasAllocation(*orig_value)) {\n auto& value_proto = dataflow_proto.GetUniqueValueAt(\n orig_value->instruction(), orig_value->index());\n EXPECT_TRUE(buffers_from_proto->HasAllocation(value_proto));\n EXPECT_EQ(orig_value->color(), value_proto.color());\n EXPECT_EQ(buffers_orig->GetAssignedAllocation(*orig_value).index(),\n buffers_from_proto->GetAssignedAllocation(value_proto).index());\n }\n }\n}\nTEST_F(BufferAssignmentTest, AliasedParamCanBeReused) {\n auto builder = HloComputation::Builder(TestName());\n auto param = builder.AddInstruction(\n HloInstruction::CreateParameter(0, 
f32vec100_, \"p0\"));\n auto neg_1 = builder.AddInstruction(\n HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param));\n auto neg_2 = builder.AddInstruction(\n HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, neg_1));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n TF_ASSERT_OK(module->input_output_alias_config().SetUpAlias({}, 0, {}));\n auto buffers_orig = RunBufferAssignment(module.get());\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr buffers,\n ConvertToProtoAndBack(buffers_orig.get(), module.get()));\n BufferAllocation param_buffer = GetAssignedInputAllocation(*buffers, param);\n BufferAllocation neg_1_buffer = GetAllocation(*buffers, neg_1, {});\n BufferAllocation neg_2_buffer = GetAllocation(*buffers, neg_2, {});\n EXPECT_EQ(param_buffer.index(), neg_1_buffer.index());\n EXPECT_EQ(neg_2_buffer.index(), neg_1_buffer.index());\n}\nTEST_F(BufferAssignmentTest, AddCannotReuse) {\n auto builder = HloComputation::Builder(TestName());\n auto paramscalar =\n builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, \"p\"));\n auto broadcast = builder.AddInstruction(\n HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));\n auto param0 = builder.AddInstruction(\n HloInstruction::CreateParameter(1, f32vec100_, \"p1\"));\n auto param1 = builder.AddInstruction(\n HloInstruction::CreateParameter(2, f32vec100_, \"p2\"));\n auto mul = builder.AddInstruction(HloInstruction::CreateBinary(\n f32vec100_, HloOpcode::kMultiply, broadcast, param0));\n auto add = builder.AddInstruction(\n HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));\n auto sub = builder.AddInstruction(HloInstruction::CreateBinary(\n f32vec100_, HloOpcode::kSubtract, add, param1));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto buffers_orig = RunBufferAssignmentNoBuffersReuseForAdd(module.get());\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr buffers,\n 
ConvertToProtoAndBack(buffers_orig.get(), module.get()));\n BufferAllocation paramscalar_buffer =\n GetAssignedInputAllocation(*buffers, paramscalar);\n BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);\n BufferAllocation param1_buffer = GetAssignedInputAllocation(*buffers, param1);\n EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());\n EXPECT_NE(paramscalar_buffer.index(), param1_buffer.index());\n EXPECT_NE(param0_buffer.index(), param1_buffer.index());\n const BufferAllocation& sub_buffer = GetTopLevelAllocation(*buffers, sub);\n EXPECT_NE(sub_buffer.index(), param0_buffer.index());\n const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);\n EXPECT_NE(add_buffer.index(), sub_buffer.index());\n GetAssignedOutputAllocation(*buffers, sub);\n}\nTEST_F(BufferAssignmentTest, BasicUniquelyColored) {\n auto builder = HloComputation::Builder(TestName());\n auto paramscalar =\n builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, \"p\"));\n auto broadcast = builder.AddInstruction(\n HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));\n auto param0 = builder.AddInstruction(\n HloInstruction::CreateParameter(1, f32vec100_, \"p1\"));\n auto param1 = builder.AddInstruction(\n HloInstruction::CreateParameter(2, f32vec100_, \"p2\"));\n auto mul = builder.AddInstruction(HloInstruction::CreateBinary(\n f32vec100_, HloOpcode::kMultiply, broadcast, param0));\n auto add = builder.AddInstruction(\n HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));\n auto sub = builder.AddInstruction(HloInstruction::CreateBinary(\n f32vec100_, HloOpcode::kSubtract, add, param1));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n absl::flat_hash_map color_map;\n auto colorer = [&](HloAliasAnalysis* alias_analysis, const HloOrdering&) {\n int color = 0;\n for (HloValue::Id id = 0;\n id < alias_analysis->dataflow_analysis().values().size(); id++) {\n auto& 
value = alias_analysis->dataflow_analysis().GetValue(id);\n color_map[value.defining_instruction()] = color;\n value.set_color(BufferValue::Color(color++));\n }\n return absl::OkStatus();\n };\n auto buffers = RunColoredBufferAssignment(module.get(), colorer);\n BufferAllocation paramscalar_buffer =\n GetAssignedInputAllocation(*buffers, paramscalar);\n BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);\n BufferAllocation param1_buffer = GetAssignedInputAllocation(*buffers, param1);\n EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());\n EXPECT_NE(paramscalar_buffer.index(), param1_buffer.index());\n EXPECT_NE(param0_buffer.index(), param1_buffer.index());\n const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul);\n EXPECT_NE(mul_buffer.index(), param0_buffer.index());\n const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);\n EXPECT_NE(add_buffer.index(), mul_buffer.index());\n GetAssignedOutputAllocation(*buffers, sub);\n EXPECT_EQ(param0->shape().layout().memory_space(), color_map[param0]);\n EXPECT_EQ(param1->shape().layout().memory_space(), color_map[param1]);\n EXPECT_EQ(mul->shape().layout().memory_space(), color_map[mul]);\n EXPECT_EQ(add->shape().layout().memory_space(), color_map[add]);\n EXPECT_EQ(sub->shape().layout().memory_space(), color_map[sub]);\n}\nTEST_F(BufferAssignmentTest, BasicPartiallyColored) {\n auto builder = HloComputation::Builder(TestName());\n auto paramscalar =\n builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, \"p\"));\n auto broadcast = builder.AddInstruction(\n HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));\n auto param0 = builder.AddInstruction(\n HloInstruction::CreateParameter(1, f32vec100_, \"p1\"));\n auto param1 = builder.AddInstruction(\n HloInstruction::CreateParameter(2, f32vec100_, \"p2\"));\n auto mul = builder.AddInstruction(HloInstruction::CreateBinary(\n f32vec100_, HloOpcode::kMultiply, broadcast, param0));\n 
auto add = builder.AddInstruction(\n HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));\n auto sub = builder.AddInstruction(HloInstruction::CreateBinary(\n f32vec100_, HloOpcode::kSubtract, add, param1));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto colorer = [](HloAliasAnalysis* alias_analysis, const HloOrdering&) {\n for (HloValue::Id id = 0;\n id < alias_analysis->dataflow_analysis().values().size(); id++) {\n auto& value = alias_analysis->dataflow_analysis().GetValue(id);\n auto& buffer = alias_analysis->GetBufferContainingValue(value);\n for (const auto& alias : buffer.values()) {\n if (alias->instruction()->opcode() == HloOpcode::kAdd ||\n alias->instruction()->opcode() == HloOpcode::kMultiply) {\n value.set_color(LogicalBuffer::Color(1));\n }\n }\n if (!value.has_color()) {\n value.set_color(LogicalBuffer::Color(0));\n }\n }\n return absl::OkStatus();\n };\n auto buffers = RunColoredBufferAssignment(module.get(), colorer);\n BufferAllocation paramscalar_buffer =\n GetAssignedInputAllocation(*buffers, paramscalar);\n BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);\n BufferAllocation param1_buffer = GetAssignedInputAllocation(*buffers, param1);\n EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());\n EXPECT_NE(paramscalar_buffer.index(), param1_buffer.index());\n EXPECT_NE(param0_buffer.index(), param1_buffer.index());\n const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul);\n EXPECT_NE(mul_buffer.index(), param0_buffer.index());\n const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);\n EXPECT_EQ(add_buffer.index(), mul_buffer.index());\n GetAssignedOutputAllocation(*buffers, sub);\n EXPECT_EQ(mul->shape().layout().memory_space(), 1);\n EXPECT_EQ(add->shape().layout().memory_space(), 1);\n EXPECT_EQ(sub->shape().layout().memory_space(), 0);\n EXPECT_EQ(param0->shape().layout().memory_space(), 0);\n 
EXPECT_EQ(param1->shape().layout().memory_space(), 0);\n}\nTEST_F(BufferAssignmentTest, PresetAssignments) {\n auto builder = HloComputation::Builder(TestName());\n auto paramscalar =\n builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, \"p\"));\n auto broadcast = builder.AddInstruction(\n HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));\n auto param0 = builder.AddInstruction(\n HloInstruction::CreateParameter(1, f32vec100_, \"p1\"));\n auto param1 = builder.AddInstruction(\n HloInstruction::CreateParameter(2, f32vec100_, \"p2\"));\n Shape f32vec100_color1 = ShapeUtil::MakeShapeWithDenseLayout(\n F32, {100}, {0}, {}, 1,\n 0,\n 1);\n auto mul = builder.AddInstruction(HloInstruction::CreateBinary(\n f32vec100_color1, HloOpcode::kMultiply, broadcast, param0));\n auto add = builder.AddInstruction(HloInstruction::CreateBinary(\n f32vec100_color1, HloOpcode::kAdd, mul, param1));\n auto sub = builder.AddInstruction(HloInstruction::CreateBinary(\n f32vec100_, HloOpcode::kSubtract, add, param1));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto preset_assignments = std::make_unique();\n preset_assignments->add_chunk({mul, {}},\n HeapSimulator::Chunk::FromOffsetSize(100, 400));\n preset_assignments->add_chunk({add, {}},\n HeapSimulator::Chunk::FromOffsetSize(550, 400));\n preset_assignments->assignment_information_for_space(1)\n ->size = 950;\n auto buffers = RunBufferAssignmentWithPresetAssignments(\n module.get(), std::move(preset_assignments));\n BufferAllocation paramscalar_buffer =\n GetAssignedInputAllocation(*buffers, paramscalar);\n BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);\n BufferAllocation param1_buffer = GetAssignedInputAllocation(*buffers, param1);\n EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());\n EXPECT_NE(paramscalar_buffer.index(), param1_buffer.index());\n EXPECT_EQ(paramscalar_buffer.color(), LogicalBuffer::Color(0));\n 
EXPECT_NE(param0_buffer.index(), param1_buffer.index());\n EXPECT_EQ(param0_buffer.color(), LogicalBuffer::Color(0));\n const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul);\n const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);\n EXPECT_EQ(mul_buffer, add_buffer);\n EXPECT_NE(mul_buffer.index(), param0_buffer.index());\n EXPECT_EQ(mul_buffer.color(), LogicalBuffer::Color(1));\n EXPECT_EQ(mul_buffer.assigned_buffers().size(), 2);\n for (const auto& value_and_offsetsize : mul_buffer.assigned_buffers()) {\n if (value_and_offsetsize.first->instruction() == mul) {\n EXPECT_EQ(value_and_offsetsize.second.offset, 100);\n EXPECT_EQ(value_and_offsetsize.second.size, 400);\n } else {\n EXPECT_EQ(value_and_offsetsize.first->instruction(), add);\n EXPECT_EQ(value_and_offsetsize.second.offset, 550);\n EXPECT_EQ(value_and_offsetsize.second.size, 400);\n }\n }\n GetAssignedOutputAllocation(*buffers, sub);\n}\nTEST_F(BufferAssignmentTest, PresetAssignmentsWhile) {\n auto module = CreateNewVerifiedModule();\n Shape f32vec10_color1 = ShapeUtil::MakeShapeWithDenseLayout(\n F32, {10}, {0}, {}, 1,\n 0,\n 1);\n Shape t_s32_f32v10_color1 =\n ShapeUtil::MakeTupleShape({s32_, f32vec10_color1});\n auto cond_builder = HloComputation::Builder(\"WhileCond\");\n HloInstruction* cond_param = cond_builder.AddInstruction(\n HloInstruction::CreateParameter(0, t_s32_f32v10_color1, \"cond_param\"));\n HloInstruction* cond_iter = cond_builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(s32_, cond_param, 0));\n HloInstruction* cond_limit = cond_builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(50)));\n cond_builder.AddInstruction(\n HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), cond_iter,\n cond_limit, ComparisonDirection::kLt));\n HloComputation* cond_computation =\n module->AddEmbeddedComputation(cond_builder.Build());\n auto body_builder = HloComputation::Builder(\"WhileBody\");\n HloInstruction* 
body_param = body_builder.AddInstruction(\n HloInstruction::CreateParameter(0, t_s32_f32v10_color1, \"body_param\"));\n HloInstruction* body_iter = body_builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(s32_, body_param, 0));\n HloInstruction* body_data = body_builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(f32vec10_color1, body_param, 1));\n HloInstruction* body_data_increment = body_builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR1(\n {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f})));\n HloInstruction* body_data_next =\n body_builder.AddInstruction(HloInstruction::CreateBinary(\n f32vec10_color1, HloOpcode::kAdd, body_data, body_data_increment));\n HloInstruction* body_iter_increment = body_builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(1)));\n HloInstruction* body_iter_next =\n body_builder.AddInstruction(HloInstruction::CreateBinary(\n s32_, HloOpcode::kAdd, body_iter, body_iter_increment));\n body_builder.AddInstruction(\n HloInstruction::CreateTuple({body_iter_next, body_data_next}));\n HloComputation* body_computation =\n module->AddEmbeddedComputation(body_builder.Build());\n auto builder = HloComputation::Builder(TestName());\n HloInstruction* iter = builder.AddInstruction(\n HloInstruction::CreateParameter(0, s32_, \"param_iter\"));\n HloInstruction* data = builder.AddInstruction(\n HloInstruction::CreateParameter(1, f32vec10_, \"param_data\"));\n HloInstruction* negate = builder.AddInstruction(\n HloInstruction::CreateUnary(f32vec10_color1, HloOpcode::kNegate, data));\n HloInstruction* tuple =\n builder.AddInstruction(HloInstruction::CreateTuple({iter, negate}));\n HloInstruction* while_op = builder.AddInstruction(HloInstruction::CreateWhile(\n t_s32_f32v10_color1, cond_computation, body_computation, tuple));\n HloInstruction* while_data = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(f32vec10_color1, while_op, 1));\n 
builder.AddInstruction(HloInstruction::CreateBinary(\n f32vec10_, HloOpcode::kAdd, while_data, data));\n module->AddEntryComputation(builder.Build());\n auto preset_assignments = std::make_unique();\n preset_assignments->add_chunk({negate, {}},\n HeapSimulator::Chunk::FromOffsetSize(100, 40));\n preset_assignments->assignment_information_for_space(1)\n ->size = 140;\n auto buffers_orig = RunBufferAssignmentWithPresetAssignments(\n module.get(), std::move(preset_assignments));\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr buffers,\n ConvertToProtoAndBack(buffers_orig.get(), module.get()));\n const BufferAllocation& data_buffer = GetTopLevelAllocation(*buffers, negate);\n EXPECT_EQ(data_buffer.assigned_buffers().size(), 5);\n for (const auto& value_and_offsetsize : data_buffer.assigned_buffers()) {\n EXPECT_EQ(value_and_offsetsize.second.offset, 100);\n EXPECT_EQ(value_and_offsetsize.second.size, 40);\n EXPECT_EQ(value_and_offsetsize.first->color(), LogicalBuffer::Color(1));\n }\n}\nTEST_F(BufferAssignmentTest, MultipleUsersForNode) {\n auto builder = HloComputation::Builder(TestName());\n auto paramscalar =\n builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, \"p\"));\n auto broadcast = builder.AddInstruction(\n HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));\n auto param0 = builder.AddInstruction(\n HloInstruction::CreateParameter(1, f32vec100_, \"p1\"));\n auto param1 = builder.AddInstruction(\n HloInstruction::CreateParameter(2, f32vec100_, \"p2\"));\n auto mul = builder.AddInstruction(HloInstruction::CreateBinary(\n f32vec100_, HloOpcode::kMultiply, broadcast, param0));\n auto add = builder.AddInstruction(\n HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));\n auto sub = builder.AddInstruction(\n HloInstruction::CreateBinary(f32vec100_, HloOpcode::kSubtract, add, mul));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto buffers_orig = 
RunBufferAssignment(module.get());\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr buffers,\n ConvertToProtoAndBack(buffers_orig.get(), module.get()));\n BufferAllocation paramscalar_buffer =\n GetAssignedInputAllocation(*buffers, paramscalar);\n BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);\n BufferAllocation param1_index = GetAssignedInputAllocation(*buffers, param1);\n EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());\n EXPECT_NE(paramscalar_buffer.index(), param1_index.index());\n EXPECT_NE(param0_buffer.index(), param1_index.index());\n const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul);\n const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);\n EXPECT_NE(add_buffer.index(), mul_buffer.index());\n const std::vector level0 = GetInstructions(sub);\n int64_t size0 = ValidateBuffers(level0, *buffers);\n LOG(INFO) << \"LogicalBuffer count \" << buffers->Allocations().size()\n << \" for \" << level0.size() << \" instructions; \"\n << \"total buffer size \" << size0;\n}\nTEST_F(BufferAssignmentTest, TrivialMap) {\n auto module = CreateNewVerifiedModule();\n auto map_computation =\n module->AddEmbeddedComputation(BuildMapComputationPlus1(\"f32+1\"));\n auto inner_last = map_computation->root_instruction();\n auto builder = HloComputation::Builder(TestName());\n auto param0 = builder.AddInstruction(\n HloInstruction::CreateParameter(0, f32a100x10_, \"p\"));\n auto map = builder.AddInstruction(\n HloInstruction::CreateMap(f32a100x10_, {param0}, map_computation));\n module->AddEntryComputation(builder.Build());\n const std::vector level0 = GetInstructions(map);\n EXPECT_EQ(2, level0.size()) << \"Invalid main kernel size\";\n const std::vector level1 = GetInstructions(inner_last);\n EXPECT_EQ(3, level1.size()) << \"Invalid nested add+1 size\";\n auto buffers = RunBufferAssignment(module.get());\n int64_t size0 = ValidateBuffers(level0, *buffers);\n int64_t size1 = ValidateBuffers(level1, 
*buffers);\n EXPECT_TRUE(BuffersDistinct(level0, level1, *buffers))\n << \"Reuse between main kernel and embedded mapping.\";\n BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);\n BufferAllocation map_buffer = GetAssignedOutputAllocation(*buffers, map);\n EXPECT_NE(param0_buffer.index(), map_buffer.index());\n EXPECT_EQ(HloOpcode::kAdd, inner_last->opcode());\n const BufferAllocation& inner_add_buffer =\n GetTopLevelAllocation(*buffers, inner_last);\n EXPECT_NE(inner_add_buffer.index(), map_buffer.index());\n LOG(INFO) << \"LogicalBuffer count \" << buffers->Allocations().size()\n << \" for \" << level0.size() + level1.size() << \" instructions; \"\n << \"total buffer size \" << size0 + size1;\n}\nTEST_F(BufferAssignmentTest, CannotReuseInputBufferOfReduce) {\n auto module = CreateNewVerifiedModule();\n auto reduce_computation =\n module->AddEmbeddedComputation(BuildReduceComputation(\"f32+f32\"));\n auto builder = HloComputation::Builder(TestName());\n auto param0 = builder.AddInstruction(\n HloInstruction::CreateParameter(0, f32a100x10_, \"p\"));\n auto exp1 = builder.AddInstruction(\n HloInstruction::CreateUnary(f32a100x10_, HloOpcode::kExp, param0));\n auto exp2 = builder.AddInstruction(\n HloInstruction::CreateUnary(f32a100x10_, HloOpcode::kExp, exp1));\n auto const0 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0f)));\n auto reduce = builder.AddInstruction(HloInstruction::CreateReduce(\n f32vec10_,\n exp2,\n const0,\n {0}, reduce_computation));\n auto exp3 = builder.AddInstruction(\n HloInstruction::CreateUnary(f32vec10_, HloOpcode::kExp, reduce));\n module->AddEntryComputation(builder.Build());\n auto buffers = RunBufferAssignment(module.get());\n const std::vector instrs = GetInstructions(exp3);\n ValidateBuffers(instrs, *buffers);\n const BufferAllocation& exp1_buffer = GetTopLevelAllocation(*buffers, exp1);\n const BufferAllocation& exp2_buffer = GetTopLevelAllocation(*buffers, exp2);\n 
const BufferAllocation& reduce_buffer =\n GetTopLevelAllocation(*buffers, reduce);\n EXPECT_EQ(exp1_buffer.index(), exp2_buffer.index());\n EXPECT_NE(exp2_buffer.index(), reduce_buffer.index());\n}\nTEST_F(BufferAssignmentTest, ExampleWhile) {\n auto module = CreateNewVerifiedModule();\n auto condition_computation =\n module->AddEmbeddedComputation(BuildWhileConditionComputation(\"if<4\"));\n auto body_computation =\n module->AddEmbeddedComputation(BuildWhileBodyComputation(\"add-update\"));\n auto builder = HloComputation::Builder(TestName());\n auto const3 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(0)));\n auto const4 = builder.AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR1({1.1f, 2.2f, 3.3f, 4.4f})));\n auto tuple =\n builder.AddInstruction(HloInstruction::CreateTuple({const3, const4}));\n auto while_op = builder.AddInstruction(HloInstruction::CreateWhile(\n t_s32_f32v4_, condition_computation, body_computation, tuple));\n module->AddEntryComputation(builder.Build());\n const std::vector level0 = GetInstructions(while_op);\n EXPECT_EQ(4, level0.size()) << \"Invalid while kernel size\";\n const std::vector levelc =\n GetInstructions(condition_computation->root_instruction());\n EXPECT_EQ(4, levelc.size()) << \"Invalid nested condition size\";\n const std::vector levelb =\n GetInstructions(body_computation->root_instruction());\n EXPECT_EQ(8, levelb.size()) << \"Invalid nested body size\";\n auto buffers = RunBufferAssignment(module.get());\n int64_t size0 = ValidateBuffers(level0, *buffers);\n int64_t sizec = ValidateBuffers(levelc, *buffers);\n int64_t sizeb = ValidateBuffers(levelb, *buffers);\n EXPECT_FALSE(BuffersDistinct(level0, levelc, *buffers))\n << \"Should be reuse between main kernel and embedded condition.\";\n EXPECT_FALSE(BuffersDistinct(levelb, levelc, *buffers))\n << \"Should be reuse between embedded condition and body.\";\n EXPECT_FALSE(BuffersDistinct(level0, levelb, *buffers))\n << 
\"Should be reuse between main kernel and embedded body.\";\n HloInstruction* body_root = body_computation->root_instruction();\n EXPECT_EQ(HloOpcode::kTuple, body_root->opcode());\n ShapeUtil::ForEachSubshape(\n while_op->shape(),\n [this, &buffers, while_op, body_root](const Shape& ,\n const ShapeIndex& index) {\n auto while_op_allocation = GetAllocation(*buffers, while_op, index);\n auto body_root_allocation = GetAllocation(*buffers, body_root, index);\n EXPECT_EQ(while_op_allocation.index(), body_root_allocation.index());\n });\n LOG(INFO) << \"LogicalBuffer count \" << buffers->Allocations().size()\n << \" for \" << level0.size() + levelc.size() + levelb.size()\n << \" instructions; total buffer size \" << size0 + sizec + sizeb;\n}\nTEST_F(BufferAssignmentTest, ExampleConditional) {\n auto module = CreateNewVerifiedModule();\n auto true_computation = module->AddEmbeddedComputation(\n BuildR0F32UnaryOpComputation(HloOpcode::kCeil, \"Ceil\"));\n auto false_computation = module->AddEmbeddedComputation(\n BuildR0F32UnaryOpComputation(HloOpcode::kFloor, \"Floor\"));\n auto builder = HloComputation::Builder(TestName());\n auto pred = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(false)));\n auto const1 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(56.4f)));\n auto const2 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(12.4f)));\n auto conditional = builder.AddInstruction(HloInstruction::CreateConditional(\n r0f32_, pred, const1, true_computation, const2, false_computation));\n module->AddEntryComputation(builder.Build());\n const std::vector conditional_instrs =\n GetInstructions(conditional);\n const std::vector true_instrs =\n GetInstructions(true_computation->root_instruction());\n const std::vector false_instrs =\n GetInstructions(false_computation->root_instruction());\n EXPECT_EQ(4, conditional_instrs.size());\n EXPECT_EQ(2, true_instrs.size());\n EXPECT_EQ(2, 
false_instrs.size());\n auto buffers = RunBufferAssignment(module.get());\n ValidateBuffers(conditional_instrs, *buffers);\n ValidateBuffers(true_instrs, *buffers);\n ValidateBuffers(false_instrs, *buffers);\n EXPECT_FALSE(BuffersDistinct(conditional_instrs, true_instrs, *buffers))\n << \"Should be reuse between conditional and true computation.\";\n EXPECT_FALSE(BuffersDistinct(conditional_instrs, false_instrs, *buffers))\n << \"Should be reuse between conditional and false computation.\";\n EXPECT_FALSE(BuffersDistinct(true_instrs, false_instrs, *buffers))\n << \"Should be reuse between true and false computations.\";\n const BufferAllocation& conditional_buffer =\n GetTopLevelAllocation(*buffers, conditional);\n const BufferAllocation& true_buffer =\n GetTopLevelAllocation(*buffers, true_computation->root_instruction());\n const BufferAllocation& false_buffer =\n GetTopLevelAllocation(*buffers, false_computation->root_instruction());\n EXPECT_EQ(conditional_buffer.size(), true_buffer.size());\n EXPECT_EQ(conditional_buffer.size(), false_buffer.size());\n}\nTEST_F(BufferAssignmentTest, UnaryOpReuseChain) {\n auto builder = HloComputation::Builder(TestName());\n auto param0 = builder.AddInstruction(\n HloInstruction::CreateParameter(0, f32vec100_, \"p\"));\n auto exp1 = builder.AddInstruction(\n HloInstruction::CreateUnary(f32vec100_, HloOpcode::kExp, param0));\n auto tanh = builder.AddInstruction(\n HloInstruction::CreateUnary(f32vec100_, HloOpcode::kTanh, exp1));\n auto exp2 = builder.AddInstruction(\n HloInstruction::CreateUnary(f32vec100_, HloOpcode::kExp, tanh));\n auto neg = builder.AddInstruction(\n HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, exp2));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto assignment = RunBufferAssignment(module.get());\n EXPECT_TRUE(assignment->HasTopLevelAllocation(exp1));\n auto& buffer_for_exp1 = GetTopLevelAllocation(*assignment, exp1);\n 
EXPECT_EQ(buffer_for_exp1, GetTopLevelAllocation(*assignment, tanh));\n EXPECT_EQ(buffer_for_exp1, GetTopLevelAllocation(*assignment, exp2));\n EXPECT_EQ(buffer_for_exp1, GetTopLevelAllocation(*assignment, neg));\n}\nTEST_F(BufferAssignmentTest, ReuseNonOperandBuffer) {\n auto builder = HloComputation::Builder(TestName());\n auto param0 = builder.AddInstruction(\n HloInstruction::CreateParameter(0, f32vec100_, \"param0\"));\n auto negate = builder.AddInstruction(\n HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));\n auto slice = builder.AddInstruction(\n HloInstruction::CreateSlice(f32vec10_, negate, {0}, {10}, {1}));\n auto broadcast = builder.AddInstruction(\n HloInstruction::CreateBroadcast(f32a100x10_, slice, {1}));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto assignment = RunBufferAssignment(module.get());\n EXPECT_TRUE(assignment->HasTopLevelAllocation(broadcast));\n auto& buffer_for_bcast = GetTopLevelAllocation(*assignment, broadcast);\n EXPECT_EQ(buffer_for_bcast, GetTopLevelAllocation(*assignment, negate));\n EXPECT_NE(buffer_for_bcast, GetTopLevelAllocation(*assignment, slice));\n}\nTEST_F(BufferAssignmentTest, NoReuseLiveBuffer) {\n auto builder = HloComputation::Builder(TestName());\n auto param0 = builder.AddInstruction(\n HloInstruction::CreateParameter(0, f32vec100_, \"param0\"));\n auto negate = builder.AddInstruction(\n HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));\n auto slice = builder.AddInstruction(\n HloInstruction::CreateSlice(f32vec10_, negate, {0}, {10}, {1}));\n auto broadcast = builder.AddInstruction(\n HloInstruction::CreateBroadcast(f32a100x10_, slice, {1}));\n builder.AddInstruction(HloInstruction::CreateTuple({negate, broadcast}));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto assignment_orig = RunBufferAssignment(module.get());\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr assignment,\n 
ConvertToProtoAndBack(assignment_orig.get(), module.get()));\n EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),\n GetTopLevelAllocation(*assignment, negate));\n EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),\n GetTopLevelAllocation(*assignment, slice));\n EXPECT_NE(GetTopLevelAllocation(*assignment, negate),\n GetTopLevelAllocation(*assignment, slice));\n}\nTEST_F(BufferAssignmentTest, NoReuseAliasedBuffer) {\n auto builder = HloComputation::Builder(TestName());\n auto param0 = builder.AddInstruction(\n HloInstruction::CreateParameter(0, f32vec100_, \"param0\"));\n auto negate = builder.AddInstruction(\n HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));\n auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({negate}));\n auto tuple_element = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(f32vec100_, tuple, 0));\n auto slice = builder.AddInstruction(\n HloInstruction::CreateSlice(f32vec10_, tuple_element, {0}, {10}, {1}));\n auto broadcast = builder.AddInstruction(\n HloInstruction::CreateBroadcast(f32a100x10_, slice, {1}));\n builder.AddInstruction(HloInstruction::CreateTuple({tuple, broadcast}));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto assignment_orig = RunBufferAssignment(module.get());\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr assignment,\n ConvertToProtoAndBack(assignment_orig.get(), module.get()));\n EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),\n GetTopLevelAllocation(*assignment, negate));\n EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),\n GetTopLevelAllocation(*assignment, slice));\n EXPECT_NE(GetTopLevelAllocation(*assignment, negate),\n GetTopLevelAllocation(*assignment, slice));\n}\nTEST_F(BufferAssignmentTest, DoNotReuseOversizedOutputBuffer) {\n auto builder = HloComputation::Builder(TestName());\n auto param0 = builder.AddInstruction(\n HloInstruction::CreateParameter(0, f32vec100_, \"param0\"));\n auto 
negate = builder.AddInstruction(\n HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));\n auto slice = builder.AddInstruction(\n HloInstruction::CreateSlice(f32vec10_, negate, {0}, {10}, {1}));\n auto broadcast = builder.AddInstruction(HloInstruction::CreateBroadcast(\n ShapeUtil::MakeShape(F32, {10, 4}), slice, {0}));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto assignment = RunBufferAssignment(module.get());\n EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),\n GetTopLevelAllocation(*assignment, negate));\n EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),\n GetTopLevelAllocation(*assignment, slice));\n}\nTEST_F(BufferAssignmentTest, ReuseOutputBufferIfExactlySized) {\n auto builder = HloComputation::Builder(TestName());\n auto param0 = builder.AddInstruction(\n HloInstruction::CreateParameter(0, f32vec100_, \"param0\"));\n auto negate = builder.AddInstruction(\n HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));\n auto slice = builder.AddInstruction(\n HloInstruction::CreateSlice(f32vec10_, negate, {0}, {10}, {1}));\n auto broadcast = builder.AddInstruction(HloInstruction::CreateBroadcast(\n ShapeUtil::MakeShape(F32, {10, 10}), slice, {0}));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto assignment = RunBufferAssignment(module.get());\n EXPECT_TRUE(assignment->HasTopLevelAllocation(broadcast));\n auto& buffer_for_bcast = GetTopLevelAllocation(*assignment, broadcast);\n EXPECT_EQ(buffer_for_bcast, GetTopLevelAllocation(*assignment, negate));\n EXPECT_NE(buffer_for_bcast, GetTopLevelAllocation(*assignment, slice));\n}\nTEST_F(BufferAssignmentTest, DoNotReuseOversizedOutputBufferInTuple) {\n auto builder = HloComputation::Builder(TestName());\n auto param0 = builder.AddInstruction(\n HloInstruction::CreateParameter(0, f32vec100_, \"param0\"));\n auto negate = builder.AddInstruction(\n 
HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));\n auto slice = builder.AddInstruction(\n HloInstruction::CreateSlice(f32vec10_, negate, {0}, {10}, {1}));\n auto broadcast = builder.AddInstruction(HloInstruction::CreateBroadcast(\n ShapeUtil::MakeShape(F32, {10, 4}), slice, {0}));\n builder.AddInstruction(HloInstruction::CreateTuple({broadcast}));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto assignment = RunBufferAssignment(module.get());\n EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),\n GetTopLevelAllocation(*assignment, negate));\n EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),\n GetTopLevelAllocation(*assignment, slice));\n}\nTEST_F(BufferAssignmentTest, EmbeddedComputationBuffers) {\n auto module = CreateNewVerifiedModule();\n auto vec_shape = ShapeUtil::MakeShape(F32, {42});\n auto scalar_shape = ShapeUtil::MakeShape(F32, {});\n auto map_builder = HloComputation::Builder(TestName() + \"_map\");\n auto map_param = map_builder.AddInstruction(\n HloInstruction::CreateParameter(0, scalar_shape, \"map_param\"));\n auto map_root = map_builder.AddInstruction(\n HloInstruction::CreateUnary(scalar_shape, HloOpcode::kNegate, map_param));\n auto map_computation = module->AddEmbeddedComputation(map_builder.Build());\n auto call_builder = HloComputation::Builder(TestName() + \"_call\");\n auto call_param = call_builder.AddInstruction(\n HloInstruction::CreateParameter(0, vec_shape, \"vec_param\"));\n auto call_root = call_builder.AddInstruction(\n HloInstruction::CreateUnary(vec_shape, HloOpcode::kExp, call_param));\n auto call_computation = module->AddEmbeddedComputation(call_builder.Build());\n auto builder = HloComputation::Builder(TestName());\n auto param = builder.AddInstruction(\n HloInstruction::CreateParameter(0, vec_shape, \"param\"));\n auto call = builder.AddInstruction(\n HloInstruction::CreateCall(vec_shape, {param}, call_computation));\n auto map = 
builder.AddInstruction(\n HloInstruction::CreateMap(vec_shape, {call}, map_computation));\n module->AddEntryComputation(builder.Build());\n auto assignment = RunBufferAssignment(module.get());\n auto& map_param_alloc = GetTopLevelAllocation(*assignment, map_param);\n EXPECT_FALSE(map_param_alloc.is_entry_computation_parameter());\n EXPECT_FALSE(map_param_alloc.maybe_live_out());\n EXPECT_TRUE(map_param_alloc.is_thread_local());\n auto& map_root_alloc = GetTopLevelAllocation(*assignment, map_root);\n EXPECT_FALSE(map_root_alloc.is_entry_computation_parameter());\n EXPECT_FALSE(map_root_alloc.maybe_live_out());\n EXPECT_TRUE(map_root_alloc.is_thread_local());\n auto& call_param_alloc = GetTopLevelAllocation(*assignment, call_param);\n EXPECT_TRUE(call_param_alloc.is_entry_computation_parameter());\n EXPECT_FALSE(call_param_alloc.maybe_live_out());\n EXPECT_FALSE(call_param_alloc.is_thread_local());\n auto& call_root_alloc = GetTopLevelAllocation(*assignment, call_root);\n EXPECT_FALSE(call_root_alloc.is_entry_computation_parameter());\n EXPECT_FALSE(call_root_alloc.is_thread_local());\n auto& param_alloc = GetTopLevelAllocation(*assignment, param);\n EXPECT_TRUE(param_alloc.is_entry_computation_parameter());\n EXPECT_FALSE(param_alloc.maybe_live_out());\n EXPECT_FALSE(param_alloc.is_thread_local());\n auto& map_alloc = GetTopLevelAllocation(*assignment, map);\n EXPECT_FALSE(map_alloc.is_entry_computation_parameter());\n EXPECT_TRUE(map_alloc.maybe_live_out());\n EXPECT_FALSE(map_alloc.is_thread_local());\n}\nTEST_F(BufferAssignmentTest, CustomCallEmbeddedComputationBuffers) {\n auto module = CreateNewVerifiedModule();\n auto scalar_shape = ShapeUtil::MakeShape(F32, {});\n auto map_builder = HloComputation::Builder(TestName() + \"_map\");\n auto map_param = map_builder.AddInstruction(\n HloInstruction::CreateParameter(0, scalar_shape, \"map_param\"));\n auto map_root = map_builder.AddInstruction(\n HloInstruction::CreateUnary(scalar_shape, HloOpcode::kNegate, 
map_param));\n auto map_computation = module->AddEmbeddedComputation(map_builder.Build());\n auto builder = HloComputation::Builder(TestName());\n auto param = builder.AddInstruction(\n HloInstruction::CreateParameter(0, scalar_shape, \"param\"));\n builder.AddInstruction(HloInstruction::CreateCustomCall(\n scalar_shape, {param}, map_computation, \"call_name\"));\n module->AddEntryComputation(builder.Build());\n auto assignment = RunBufferAssignment(module.get());\n auto& map_param_alloc = GetTopLevelAllocation(*assignment, map_param);\n EXPECT_FALSE(map_param_alloc.is_entry_computation_parameter());\n EXPECT_FALSE(map_param_alloc.maybe_live_out());\n EXPECT_TRUE(map_param_alloc.is_thread_local());\n auto& map_root_alloc = GetTopLevelAllocation(*assignment, map_root);\n EXPECT_FALSE(map_root_alloc.is_entry_computation_parameter());\n EXPECT_FALSE(map_root_alloc.maybe_live_out());\n EXPECT_TRUE(map_root_alloc.is_thread_local());\n}\nTEST_F(BufferAssignmentTest, TupleParameterAsOutput) {\n auto builder = HloComputation::Builder(TestName());\n auto tuple_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 0,\n ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(PRED, {1, 2, 3, 4}),\n ShapeUtil::MakeShape(F32, {}),\n ShapeUtil::MakeShape(S32, {42})}),\n \"param0\"));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto assignment = RunBufferAssignment(module.get());\n EXPECT_EQ(4, assignment->Allocations().size());\n ShapeUtil::ForEachSubshape(\n tuple_param->shape(),\n [this, &assignment, tuple_param](const Shape& ,\n const ShapeIndex& index) {\n auto allocation = GetAllocation(*assignment, tuple_param, index);\n EXPECT_TRUE(allocation.is_entry_computation_parameter());\n EXPECT_EQ(0, allocation.parameter_number());\n EXPECT_TRUE(allocation.maybe_live_out());\n });\n}\nTEST_F(BufferAssignmentTest, ElementOfNestedTupleParameterAsOutput) {\n auto builder = HloComputation::Builder(TestName());\n auto tuple_param = 
builder.AddInstruction(HloInstruction::CreateParameter(\n 0,\n ShapeUtil::MakeTupleShape(\n {ShapeUtil::MakeShape(PRED, {1, 2, 3, 4}),\n ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(S32, {42}),\n ShapeUtil::MakeShape(S32, {101})})}),\n \"param0\"));\n auto tuple_element =\n builder.AddInstruction(HloInstruction::CreateGetTupleElement(\n ShapeUtil::GetSubshape(tuple_param->shape(), {1}), tuple_param, 1));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto assignment = RunBufferAssignment(module.get());\n EXPECT_FALSE(\n GetAllocation(*assignment, tuple_param, {}).maybe_live_out());\n EXPECT_TRUE(\n GetAllocation(*assignment, tuple_param, {1}).maybe_live_out());\n EXPECT_TRUE(GetAllocation(*assignment, tuple_param, {1, 0})\n .maybe_live_out());\n EXPECT_TRUE(GetAllocation(*assignment, tuple_param, {1, 1})\n .maybe_live_out());\n EXPECT_TRUE(\n GetTopLevelAllocation(*assignment, tuple_element).maybe_live_out());\n EXPECT_EQ(GetAllocation(*assignment, tuple_param, {1, 0}),\n GetAllocation(*assignment, tuple_element, {0}));\n EXPECT_EQ(GetAllocation(*assignment, tuple_param, {1, 1}),\n GetAllocation(*assignment, tuple_element, {1}));\n EXPECT_EQ(GetAllocation(*assignment, tuple_param, {1}),\n GetTopLevelAllocation(*assignment, tuple_element));\n}\nTEST_F(BufferAssignmentTest, TupleConstantAsOutput) {\n auto builder = HloComputation::Builder(TestName());\n Literal elements[] = {LiteralUtil::CreateR0(0),\n LiteralUtil::CreateR0(1)};\n builder.AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::MakeTuple({&elements[0], &elements[1]})));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto assignment = RunBufferAssignment(module.get());\n EXPECT_EQ(3, assignment->Allocations().size());\n}\nTEST_F(BufferAssignmentTest, TupleCustomCallAsOutput) {\n auto builder = HloComputation::Builder(TestName());\n auto custom_call = 
builder.AddInstruction(HloInstruction::CreateCustomCall(\n ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(PRED, {1, 2, 3, 4}),\n ShapeUtil::MakeShape(S32, {101})}),\n {}, \"foo_function\"));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto assignment = RunBufferAssignment(module.get());\n EXPECT_EQ(3, assignment->Allocations().size());\n EXPECT_TRUE(\n GetAllocation(*assignment, custom_call, {}).maybe_live_out());\n EXPECT_TRUE(\n GetAllocation(*assignment, custom_call, {0}).maybe_live_out());\n EXPECT_TRUE(\n GetAllocation(*assignment, custom_call, {1}).maybe_live_out());\n}\nTEST_F(BufferAssignmentTest, CustomCallAliasedBuffer) {\n const char* const kModuleString = R\"(\n HloModule xla_computation_f\n ENTRY xla_computation_f {\n parameter.1 = f32[2,3,4,5] parameter(0)\n parameter.2 = f32[2,3,4,5] parameter(1)\n add = f32[2,3,4,5] add(parameter.1, parameter.2)\n ROOT custom-call = f32[2,3,4,5] custom-call(add, parameter.2), custom_call_target=\"dm_softmax\", operand_layout_constraints={f32[2,3,4,5], f32[2,3,4,5]}, output_to_operand_aliasing={{}: (0, {})}\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnUnverifiedModule(kModuleString));\n std::unique_ptr assignment =\n RunBufferAssignment(module.get());\n HloInstruction* custom_call = module->entry_computation()->root_instruction();\n EXPECT_TRUE(\n assignment->SharesTopLevelSlice(custom_call, custom_call->operand(0)));\n}\nTEST_F(BufferAssignmentTest, TupleCallAsOutput) {\n auto module = CreateNewVerifiedModule();\n auto elem_shape = f32vec4_;\n auto tuple_shape = ShapeUtil::MakeTupleShape({elem_shape});\n auto sub_builder = HloComputation::Builder(TestName() + \"_sub\");\n auto sub_param = sub_builder.AddInstruction(\n HloInstruction::CreateParameter(0, elem_shape, \"sub_param\"));\n auto sub_tuple =\n sub_builder.AddInstruction(HloInstruction::CreateTuple({sub_param}));\n auto sub_computation = 
module->AddEmbeddedComputation(sub_builder.Build());\n auto builder = HloComputation::Builder(TestName());\n auto param = builder.AddInstruction(\n HloInstruction::CreateParameter(0, elem_shape, \"param\"));\n auto call = builder.AddInstruction(\n HloInstruction::CreateCall(tuple_shape, {param}, sub_computation));\n module->AddEntryComputation(builder.Build());\n auto assignment = RunBufferAssignment(module.get());\n EXPECT_EQ(2, assignment->Allocations().size());\n EXPECT_EQ(GetAllocation(*assignment, call, {}),\n GetAllocation(*assignment, sub_tuple, {}));\n EXPECT_EQ(GetAllocation(*assignment, call, {0}),\n GetAllocation(*assignment, sub_param, {}));\n EXPECT_NE(GetTopLevelAllocation(*assignment, param),\n GetTopLevelAllocation(*assignment, sub_tuple));\n EXPECT_EQ(GetTopLevelAllocation(*assignment, param),\n GetTopLevelAllocation(*assignment, sub_param));\n}\nTEST_F(BufferAssignmentTest, TupleChainedCallAsOutput) {\n auto module = CreateNewVerifiedModule();\n auto elem_shape = f32vec4_;\n auto tuple_shape = ShapeUtil::MakeTupleShape({elem_shape});\n auto d_builder = HloComputation::Builder(TestName() + \"_d\");\n auto d_param = d_builder.AddInstruction(\n HloInstruction::CreateParameter(0, tuple_shape, \"d_param\"));\n auto d_computation = d_builder.Build();\n auto c_builder = HloComputation::Builder(TestName() + \"_c\");\n auto c_param = c_builder.AddInstruction(\n HloInstruction::CreateParameter(0, tuple_shape, \"c_param\"));\n auto c_call = c_builder.AddInstruction(\n HloInstruction::CreateCall(tuple_shape, {c_param}, d_computation.get()));\n auto c_computation = c_builder.Build();\n auto b_builder = HloComputation::Builder(TestName() + \"_b\");\n auto b_param = b_builder.AddInstruction(\n HloInstruction::CreateParameter(0, tuple_shape, \"b_param\"));\n auto b_call = b_builder.AddInstruction(\n HloInstruction::CreateCall(tuple_shape, {b_param}, c_computation.get()));\n auto b_computation = b_builder.Build();\n auto a_builder = 
HloComputation::Builder(TestName());\n auto a_param = a_builder.AddInstruction(\n HloInstruction::CreateParameter(0, elem_shape, \"param\"));\n auto a_tuple =\n a_builder.AddInstruction(HloInstruction::CreateTuple({a_param}));\n auto a_call = a_builder.AddInstruction(\n HloInstruction::CreateCall(tuple_shape, {a_tuple}, b_computation.get()));\n auto a_computation = a_builder.Build();\n module->AddEmbeddedComputation(std::move(d_computation));\n module->AddEmbeddedComputation(std::move(c_computation));\n module->AddEntryComputation(std::move(a_computation));\n module->AddEmbeddedComputation(std::move(b_computation));\n auto assignment = RunBufferAssignment(module.get());\n EXPECT_EQ(GetAllocation(*assignment, a_call, {}),\n GetAllocation(*assignment, b_call, {}));\n EXPECT_EQ(GetAllocation(*assignment, b_call, {}),\n GetAllocation(*assignment, c_call, {}));\n EXPECT_EQ(GetAllocation(*assignment, c_call, {}),\n GetAllocation(*assignment, d_param, {}));\n EXPECT_EQ(GetAllocation(*assignment, a_call, {0}),\n GetAllocation(*assignment, b_call, {0}));\n EXPECT_EQ(GetAllocation(*assignment, b_call, {0}),\n GetAllocation(*assignment, c_call, {0}));\n EXPECT_EQ(GetAllocation(*assignment, c_call, {0}),\n GetAllocation(*assignment, d_param, {0}));\n EXPECT_TRUE(BuffersDistinct({a_param}, {b_param}, *assignment));\n EXPECT_TRUE(BuffersDistinct({a_param}, {c_param}, *assignment));\n EXPECT_TRUE(BuffersDistinct({a_param}, {d_param}, *assignment));\n EXPECT_EQ(GetAllocation(*assignment, b_param, {0}),\n GetAllocation(*assignment, c_param, {0}));\n EXPECT_EQ(GetAllocation(*assignment, c_param, {0}),\n GetAllocation(*assignment, d_param, {0}));\n}\nTEST_F(BufferAssignmentTest, BitcastAsOutput) {\n auto builder = HloComputation::Builder(TestName());\n auto param = builder.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeShape(F32, {42}), \"param\"));\n auto bitcast = builder.AddInstruction(\n HloInstruction::CreateBitcast(param->shape(), param));\n auto module = 
CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto assignment = RunBufferAssignment(module.get());\n EXPECT_EQ(1, assignment->Allocations().size());\n EXPECT_EQ(GetTopLevelAllocation(*assignment, param),\n GetTopLevelAllocation(*assignment, bitcast));\n}\nTEST_F(BufferAssignmentTest, TupleBufferNotReused) {\n auto builder = HloComputation::Builder(TestName());\n auto scalar_shape = ShapeUtil::MakeShape(F32, {});\n auto param = builder.AddInstruction(\n HloInstruction::CreateParameter(0, scalar_shape, \"param0\"));\n auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({param}));\n auto tuple_element = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(scalar_shape, tuple, 0));\n auto copy = builder.AddInstruction(HloInstruction::CreateUnary(\n scalar_shape, HloOpcode::kCopy, tuple_element));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto assignment_orig = RunBufferAssignment(module.get());\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr assignment,\n ConvertToProtoAndBack(assignment_orig.get(), module.get()));\n EXPECT_EQ(3, assignment->Allocations().size());\n EXPECT_NE(GetTopLevelAllocation(*assignment, tuple),\n GetTopLevelAllocation(*assignment, copy));\n}\nTEST_F(BufferAssignmentTest, OneTempAllocation) {\n auto builder = HloComputation::Builder(TestName());\n Shape shape_2x3 = ShapeUtil::MakeShape(F32, {2, 3});\n Shape shape_2x4 = ShapeUtil::MakeShape(F32, {2, 4});\n Shape shape_3x4 = ShapeUtil::MakeShape(F32, {3, 4});\n Shape shape_4x4 = ShapeUtil::MakeShape(F32, {4, 4});\n Shape shape_5x4 = ShapeUtil::MakeShape(F32, {5, 4});\n auto param_a = builder.AddInstruction(\n HloInstruction::CreateParameter(0, shape_2x3, \"param_a\"));\n auto param_b = builder.AddInstruction(\n HloInstruction::CreateParameter(1, shape_3x4, \"param_b\"));\n auto param_c = builder.AddInstruction(\n HloInstruction::CreateParameter(2, shape_4x4, \"param_c\"));\n DotDimensionNumbers 
dot_dnums;\n dot_dnums.add_lhs_contracting_dimensions(1);\n dot_dnums.add_rhs_contracting_dimensions(0);\n PrecisionConfig precision_config;\n precision_config.mutable_operand_precision()->Resize(\n 2, PrecisionConfig::DEFAULT);\n auto dot_ab = builder.AddInstruction(HloInstruction::CreateDot(\n shape_2x4, param_a, param_b, dot_dnums, precision_config));\n auto dot_bc = builder.AddInstruction(HloInstruction::CreateDot(\n shape_3x4, param_b, param_c, dot_dnums, precision_config));\n builder.AddInstruction(\n HloInstruction::CreateConcatenate(shape_5x4, {dot_ab, dot_bc}, 0));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto assignment = RunBufferAssignment(module.get(), 1);\n EXPECT_EQ(5, assignment->Allocations().size());\n BufferAllocation::Slice slice_ab =\n assignment->GetUniqueTopLevelSlice(dot_ab).value();\n BufferAllocation::Slice slice_bc =\n assignment->GetUniqueTopLevelSlice(dot_bc).value();\n EXPECT_EQ(slice_ab.allocation(), slice_bc.allocation());\n EXPECT_NE(slice_ab, slice_bc);\n EXPECT_EQ(32, slice_ab.size());\n EXPECT_EQ(48, slice_bc.size());\n EXPECT_EQ(80, slice_ab.allocation()->size());\n EXPECT_EQ(80, slice_bc.allocation()->size());\n assignment = RunBufferAssignment(module.get(), 64);\n EXPECT_EQ(5, assignment->Allocations().size());\n slice_ab = assignment->GetUniqueTopLevelSlice(dot_ab).value();\n slice_bc = assignment->GetUniqueTopLevelSlice(dot_bc).value();\n EXPECT_EQ(slice_ab.allocation(), slice_bc.allocation());\n EXPECT_NE(slice_ab, slice_bc);\n EXPECT_EQ(32, slice_ab.size());\n EXPECT_EQ(48, slice_bc.size());\n if (slice_ab.offset() == 0) {\n EXPECT_EQ(64, slice_bc.offset());\n EXPECT_EQ(64 + 48, slice_ab.allocation()->size());\n EXPECT_EQ(64 + 48, slice_bc.allocation()->size());\n } else {\n EXPECT_EQ(64, slice_ab.offset());\n EXPECT_EQ(0, slice_bc.offset());\n EXPECT_EQ(64 + 32, slice_ab.allocation()->size());\n EXPECT_EQ(64 + 32, slice_bc.allocation()->size());\n 
}\n}\nTEST_F(BufferAssignmentTest, TrivialPeakBuffers) {\n auto builder = HloComputation::Builder(TestName());\n auto paramscalar =\n builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, \"p\"));\n auto broadcast = builder.AddInstruction(\n HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));\n auto param0 = builder.AddInstruction(\n HloInstruction::CreateParameter(1, f32vec100_, \"p1\"));\n auto param1 = builder.AddInstruction(\n HloInstruction::CreateParameter(2, f32vec100_, \"p2\"));\n auto mul = builder.AddInstruction(HloInstruction::CreateBinary(\n f32vec100_, HloOpcode::kMultiply, broadcast, param0));\n auto add = builder.AddInstruction(\n HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));\n auto sub = builder.AddInstruction(HloInstruction::CreateBinary(\n f32vec100_, HloOpcode::kSubtract, add, param1));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto buffers = RunBufferAssignment(module.get());\n const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul);\n const std::vector& peak_buffers =\n mul_buffer.PeakMemoryLogicalBuffers();\n ASSERT_EQ(peak_buffers.size(), 1);\n EXPECT_EQ(peak_buffers[0]->instruction(), sub);\n}\nTEST_F(BufferAssignmentTest, PeakBuffers) {\n auto builder = HloComputation::Builder(TestName());\n auto param = builder.AddInstruction(\n HloInstruction::CreateParameter(0, f32vec100_, \"p\"));\n auto log = builder.AddInstruction(\n HloInstruction::CreateUnary(f32vec100_, HloOpcode::kLog, param));\n auto rev = builder.AddInstruction(\n HloInstruction::CreateReverse(f32vec100_, log, {0}));\n auto neg = builder.AddInstruction(\n HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param));\n const Shape concat_shape = ShapeUtil::MakeShape(F32, {200});\n auto concat = builder.AddInstruction(\n HloInstruction::CreateConcatenate(concat_shape, {rev, neg}, 0));\n auto root = builder.AddInstruction(HloInstruction::CreateSlice(\n 
ShapeUtil::MakeShape(F32, {1}), concat, {0}, {1}, {1}));\n auto module = CreateNewVerifiedModule();\n module->AddEntryComputation(builder.Build());\n auto buffers = RunBufferAssignmentWithInstructionSequence(\n module.get(), {param, log, rev, neg, concat, root});\n const BufferAllocation& buffer = GetTopLevelAllocation(*buffers, concat);\n EXPECT_FALSE(buffer.IsInputOrOutput());\n EXPECT_TRUE(buffer.IsPreallocatedTempBuffer());\n ASSERT_EQ(buffer.assigned_buffers().size(), 4);\n const std::vector& peak_buffers =\n buffer.PeakMemoryLogicalBuffers();\n ASSERT_EQ(peak_buffers.size(), 3);\n std::vector peak_instructions;\n for (const HloValue* logical_buffer : peak_buffers) {\n peak_instructions.push_back(logical_buffer->instruction());\n }\n EXPECT_THAT(peak_instructions, UnorderedElementsAre(rev, neg, concat));\n}\nTEST_F(BufferAssignmentTest, AliasedBuffersShouldntCoexistInPeakBuffers) {\n std::string hlo_text = R\"(\nHloModule test_module, is_scheduled=true\ncond {\n param = (s32[], s32[]) parameter(0)\n ROOT constant = pred[] constant(true)\n}\nbody {\n param.0 = (s32[], s32[]) parameter(0)\n gte = s32[] get-tuple-element(param.0), index=0\n add = s32[] add(gte, gte)\n ROOT tuple = (s32[], s32[]) tuple(add, add)\n}\nENTRY test_module {\n param.3 = s32[] parameter(0)\n copy = s32[] copy(param.3)\n tuple = (s32[], s32[]) tuple(copy, copy)\n while = (s32[], s32[]) while(tuple), condition=cond, body=body\n gte = s32[] get-tuple-element(while), index=0\n ROOT negate = s32[] negate(gte)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));\n auto assignment = RunBufferAssignmentWithSequentialOrdering(module.get());\n const BufferAllocation& buffer =\n GetTopLevelAllocation(*assignment, FindInstruction(module.get(), \"copy\"));\n const std::vector& peak_buffers =\n buffer.PeakMemoryLogicalBuffers();\n int num_peak_buffers = 0;\n for (const HloValue* peak_buffer : peak_buffers) {\n if (peak_buffer->shape().IsArray()) {\n 
++num_peak_buffers;\n }\n }\n EXPECT_EQ(num_peak_buffers, 1);\n}\nTEST_F(BufferAssignmentTest, InPlaceBuffer) {\n const char* hlo_text = R\"(\nHloModule Module\nENTRY main {\n state = (s32[], f32[1280,1,128]{2,1,0}) parameter(0)\n constant.1 = f32[] constant(0)\n broadcast.6 = f32[128,1,128]{2,1,0} broadcast(constant.1), dimensions={}\n get-tuple-element.4 = f32[1280,1,128]{2,1,0} get-tuple-element(state), index=1\n get-tuple-element.3 = s32[] get-tuple-element(state), index=0\n constant.2 = s32[] constant(128)\n add.5 = s32[] add(get-tuple-element.3, constant.2)\n constant.3 = s32[] constant(0)\n dynamic-update-slice.5 = f32[1280,1,128]{2,1,0} dynamic-update-slice(get-tuple-element.4, broadcast.6, constant.3, constant.3, constant.3)\n dynamic-update-slice.9 = f32[1280,1,128]{2,1,0} dynamic-update-slice(dynamic-update-slice.5, broadcast.6, constant.3, constant.3, constant.3)\n ROOT tuple.85 = (s32[], f32[1280,1,128]{2,1,0}) tuple(add.5, dynamic-update-slice.9)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text));\n HloInstruction* parameter =\n m->entry_computation()->GetInstructionWithName(\"get-tuple-element.4\");\n HloInstruction* dus1 =\n m->entry_computation()->GetInstructionWithName(\"dynamic-update-slice.5\");\n HloInstruction* dus2 =\n m->entry_computation()->GetInstructionWithName(\"dynamic-update-slice.9\");\n auto buffers = RunBufferAssignment(m.get());\n {\n const BufferAllocation& parameter_alloc =\n GetTopLevelAllocation(*buffers, parameter);\n const BufferAllocation& dus1_alloc = GetTopLevelAllocation(*buffers, dus1);\n EXPECT_EQ(parameter_alloc, dus1_alloc);\n const BufferAllocation& dus2_alloc = GetTopLevelAllocation(*buffers, dus2);\n EXPECT_EQ(parameter_alloc, dus2_alloc);\n }\n}\nTEST_F(BufferAssignmentTest, ConstantBuffersAreNotReused) {\n const char* hlo_text = R\"(\nHloModule Module\nTrue {\n ROOT x.0.1 = f32[] parameter(0)\n}\nFalse {\n x.0.0 = f32[] parameter(0)\n ROOT copy.1 = f32[] copy(x.0.0)\n}\nENTRY main 
{\n pred.1.0 = pred[] parameter(0)\n constant.1.1 = f32[] constant(56)\n copy.2 = f32[] copy(constant.1.1)\n constant.1.2 = f32[] constant(12)\n ROOT conditional.1.3 = f32[] conditional(pred.1.0, copy.2, constant.1.2),\n true_computation=True, false_computation=False\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text));\n HloInstruction* constant_1 =\n m->entry_computation()->GetInstructionWithName(\"constant.1.1\");\n HloInstruction* constant_2 =\n m->entry_computation()->GetInstructionWithName(\"constant.1.2\");\n auto buffers = RunBufferAssignment(m.get());\n {\n const BufferAllocation& allocation_for_const_1 =\n GetTopLevelAllocation(*buffers, constant_1);\n EXPECT_TRUE(allocation_for_const_1.is_constant());\n for (const auto& buffer_offset_pair :\n allocation_for_const_1.assigned_buffers()) {\n EXPECT_NE(buffer_offset_pair.first->instruction()->opcode(),\n HloOpcode::kCopy);\n EXPECT_NE(buffer_offset_pair.first->instruction()->opcode(),\n HloOpcode::kConditional);\n }\n }\n {\n const BufferAllocation& allocation_for_const_2 =\n GetTopLevelAllocation(*buffers, constant_2);\n EXPECT_TRUE(allocation_for_const_2.is_constant());\n for (const auto& buffer_offset_pair :\n allocation_for_const_2.assigned_buffers()) {\n EXPECT_NE(buffer_offset_pair.first->instruction()->opcode(),\n HloOpcode::kCopy);\n EXPECT_NE(buffer_offset_pair.first->instruction()->opcode(),\n HloOpcode::kConditional);\n }\n }\n}\nclass WhileBufferAssignmentTest : public HloTestBase {\n protected:\n std::unique_ptr BuildWhileConditionComputation(\n const std::string& name) {\n auto builder = HloComputation::Builder(name);\n builder.AddInstruction(\n HloInstruction::CreateParameter(0, loop_state_shape_, \"loop_state\"));\n auto zero = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(0)));\n auto ten = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(10)));\n 
builder.AddInstruction(HloInstruction::CreateCompare(\n ShapeUtil::MakeShape(PRED, {}), zero, ten, ComparisonDirection::kLt));\n return builder.Build();\n }\n std::unique_ptr BuildWhileBodyComputation(\n const std::string& name) {\n auto builder = HloComputation::Builder(name);\n auto loop_state = builder.AddInstruction(\n HloInstruction::CreateParameter(0, loop_state_shape_, \"loop_state\"));\n auto input = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 0));\n auto weights = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 1));\n auto output = builder.AddInstruction(HloInstruction::CreateBinary(\n data_shape_, HloOpcode::kMultiply, input, weights));\n builder.AddInstruction(\n HloInstruction::CreateTuple({input, weights, output}));\n return builder.Build();\n }\n std::unique_ptr RunBufferAssignment(HloModule* module,\n int64_t alignment = 1) {\n HloSchedule schedule = ScheduleModule(module, ByteSizeOf).value();\n return BufferAssigner::Run(\n module, std::make_unique(schedule),\n ByteSizeOf,\n [alignment](LogicalBuffer::Color) { return alignment; },\n true)\n .value();\n }\n static int64_t ByteSizeOf(const BufferValue& buffer) {\n return ShapeUtil::ByteSizeOf(buffer.shape(), sizeof(void*));\n }\n Shape data_shape_ = ShapeUtil::MakeShape(F32, {4});\n Shape loop_state_shape_ =\n ShapeUtil::MakeTupleShape({data_shape_, data_shape_, data_shape_});\n};\nstatic void RunCopyInsertion(HloModule* module) {\n CopyInsertion copy_insertion;\n EXPECT_IS_OK(copy_insertion.Run(module).status());\n}\nTEST_F(WhileBufferAssignmentTest, TwoForwardWhileLoops) {\n auto module = CreateNewVerifiedModule();\n auto builder = HloComputation::Builder(\"entry\");\n auto input0 = builder.AddInstruction(\n HloInstruction::CreateParameter(0, data_shape_, \"input0\"));\n auto weights0 = builder.AddInstruction(\n HloInstruction::CreateParameter(1, data_shape_, \"weights0\"));\n auto weights1 = 
builder.AddInstruction(\n HloInstruction::CreateParameter(2, data_shape_, \"weights1\"));\n auto zero = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0)));\n auto output0 = builder.AddInstruction(\n HloInstruction::CreateBroadcast(data_shape_, zero, {}));\n auto output1 = builder.AddInstruction(\n HloInstruction::CreateBroadcast(data_shape_, zero, {}));\n auto cond0 =\n module->AddEmbeddedComputation(BuildWhileConditionComputation(\"cond\"));\n auto body0 =\n module->AddEmbeddedComputation(BuildWhileBodyComputation(\"body\"));\n auto tuple0 = builder.AddInstruction(\n HloInstruction::CreateTuple({input0, weights0, output0}));\n auto while0 = builder.AddInstruction(\n HloInstruction::CreateWhile(loop_state_shape_, cond0, body0, tuple0));\n auto cond1 =\n module->AddEmbeddedComputation(BuildWhileConditionComputation(\"cond\"));\n auto body1 =\n module->AddEmbeddedComputation(BuildWhileBodyComputation(\"body\"));\n auto input1 = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(data_shape_, while0, 2));\n auto tuple1 = builder.AddInstruction(\n HloInstruction::CreateTuple({input1, weights1, output1}));\n auto while1 = builder.AddInstruction(\n HloInstruction::CreateWhile(loop_state_shape_, cond1, body1, tuple1));\n module->AddEntryComputation(builder.Build());\n RunCopyInsertion(module.get());\n auto assignment = RunBufferAssignment(module.get());\n EXPECT_EQ(assignment->GetUniqueSlice(input0, {}).value(),\n assignment->GetUniqueSlice(while0, {0}).value());\n EXPECT_EQ(assignment->GetUniqueSlice(weights0, {}).value(),\n assignment->GetUniqueSlice(while0, {1}).value());\n EXPECT_EQ(assignment->GetUniqueSlice(while0, {2}).value(),\n assignment->GetUniqueSlice(while1, {0}).value());\n EXPECT_EQ(assignment->GetUniqueSlice(weights1, {}).value(),\n assignment->GetUniqueSlice(while1, {1}).value());\n}\nTEST_F(WhileBufferAssignmentTest, ColocatedBufferWithEntryParameter) {\n const Shape r0s32 = ShapeUtil::MakeShape(S32, {});\n 
const char* module_str = R\"(\nHloModule test_module\n%cond.v0 {\n %param = s32[] parameter(0)\n ROOT %constant = pred[] constant(true)\n}\n%cond.v1 {\n %param.0 = s32[] parameter(0)\n ROOT %constant.0 = pred[] constant(true)\n}\n%body.v0 {\n ROOT %param.1 = s32[] parameter(0)\n}\n%body.v1 {\n %param.2 = s32[] parameter(0)\n ROOT add = s32[] add(%param.2, %param.2)\n}\nENTRY %test_module {\n %param.3 = s32[] parameter(0)\n %while.0 = s32[] while(%param.3), condition=%cond.v0, body=%body.v0\n %mul = s32[] multiply(%while.0, %while.0)\n %while.1 = s32[] while(%mul), condition=%cond.v1, body=%body.v1\n ROOT %bcast = s32[1024,1024]{1,0} broadcast(s32[] %while.1), dimensions={}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));\n int64_t instruction_count = m->instruction_count();\n CopyInsertion copy_insertion;\n ASSERT_IS_OK(copy_insertion.Run(m.get()).status());\n ASSERT_EQ(instruction_count, m->instruction_count());\n const HloInstruction* bcast = m->entry_computation()->root_instruction();\n const HloInstruction* param =\n m->entry_computation()->parameter_instruction(0);\n ASSERT_EQ(bcast->opcode(), HloOpcode::kBroadcast);\n const HloInstruction* while1 = bcast->operand(0);\n ASSERT_EQ(while1->opcode(), HloOpcode::kWhile);\n const HloInstruction* while0 = while1->operand(0)->operand(0);\n ASSERT_EQ(while0->opcode(), HloOpcode::kWhile);\n auto assignment = RunBufferAssignment(m.get());\n TF_ASSERT_OK_AND_ASSIGN(auto slice_param,\n assignment->GetUniqueSlice(param, {}));\n TF_ASSERT_OK_AND_ASSIGN(auto slice_while0,\n assignment->GetUniqueSlice(while0, {}));\n TF_ASSERT_OK_AND_ASSIGN(auto slice_while1,\n assignment->GetUniqueSlice(while1, {}));\n EXPECT_EQ(slice_param, slice_while0);\n EXPECT_NE(slice_param, slice_while1);\n}\nTEST_F(WhileBufferAssignmentTest, ColocatedBufferWithConstant) {\n const Shape r0s32 = ShapeUtil::MakeShape(S32, {});\n const char* module_str = R\"(\nHloModule test_module\n%cond.v0 {\n %param = s32[] 
parameter(0)\n ROOT %constant = pred[] constant(true)\n}\n%cond.v1 {\n %param.0 = s32[] parameter(0)\n ROOT %constant.0 = pred[] constant(true)\n}\n%body.v0 {\n ROOT %param.1 = s32[] parameter(0)\n}\n%body.v1 {\n %param.2 = s32[] parameter(0)\n ROOT add = s32[] add(%param.2, %param.2)\n}\nENTRY %test_module {\n %constant.42 = s32[] constant(42)\n %while.0 = s32[] while(%constant.42), condition=%cond.v0, body=%body.v0\n %mul = s32[] multiply(%while.0, %while.0)\n %while.1 = s32[] while(%mul), condition=%cond.v1, body=%body.v1\n ROOT %bcast = s32[1024,1024]{1,0} broadcast(s32[] %while.1), dimensions={}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));\n int64_t instruction_count = m->instruction_count();\n CopyInsertion copy_insertion;\n ASSERT_IS_OK(copy_insertion.Run(m.get()).status());\n ASSERT_EQ(instruction_count, m->instruction_count());\n const HloInstruction* bcast = m->entry_computation()->root_instruction();\n const HloInstruction* constant =\n m->entry_computation()->GetInstructionWithName(\"constant.42\");\n ASSERT_EQ(bcast->opcode(), HloOpcode::kBroadcast);\n const HloInstruction* while1 = bcast->operand(0);\n ASSERT_EQ(while1->opcode(), HloOpcode::kWhile);\n const HloInstruction* while0 = while1->operand(0)->operand(0);\n ASSERT_EQ(while0->opcode(), HloOpcode::kWhile);\n auto assignment = RunBufferAssignment(m.get());\n TF_ASSERT_OK_AND_ASSIGN(auto slice_constant,\n assignment->GetUniqueSlice(constant, {}));\n TF_ASSERT_OK_AND_ASSIGN(auto slice_while0,\n assignment->GetUniqueSlice(while0, {}));\n TF_ASSERT_OK_AND_ASSIGN(auto slice_while1,\n assignment->GetUniqueSlice(while1, {}));\n EXPECT_EQ(slice_constant, slice_while0);\n EXPECT_NE(slice_constant, slice_while1);\n}\nTEST_F(WhileBufferAssignmentTest, ColocatedBuffers) {\n const Shape r0s32 = ShapeUtil::MakeShape(S32, {});\n auto build_cond = [&]() {\n auto builder = HloComputation::Builder(\"cond\");\n auto const4 = builder.AddInstruction(\n 
HloInstruction::CreateConstant(LiteralUtil::CreateR0(4)));\n auto param =\n builder.AddInstruction(HloInstruction::CreateParameter(0, r0s32, \"x\"));\n builder.AddInstruction(\n HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), param,\n const4, ComparisonDirection::kLt));\n return builder.Build();\n };\n auto build_body = [&]() {\n auto builder = HloComputation::Builder(\"body\");\n auto const9 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(9)));\n auto param =\n builder.AddInstruction(HloInstruction::CreateParameter(0, r0s32, \"x\"));\n builder.AddInstruction(\n HloInstruction::CreateBinary(r0s32, HloOpcode::kAdd, param, const9));\n return builder.Build();\n };\n auto module = CreateNewVerifiedModule();\n auto builder = HloComputation::Builder(\"entry\");\n auto token = builder.AddInstruction(HloInstruction::CreateToken());\n auto infeed =\n builder.AddInstruction(HloInstruction::CreateInfeed(r0s32, token, \"\"));\n auto infeed_data = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(r0s32, infeed, 0));\n auto cond0 = module->AddEmbeddedComputation(build_cond());\n auto body0 = module->AddEmbeddedComputation(build_body());\n auto while0 = builder.AddInstruction(\n HloInstruction::CreateWhile(r0s32, cond0, body0, infeed_data));\n auto cond1 = module->AddEmbeddedComputation(build_cond());\n auto body1 = module->AddEmbeddedComputation(build_body());\n auto while1 = builder.AddInstruction(\n HloInstruction::CreateWhile(r0s32, cond1, body1, while0));\n auto zero = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(0)));\n auto add = builder.AddInstruction(\n HloInstruction::CreateBinary(r0s32, HloOpcode::kAdd, zero, zero));\n auto cond2 = module->AddEmbeddedComputation(build_cond());\n auto body2 = module->AddEmbeddedComputation(build_body());\n auto while2 = builder.AddInstruction(\n HloInstruction::CreateWhile(r0s32, cond2, body2, add));\n auto tuple =\n 
builder.AddInstruction(HloInstruction::CreateTuple({while2, while1}));\n module->AddEntryComputation(builder.Build());\n int64_t instruction_count = module->instruction_count();\n CopyInsertion copy_insertion;\n ASSERT_IS_OK(copy_insertion.Run(module.get()).status());\n ASSERT_EQ(instruction_count, module->instruction_count());\n TF_ASSERT_OK_AND_ASSIGN(\n HloSchedule schedule,\n ScheduleModule(module.get(), [](const BufferValue& buffer) {\n return ShapeUtil::ByteSizeOf(buffer.shape(),\n sizeof(void*));\n }));\n schedule.set_sequence(\n module->entry_computation(),\n {token, infeed, infeed_data, while0, while1, zero, add, while2, tuple});\n TF_ASSERT_OK(schedule.Verify());\n TF_ASSERT_OK_AND_ASSIGN(\n auto assignment,\n BufferAssigner::Run(\n module.get(), std::make_unique(schedule),\n backend().compiler()->BufferSizeBytesFunction(),\n [](LogicalBuffer::Color) { return 1; },\n true));\n TF_ASSERT_OK_AND_ASSIGN(auto slice0, assignment->GetUniqueSlice(tuple, {0}));\n TF_ASSERT_OK_AND_ASSIGN(auto slice1, assignment->GetUniqueSlice(tuple, {1}));\n EXPECT_NE(slice0, slice1);\n TF_ASSERT_OK_AND_ASSIGN(auto slice_while0,\n assignment->GetUniqueSlice(while0, {}));\n TF_ASSERT_OK_AND_ASSIGN(auto slice_while1,\n assignment->GetUniqueSlice(while1, {}));\n EXPECT_EQ(slice1, slice_while0);\n EXPECT_EQ(slice1, slice_while1);\n TF_ASSERT_OK_AND_ASSIGN(auto slice_while2,\n assignment->GetUniqueSlice(while2, {}));\n EXPECT_EQ(slice0, slice_while2);\n}\nTEST_F(WhileBufferAssignmentTest, OneForwardBackwardWhileLoopSet) {\n auto module = CreateNewVerifiedModule();\n auto builder = HloComputation::Builder(\"entry\");\n auto input0 = builder.AddInstruction(\n HloInstruction::CreateParameter(0, data_shape_, \"input0\"));\n auto weights0 = builder.AddInstruction(\n HloInstruction::CreateParameter(1, data_shape_, \"weights0\"));\n auto zero = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0)));\n auto output0 = builder.AddInstruction(\n 
HloInstruction::CreateBroadcast(data_shape_, zero, {}));\n auto cond0 =\n module->AddEmbeddedComputation(BuildWhileConditionComputation(\"cond\"));\n auto body0 =\n module->AddEmbeddedComputation(BuildWhileBodyComputation(\"body\"));\n auto tuple0 = builder.AddInstruction(\n HloInstruction::CreateTuple({input0, weights0, output0}));\n auto while0 = builder.AddInstruction(\n HloInstruction::CreateWhile(loop_state_shape_, cond0, body0, tuple0));\n auto cond1 =\n module->AddEmbeddedComputation(BuildWhileConditionComputation(\"cond\"));\n auto body1 =\n module->AddEmbeddedComputation(BuildWhileBodyComputation(\"body\"));\n auto while1 = builder.AddInstruction(\n HloInstruction::CreateWhile(loop_state_shape_, cond1, body1, while0));\n module->AddEntryComputation(builder.Build());\n RunCopyInsertion(module.get());\n auto assignment = RunBufferAssignment(module.get());\n EXPECT_EQ(assignment->GetUniqueSlice(while0, {0}).value(),\n assignment->GetUniqueSlice(while1, {0}).value());\n EXPECT_EQ(assignment->GetUniqueSlice(while0, {1}).value(),\n assignment->GetUniqueSlice(while1, {1}).value());\n EXPECT_EQ(assignment->GetUniqueSlice(while0, {2}).value(),\n assignment->GetUniqueSlice(while1, {2}).value());\n}\nTEST_F(BufferAssignmentTest, TwoCalls) {\n auto module = CreateNewVerifiedModule();\n Shape r0f32 = ShapeUtil::MakeShape(xla::F32, {});\n HloComputation* sub_computation;\n {\n auto builder = HloComputation::Builder(TestName() + \"_sub_comp\");\n auto param = builder.AddInstruction(\n HloInstruction::CreateParameter(0, r0f32, \"param\"));\n auto constant1 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(1.0)));\n auto add = builder.AddInstruction(\n HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, param, constant1));\n sub_computation = module->AddEmbeddedComputation(builder.Build(add));\n }\n auto builder = HloComputation::Builder(TestName());\n auto constant2 = builder.AddInstruction(\n 
HloInstruction::CreateConstant(LiteralUtil::CreateR0(2.0)));\n auto constant3 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(3.0)));\n auto call1 = builder.AddInstruction(\n HloInstruction::CreateCall(r0f32, {constant2}, sub_computation));\n auto call2 = builder.AddInstruction(\n HloInstruction::CreateCall(r0f32, {constant3}, sub_computation));\n auto add1 = builder.AddInstruction(\n HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, call1, constant2));\n auto add2 = builder.AddInstruction(\n HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, call2, add1));\n module->AddEntryComputation(builder.Build(add2));\n {\n FlattenCallGraph flatten;\n TF_ASSERT_OK_AND_ASSIGN(bool result, flatten.Run(module.get()));\n EXPECT_TRUE(result);\n std::unique_ptr call_graph = CallGraph::Build(module.get());\n }\n RunCopyInsertion(module.get());\n auto assignment = RunBufferAssignment(module.get());\n EXPECT_TRUE(BuffersDistinct({call1}, {call2}, *assignment));\n}\nTEST_F(BufferAssignmentTest, CallParamCoAllocation) {\n const char* hlo_text = R\"(\nHloModule CallParamCoAllocation\nCallee {\n param0 = (f32[100],(f32[200],f32[300])) parameter(0)\n param1 = s32[20] parameter(1)\n ROOT constant = f32[] constant(1)\n}\nENTRY Main {\n entry_param0 = f32[100] parameter(0)\n entry_param1 = s32[20] parameter(1)\n custom_call = (f32[200],f32[300]) custom-call(), custom_call_target=\"call-target\"\n call_op0 = (f32[100],(f32[200],f32[300])) tuple(entry_param0, custom_call)\n ROOT call_result = f32[] call(call_op0, entry_param1), to_apply=Callee\n}\n)\";\n HloModuleConfig config;\n config.set_debug_options(GetDebugOptionsFromFlags());\n TF_ASSERT_OK_AND_ASSIGN(auto m,\n ParseAndReturnVerifiedModule(hlo_text, config));\n auto buffers = RunBufferAssignment(m.get());\n HloComputation* main = m->entry_computation();\n HloComputation* callee = m->GetComputationWithName(\"Callee\");\n EXPECT_NE(callee, nullptr);\n HloInstruction* param0 = 
callee->parameter_instruction(0);\n HloInstruction* param1 = callee->parameter_instruction(1);\n HloInstruction* entry_param0 = main->parameter_instruction(0);\n HloInstruction* entry_param1 = main->parameter_instruction(1);\n HloInstruction* custom_call = main->GetInstructionWithName(\"custom_call\");\n EXPECT_EQ(GetAllocation(*buffers, entry_param0, {}),\n GetAllocation(*buffers, param0, {0}));\n EXPECT_EQ(GetAllocation(*buffers, entry_param1, {}),\n GetAllocation(*buffers, param1, {}));\n EXPECT_EQ(GetAllocation(*buffers, custom_call, {}),\n GetAllocation(*buffers, param0, {1}));\n EXPECT_EQ(GetAllocation(*buffers, custom_call, {0}),\n GetAllocation(*buffers, param0, {1, 0}));\n EXPECT_EQ(GetAllocation(*buffers, custom_call, {1}),\n GetAllocation(*buffers, param0, {1, 1}));\n}\nTEST_F(BufferAssignmentTest, AsyncCall) {\n const char* hlo_text = R\"(\nHloModule AsyncCall, is_scheduled=true\n%called_computation (param_0: f32[4096], param_1: f32[4096]) -> f32[4096] {\n %param_0 = f32[4096]{0} parameter(0)\n %param_1 = f32[4096]{0} parameter(1)\n %negate_0 = f32[4096]{0} negate(f32[4096]{0} %param_0)\n %negate_1 = f32[4096]{0} negate(f32[4096]{0} %param_1)\n %negate_2 = f32[4096]{0} negate(f32[4096]{0} %negate_1)\n %negate_3 = f32[4096]{0} negate(f32[4096]{0} %negate_2)\n ROOT %result.1 = f32[4096]{0} add(f32[4096]{0} %negate_0, f32[4096]{0} %negate_3)\n}\nENTRY %main (a: f32[4096], b: f32[4096]) -> f32[4096] {\n %a = f32[4096]{0} parameter(0)\n %b = f32[4096]{0} parameter(1)\n %async-start = ((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) call-start(f32[4096]{0} %a, f32[4096]{0} %b), to_apply=%called_computation\n %negate_4 = f32[4096]{0} negate(f32[4096]{0} %a)\n %negate_5 = f32[4096]{0} negate(f32[4096]{0} %b)\n %negate_6 = f32[4096]{0} negate(f32[4096]{0} %negate_5)\n %negate_7 = f32[4096]{0} negate(f32[4096]{0} %negate_6)\n %add_0 = f32[4096]{0} add(f32[4096]{0} %negate_4, f32[4096]{0} %negate_7)\n %async-done = f32[4096]{0} call-done(((f32[4096]{0}, 
f32[4096]{0}), f32[4096]{0}, u32[]) %async-start)\n ROOT %add_1 = f32[4096]{0} add(f32[4096]{0} %add_0, f32[4096]{0} %async-done)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text));\n auto buffers = RunBufferAssignmentWithSequentialOrdering(m.get());\n LOG(INFO) << buffers->ToString();\n auto get_slice = [&](std::string_view hlo_name, const ShapeIndex& index) {\n return buffers->GetUniqueSlice(FindInstruction(m.get(), hlo_name), index)\n .value();\n };\n EXPECT_EQ(get_slice(\"param_0\", {}), get_slice(\"a\", {}));\n EXPECT_EQ(get_slice(\"param_1\", {}), get_slice(\"b\", {}));\n EXPECT_EQ(get_slice(\"result.1\", {}), get_slice(\"async-done\", {}));\n for (const auto& hlo_name :\n {\"negate_0\", \"negate_1\", \"negate_2\", \"negate_3\"}) {\n EXPECT_NE(get_slice(hlo_name, {}), get_slice(\"negate_4\", {}));\n EXPECT_NE(get_slice(hlo_name, {}), get_slice(\"negate_5\", {}));\n EXPECT_NE(get_slice(hlo_name, {}), get_slice(\"negate_6\", {}));\n EXPECT_NE(get_slice(hlo_name, {}), get_slice(\"negate_7\", {}));\n EXPECT_NE(get_slice(hlo_name, {}), get_slice(\"add_0\", {}));\n }\n}\nTEST_F(BufferAssignmentTest, AsyncCallPrivateStack) {\n const char* hlo_text = R\"(\nHloModule AsyncCall, is_scheduled=true\n%called_computation (param_0: f32[4096], param_1: f32[4096]) -> f32[4096] {\n %param_0 = f32[4096]{0} parameter(0)\n %param_1 = f32[4096]{0} parameter(1)\n %negate_0 = f32[4096]{0} negate(f32[4096]{0} %param_0)\n %negate_1 = f32[4096]{0} negate(f32[4096]{0} %param_1)\n %negate_2 = f32[4096]{0} negate(f32[4096]{0} %negate_1)\n %negate_3 = f32[4096]{0} negate(f32[4096]{0} %negate_2)\n ROOT %result.1 = f32[4096]{0} add(f32[4096]{0} %negate_0, f32[4096]{0} %negate_3)\n}, execution_thread=\"foobar\"\nENTRY %main (a: f32[4096], b: f32[4096]) -> f32[4096] {\n %a = f32[4096]{0} parameter(0)\n %b = f32[4096]{0} parameter(1)\n %async-start = ((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) call-start(f32[4096]{0} %a, f32[4096]{0} %b), 
async_execution_thread=\"foobar\", to_apply=%called_computation\n %negate_4 = f32[4096]{0} negate(f32[4096]{0} %a)\n %negate_5 = f32[4096]{0} negate(f32[4096]{0} %b)\n %negate_6 = f32[4096]{0} negate(f32[4096]{0} %negate_5)\n %negate_7 = f32[4096]{0} negate(f32[4096]{0} %negate_6)\n %add_0 = f32[4096]{0} add(f32[4096]{0} %negate_4, f32[4096]{0} %negate_7)\n %async-done = f32[4096]{0} call-done(((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) %async-start)\n ROOT %add_1 = f32[4096]{0} add(f32[4096]{0} %add_0, f32[4096]{0} %async-done)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text));\n auto colorer = [](HloAliasAnalysis* alias_analysis, const HloOrdering&) {\n for (const HloBuffer& buffer : alias_analysis->buffers()) {\n int color = 1;\n for (const HloValue* value : buffer.values()) {\n if (absl::c_any_of(\n value->positions(),\n [](const HloPosition& position) {\n return position.instruction->parent()->execution_thread() !=\n \"foobar\";\n }) ||\n absl::c_any_of(value->GetUses(), [](const HloUse& use) {\n return use.instruction->parent()->execution_thread() != \"foobar\";\n })) {\n color = 0;\n }\n }\n for (const HloValue* value : buffer.values()) {\n const HloPosition& defining_position = value->defining_position();\n if (defining_position.shape().has_layout()) {\n const int memory_space =\n defining_position.shape().layout().memory_space();\n if (memory_space != 0) {\n color = memory_space;\n }\n }\n alias_analysis->dataflow_analysis()\n .GetValue(value->id())\n .set_color(BufferValue::Color(color));\n }\n }\n return absl::OkStatus();\n };\n BufferAssigner::PrivateStacks private_stacks;\n private_stacks[1] = {FindComputation(m.get(), \"called_computation\")};\n auto buffers = RunBufferAssignmentWithSequentialOrdering(\n m.get(), 1, colorer, private_stacks);\n LOG(INFO) << buffers->ToString();\n auto get_slice = [&](std::string_view hlo_name, const ShapeIndex& index) {\n return buffers->GetUniqueSlice(FindInstruction(m.get(), 
hlo_name), index)\n .value();\n };\n EXPECT_EQ(get_slice(\"param_0\", {}), get_slice(\"a\", {}));\n EXPECT_EQ(get_slice(\"param_1\", {}), get_slice(\"b\", {}));\n EXPECT_EQ(get_slice(\"result.1\", {}), get_slice(\"async-done\", {}));\n for (const auto& hlo_name :\n {\"negate_0\", \"negate_1\", \"negate_2\", \"negate_3\"}) {\n EXPECT_NE(get_slice(hlo_name, {}), get_slice(\"negate_4\", {}));\n EXPECT_NE(get_slice(hlo_name, {}), get_slice(\"negate_5\", {}));\n EXPECT_NE(get_slice(hlo_name, {}), get_slice(\"negate_6\", {}));\n EXPECT_NE(get_slice(hlo_name, {}), get_slice(\"negate_7\", {}));\n EXPECT_NE(get_slice(hlo_name, {}), get_slice(\"add_0\", {}));\n }\n EXPECT_NE(get_slice(\"negate_0\", {}), get_slice(\"negate_1\", {}));\n EXPECT_EQ(get_slice(\"negate_1\", {}), get_slice(\"negate_2\", {}));\n EXPECT_EQ(get_slice(\"negate_1\", {}), get_slice(\"negate_3\", {}));\n}\nTEST_F(BufferAssignmentTest, MultipleAsyncCallPrivateStack) {\n const char* hlo_text = R\"(\nHloModule AsyncCall, is_scheduled=true\n%called_computation1 {\n %param_0 = f32[4096]{0} parameter(0)\n %param_1 = f32[4096]{0} parameter(1)\n %negate_0 = f32[4096]{0} negate(f32[4096]{0} %param_0)\n %negate_1 = f32[4096]{0} negate(f32[4096]{0} %param_1)\n %negate_2 = f32[4096]{0} negate(f32[4096]{0} %negate_1)\n %negate_3 = f32[4096]{0} negate(f32[4096]{0} %negate_2)\n ROOT %result.1 = f32[4096]{0} add(f32[4096]{0} %negate_0, f32[4096]{0} %negate_3)\n}, execution_thread=\"foobar\"\n%called_computation2 {\n %param_2 = f32[4096]{0} parameter(0)\n %param_3 = f32[4096]{0} parameter(1)\n %negate_4 = f32[4096]{0} negate(f32[4096]{0} %param_2)\n %negate_5 = f32[4096]{0} negate(f32[4096]{0} %param_3)\n ROOT %result.2 = f32[4096]{0} add(f32[4096]{0} %negate_4, f32[4096]{0} %negate_5)\n}, execution_thread=\"foobar\"\nENTRY %main (a: f32[4096], b: f32[4096]) -> f32[4096] {\n %a = f32[4096]{0} parameter(0)\n %b = f32[4096]{0} parameter(1)\n %async-start.1 = ((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) 
call-start(f32[4096]{0} %a, f32[4096]{0} %b), async_execution_thread=\"foobar\", to_apply=%called_computation1\n %async-start.2 = ((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) call-start(f32[4096]{0} %b, f32[4096]{0} %a), async_execution_thread=\"foobar\", to_apply=%called_computation2\n %negate_6 = f32[4096]{0} negate(f32[4096]{0} %a)\n %negate_7 = f32[4096]{0} negate(f32[4096]{0} %b)\n %negate_8 = f32[4096]{0} negate(f32[4096]{0} %negate_7)\n %negate_9 = f32[4096]{0} negate(f32[4096]{0} %negate_8)\n %add_0 = f32[4096]{0} add(f32[4096]{0} %negate_6, f32[4096]{0} %negate_9)\n %async-done.1 = f32[4096]{0} call-done(((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) %async-start.1)\n %async-done.2 = f32[4096]{0} call-done(((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) %async-start.2)\n %add_1 = f32[4096]{0} add(f32[4096]{0} %add_0, f32[4096]{0} %async-done.1)\n ROOT %add_2 = f32[4096]{0} add(f32[4096]{0} %add_1, f32[4096]{0} %async-done.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text));\n auto colorer = [](HloAliasAnalysis* alias_analysis, const HloOrdering&) {\n for (const HloBuffer& buffer : alias_analysis->buffers()) {\n int color = 1;\n for (const HloValue* value : buffer.values()) {\n if (absl::c_any_of(\n value->positions(),\n [](const HloPosition& position) {\n return position.instruction->parent()->execution_thread() !=\n \"foobar\";\n }) ||\n absl::c_any_of(value->GetUses(), [](const HloUse& use) {\n return use.instruction->parent()->execution_thread() != \"foobar\";\n })) {\n color = 0;\n }\n }\n for (const HloValue* value : buffer.values()) {\n const HloPosition& defining_position = value->defining_position();\n if (defining_position.shape().has_layout()) {\n const int memory_space =\n defining_position.shape().layout().memory_space();\n if (memory_space != 0) {\n color = memory_space;\n }\n }\n alias_analysis->dataflow_analysis()\n .GetValue(value->id())\n .set_color(BufferValue::Color(color));\n }\n }\n 
return absl::OkStatus();\n };\n BufferAssigner::PrivateStacks private_stacks;\n private_stacks[1] = {FindComputation(m.get(), \"called_computation1\"),\n FindComputation(m.get(), \"called_computation2\")};\n auto buffers = RunBufferAssignmentWithSequentialOrdering(\n m.get(), 1, colorer, private_stacks);\n LOG(INFO) << buffers->ToString();\n auto get_slice = [&](std::string_view hlo_name, const ShapeIndex& index) {\n return buffers->GetUniqueSlice(FindInstruction(m.get(), hlo_name), index)\n .value();\n };\n EXPECT_EQ(get_slice(\"param_0\", {}), get_slice(\"a\", {}));\n EXPECT_EQ(get_slice(\"param_3\", {}), get_slice(\"a\", {}));\n EXPECT_EQ(get_slice(\"param_1\", {}), get_slice(\"b\", {}));\n EXPECT_EQ(get_slice(\"param_2\", {}), get_slice(\"b\", {}));\n EXPECT_EQ(get_slice(\"result.1\", {}), get_slice(\"async-done.1\", {}));\n EXPECT_EQ(get_slice(\"result.2\", {}), get_slice(\"async-done.2\", {}));\n for (const auto& hlo_name : {\"negate_0\", \"negate_1\", \"negate_2\", \"negate_3\",\n \"negate_4\", \"negate_5\"}) {\n EXPECT_NE(get_slice(hlo_name, {}), get_slice(\"negate_6\", {}));\n EXPECT_NE(get_slice(hlo_name, {}), get_slice(\"negate_7\", {}));\n EXPECT_NE(get_slice(hlo_name, {}), get_slice(\"negate_8\", {}));\n EXPECT_NE(get_slice(hlo_name, {}), get_slice(\"negate_9\", {}));\n EXPECT_NE(get_slice(hlo_name, {}), get_slice(\"add_0\", {}));\n }\n EXPECT_NE(get_slice(\"negate_0\", {}), get_slice(\"negate_1\", {}));\n EXPECT_EQ(get_slice(\"negate_1\", {}), get_slice(\"negate_2\", {}));\n EXPECT_EQ(get_slice(\"negate_1\", {}), get_slice(\"negate_3\", {}));\n EXPECT_TRUE(get_slice(\"negate_4\", {}) == get_slice(\"negate_0\", {}) ||\n get_slice(\"negate_4\", {}) == get_slice(\"negate_1\", {}));\n EXPECT_TRUE(get_slice(\"negate_5\", {}) == get_slice(\"negate_0\", {}) ||\n get_slice(\"negate_5\", {}) == get_slice(\"negate_1\", {}));\n}\nTEST_F(BufferAssignmentTest, AsyncCallImplicitSharding) {\n std::string hlo_string = R\"(\n HloModule module, is_scheduled=true\n 
called_computation {\n param0 = f32[4] parameter(0)\n constant = f32[1] constant(1)\n dynamic-update-slice = f32[4] dynamic-update-slice(param0, constant, constant)\n ROOT negate = f32[4] negate(dynamic-update-slice)\n }\n ENTRY entry {\n p0 = f32[8] parameter(0)\n call-start = ((f32[8]), f32[8], s32[]) call-start(p0), async_execution_thread=\"foo\", to_apply=called_computation\n ROOT call-done = f32[8] call-done(call-start)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnUnverifiedModule(hlo_string));\n auto buffers = RunBufferAssignmentWithSequentialOrdering(module.get());\n LOG(INFO) << buffers->ToString();\n auto get_slice = [&](std::string_view hlo_name, const ShapeIndex& index) {\n return buffers\n ->GetUniqueSlice(FindInstruction(module.get(), hlo_name), index)\n .value();\n };\n EXPECT_EQ(get_slice(\"p0\", {}).size(), 32);\n EXPECT_EQ(get_slice(\"dynamic-update-slice\", {}).size(), 32);\n}\nTEST_F(BufferAssignmentTest, AsyncCustomCall) {\n const char* hlo_text = R\"(\nHloModule AsyncCustomCall, is_scheduled=true\nENTRY %main (a: f32[4096]) -> f32[4096] {\n %a = f32[4096]{0} parameter(0)\n %neg_0 = f32[4096]{0} negate(f32[4096]{0} %a)\n %async-start = ((f32[4096]{0}), f32[4096]{0}, u32[])\n custom-call-start(f32[4096]{0} %neg_0),\n custom_call_target=\"Foo\"\n %async-done = f32[4096]{0} custom-call-done(((f32[4096]{0}), f32[4096]{0}, u32[]) %async-start)\n ROOT %neg_1 = f32[4096]{0} negate(f32[4096]{0} %async-done)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text));\n auto buffers = RunBufferAssignmentWithSequentialOrdering(m.get());\n HloInstruction* neg_0 = FindInstruction(m.get(), \"neg_0\");\n HloInstruction* async_done = FindInstruction(m.get(), \"async-done\");\n EXPECT_FALSE(buffers->SharesTopLevelSlice(neg_0, async_done));\n}\nTEST_F(BufferAssignmentTest, AsyncCustomCallWithAliasing) {\n const char* hlo_text = R\"(\nHloModule AsyncCustomCall, is_scheduled=true\nENTRY %main (a: f32[4096]) -> f32[4096] 
{\n %a = f32[4096]{0} parameter(0)\n %neg_0 = f32[4096]{0} negate(f32[4096]{0} %a)\n %async-start = ((f32[4096]{0}), f32[4096]{0}, u32[])\n custom-call-start(f32[4096]{0} %neg_0),\n custom_call_target=\"Foo\",\n output_to_operand_aliasing={{}: (0, {})}\n %async-done = f32[4096]{0} custom-call-done(((f32[4096]{0}), f32[4096]{0}, u32[]) %async-start)\n ROOT %neg_1 = f32[4096]{0} negate(f32[4096]{0} %async-done)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text));\n auto buffers = RunBufferAssignmentWithSequentialOrdering(m.get());\n HloInstruction* neg_0 = FindInstruction(m.get(), \"neg_0\");\n HloInstruction* async_done = FindInstruction(m.get(), \"async-done\");\n EXPECT_TRUE(buffers->SharesTopLevelSlice(neg_0, async_done));\n}\nTEST_F(BufferAssignmentTest, BufferIsolation) {\n absl::string_view module_str = R\"(\nHloModule test_module, is_scheduled=true\nENTRY %test_module {\n param.0 = s32[1024]{0} parameter(0)\n param.1 = s32[1024]{0} parameter(1)\n mul1 = s32[1024]{0} multiply(param.0, param.1)\n bcast1 = s32[4,1024]{1,0} broadcast(mul1), dimensions={1}\n bcast2 = s32[4,1024]{1,0} broadcast(param.0), dimensions={1}\n mul2 = s32[1024]{0} multiply(mul1, param.0)\n add1 = s32[1024]{0} add(mul1, mul2)\n sub2 = s32[1024]{0} subtract(mul1, mul2)\n mul3 = s32[1024]{0} multiply(mul2, add1)\n mul4 = s32[1024]{0} multiply(mul3, sub2)\n bcast3 = s32[4,1024]{1,0} broadcast(mul4), dimensions={1}\n add2 = s32[4,1024]{1,0} add(bcast3, bcast2)\n ROOT add3 = s32[4,1024]{1,0} add(add2, bcast1)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));\n std::unique_ptr nonisolation_assignment =\n RunBufferAssignmentWithIsolationOptions(m.get());\n auto nonisolation_allocation =\n absl::c_find_if(nonisolation_assignment->Allocations(),\n [](const BufferAllocation& allocation) {\n return allocation.IsPreallocatedTempBuffer();\n });\n ASSERT_NE(nonisolation_allocation,\n nonisolation_assignment->Allocations().end());\n 
LOG(INFO) << \"Non-isolation buffers\";\n for (const auto& [value, offset_size] :\n nonisolation_allocation->assigned_buffers()) {\n LOG(INFO) << value->ToShortString() << \": off: \" << offset_size.offset\n << \", size: \" << offset_size.size;\n }\n BufferAssignment::BufferIsolationOptions isolation_options;\n isolation_options.hlo_value_compare =\n [](const HloValue* a, const HloValue* b) { return a->id() < b->id(); };\n isolation_options.config.add_isolation_colors(0);\n isolation_options.config.set_isolation_order_salt(10);\n isolation_options.config.set_isolation_fuel(5);\n isolation_options.config.set_isolation_padding_bytes(1024);\n isolation_options.config.set_base_offset_bytes(12288);\n std::unique_ptr isolation_assignment =\n RunBufferAssignmentWithIsolationOptions(m.get(), isolation_options);\n auto isolation_allocation =\n absl::c_find_if(isolation_assignment->Allocations(),\n [](const BufferAllocation& allocation) {\n return allocation.IsPreallocatedTempBuffer();\n });\n ASSERT_NE(isolation_allocation, isolation_assignment->Allocations().end());\n std::vector ordered_values;\n for (const auto& [value, _] : isolation_allocation->assigned_buffers()) {\n ordered_values.push_back(value);\n }\n absl::c_sort(ordered_values, isolation_options.hlo_value_compare);\n int i;\n int64_t expected_offset = nonisolation_allocation->size() +\n isolation_options.config.base_offset_bytes() +\n isolation_options.config.isolation_padding_bytes();\n ASSERT_GT(ordered_values.size(), isolation_options.config.isolation_fuel());\n LOG(INFO) << \"Isolation buffers\";\n for (i = 0; i < isolation_options.config.isolation_fuel(); ++i) {\n const HloValue* value = ordered_values[i];\n auto offset_size = isolation_allocation->assigned_buffers().at(value);\n LOG(INFO) << value->ToShortString() << \": off: \" << offset_size.offset\n << \", size: \" << offset_size.size;\n EXPECT_EQ(offset_size.offset, expected_offset);\n expected_offset +=\n offset_size.size + 
isolation_options.config.isolation_padding_bytes();\n }\n for (; i < ordered_values.size(); ++i) {\n const HloValue* value = ordered_values[i];\n auto offset_size = isolation_allocation->assigned_buffers().at(value);\n auto nonisolation_offset_size = absl::c_find_if(\n nonisolation_allocation->assigned_buffers(), [&](const auto& pair) {\n return pair.first->defining_position() == value->defining_position();\n });\n ASSERT_NE(nonisolation_offset_size,\n nonisolation_allocation->assigned_buffers().end());\n LOG(INFO) << value->ToShortString() << \": off: \" << offset_size.offset\n << \", size: \" << offset_size.size;\n EXPECT_EQ(offset_size.offset,\n nonisolation_offset_size->second.offset +\n isolation_options.config.base_offset_bytes());\n }\n}\nTEST_F(BufferAssignmentTest, BufferInfoStringTest) {\n absl::string_view module_str = R\"(\nHloModule test_module\nENTRY %test_module {\n %param.0 = s32[1024]{0} parameter(0)\n %param.1 = s32[1024]{0} parameter(1)\n %mul = s32[1024]{0} multiply(%param.0, %param.1)\n %add = s32[1024]{0} add(%mul, %param.0)\n ROOT %bcast = s32[1024,1024]{1,0} broadcast(s32[1024] %add), dimensions={0}\n})\";\n absl::string_view reference_str =\n R\"(buffer_id,buffer_name,offset,size,definition_time,end_time,num_uses,use_times,use_names\n0,\"<0 param.0 @0>\",0,4096,0,5,2,\"2;3\",\"mul, operand 0;add, operand 1\"\n1,\"<1 param.1 @0>\",0,4096,1,5,1,\"2\",\"mul, operand 1\"\n2,\"<2 mul @0>\",0,4096,2,3,1,\"3\",\"add, operand 0\"\n3,\"<3 add @0>\",0,4096,3,4,1,\"4\",\"bcast, operand 0\"\n4,\"<4 bcast @0>\",0,4194304,4,5,0,\"\",\"\"\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));\n HloInstruction* const param0 = FindInstruction(m.get(), \"param.0\");\n HloInstruction* const param1 = FindInstruction(m.get(), \"param.1\");\n HloInstruction* const mul = FindInstruction(m.get(), \"mul\");\n HloInstruction* const add = FindInstruction(m.get(), \"add\");\n HloInstruction* const bcast = FindInstruction(m.get(), 
\"bcast\");\n auto assignment = RunBufferAssignmentWithInstructionSequence(\n m.get(), {param0, param1, mul, add, bcast});\n const std::string buffer_info_str = assignment->BufferInfoString();\n EXPECT_EQ(buffer_info_str, reference_str);\n}\nTEST_F(WhileBufferAssignmentTest, WhileLoopsInterferingResultRange) {\n auto module = CreateNewVerifiedModule();\n auto builder = HloComputation::Builder(TestName());\n auto zero = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0)));\n auto one = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(1.0)));\n auto input0 = builder.AddInstruction(\n HloInstruction::CreateParameter(0, data_shape_, \"input0\"));\n auto weights0 = builder.AddInstruction(\n HloInstruction::CreateParameter(1, data_shape_, \"weights0\"));\n auto output0 = builder.AddInstruction(\n HloInstruction::CreateBroadcast(data_shape_, zero, {}));\n auto input1 = builder.AddInstruction(\n HloInstruction::CreateParameter(2, data_shape_, \"input1\"));\n auto weights1 = builder.AddInstruction(\n HloInstruction::CreateParameter(3, data_shape_, \"weights1\"));\n auto output1 = builder.AddInstruction(\n HloInstruction::CreateBroadcast(data_shape_, one, {}));\n auto cond =\n module->AddEmbeddedComputation(BuildWhileConditionComputation(\"cond\"));\n auto body = module->AddEmbeddedComputation(BuildWhileBodyComputation(\"body\"));\n auto tuple0 = builder.AddInstruction(\n HloInstruction::CreateTuple({input0, weights0, output0}));\n auto tuple1 = builder.AddInstruction(\n HloInstruction::CreateTuple({input1, weights1, output1}));\n auto while0 = builder.AddInstruction(\n HloInstruction::CreateWhile(loop_state_shape_, cond, body, tuple0));\n auto while1 = builder.AddInstruction(\n HloInstruction::CreateWhile(loop_state_shape_, cond, body, tuple1));\n auto gte0 = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(data_shape_, while0, 0));\n auto gte1 = builder.AddInstruction(\n 
HloInstruction::CreateGetTupleElement(data_shape_, while1, 1));\n auto root_add = builder.AddInstruction(\n HloInstruction::CreateBinary(data_shape_, HloOpcode::kAdd, gte0, gte1));\n module->AddEntryComputation(builder.Build());\n {\n FlattenCallGraph flatten;\n TF_ASSERT_OK_AND_ASSIGN(bool result, flatten.Run(module.get()));\n EXPECT_TRUE(result);\n }\n RunCopyInsertion(module.get());\n HloSchedule schedule = ScheduleModule(module.get(), ByteSizeOf).value();\n schedule.set_sequence(\n module->entry_computation(),\n {input1, weights1, one, output1, while1->mutable_operand(0), while1,\n input0, weights0, zero, output0, while0->mutable_operand(0), while0,\n gte0, gte1, root_add});\n TF_ASSERT_OK(schedule.Verify());\n auto assignment =\n BufferAssigner::Run(\n module.get(), std::make_unique(schedule),\n ByteSizeOf, [](LogicalBuffer::Color) { return 1; },\n true)\n .value();\n EXPECT_TRUE(BuffersDistinct({while0}, {while1}, *assignment));\n}\nTEST_F(WhileBufferAssignmentTest, WhilesDontShareEntryParamIfLiveOut) {\n auto module = CreateNewVerifiedModule();\n auto builder = HloComputation::Builder(\"entry\");\n auto input0 = builder.AddInstruction(\n HloInstruction::CreateParameter(0, data_shape_, \"input0\"));\n auto weights0 = builder.AddInstruction(\n HloInstruction::CreateParameter(1, data_shape_, \"weights0\"));\n auto zero = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0)));\n auto output0 = builder.AddInstruction(\n HloInstruction::CreateBroadcast(data_shape_, zero, {}));\n auto output1 = builder.AddInstruction(\n HloInstruction::CreateBroadcast(data_shape_, zero, {}));\n auto cond0 =\n module->AddEmbeddedComputation(BuildWhileConditionComputation(\"cond\"));\n auto body0 =\n module->AddEmbeddedComputation(BuildWhileBodyComputation(\"body\"));\n auto tuple0 = builder.AddInstruction(\n HloInstruction::CreateTuple({input0, weights0, output0}));\n auto while0 = builder.AddInstruction(\n 
HloInstruction::CreateWhile(loop_state_shape_, cond0, body0, tuple0));\n auto while0_out = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(data_shape_, while0, 2));\n auto cond1 =\n module->AddEmbeddedComputation(BuildWhileConditionComputation(\"cond\"));\n auto body1 =\n module->AddEmbeddedComputation(BuildWhileBodyComputation(\"body\"));\n auto tuple1 = builder.AddInstruction(\n HloInstruction::CreateTuple({while0_out, weights0, output1}));\n auto while1 = builder.AddInstruction(\n HloInstruction::CreateWhile(loop_state_shape_, cond1, body1, tuple1));\n auto while1_out = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(data_shape_, while1, 2));\n module->AddEntryComputation(builder.Build());\n RunCopyInsertion(module.get());\n auto assignment = RunBufferAssignment(module.get());\n auto* root_alloc =\n assignment->GetUniqueTopLevelSlice(while1_out).value().allocation();\n EXPECT_TRUE(root_alloc->maybe_live_out());\n EXPECT_FALSE(root_alloc->is_entry_computation_parameter());\n}\nTEST_F(WhileBufferAssignmentTest, WhileWithDynamicUpdateSliceShare) {\n const char* const hlo_string = R\"(\nHloModule test\nwhile_body {\n state = (s32[], f32[1280,1,128]{2,1,0}) parameter(0)\n constant.1 = f32[] constant(0)\n broadcast.6 = f32[128,1,128]{2,1,0} broadcast(constant.1), dimensions={}\n get-tuple-element.4 = f32[1280,1,128]{2,1,0} get-tuple-element(state), index=1\n get-tuple-element.3 = s32[] get-tuple-element(state), index=0\n constant.2 = s32[] constant(128)\n add.5 = s32[] add(get-tuple-element.3, constant.2)\n constant.3 = s32[] constant(0)\n dynamic-update-slice.5 = f32[1280,1,128]{2,1,0} dynamic-update-slice(get-tuple-element.4, broadcast.6, constant.3, constant.3, constant.3)\n dynamic-update-slice.9 = f32[1280,1,128]{2,1,0} dynamic-update-slice(dynamic-update-slice.5, broadcast.6, constant.3, constant.3, constant.3)\n ROOT tuple.85 = (s32[], f32[1280,1,128]{2,1,0}) tuple(add.5, dynamic-update-slice.9)\n}\nwhile_condition {\n state = 
(s32[], f32[1280,1,128]{2,1,0}) parameter(0)\n get-tuple-element = s32[] get-tuple-element(state), index=0\n get-tuple-element.1 = s32[] constant(3)\n ROOT less-than.339.338 = pred[] compare(get-tuple-element, get-tuple-element.1), direction=LT\n}\nENTRY entry_computation {\n constant.7 = s32[] constant(0)\n copy.1 = s32[] copy(constant.7)\n constant.6 = f32[] constant(0)\n broadcast.6 = f32[1280,1,128]{2,1,0} broadcast(constant.6), dimensions={}\n tuple.1 = (s32[], f32[1280,1,128]{2,1,0}) tuple(copy.1, broadcast.6)\n while.0 = (s32[], f32[1280,1,128]{2,1,0}) while(tuple.1), condition=while_condition, body=while_body\n ROOT get-tuple-element.2 = s32[] get-tuple-element(while.0), index=0\n}\n)\";\n auto module = ParseAndReturnVerifiedModule(hlo_string).value();\n RunCopyInsertion(module.get());\n auto assignment = RunBufferAssignment(module.get());\n auto dus9 = FindInstruction(module.get(), \"dynamic-update-slice.9\");\n auto dus9_alloc_slice = assignment->GetUniqueTopLevelSlice(dus9).value();\n auto dus5 = FindInstruction(module.get(), \"dynamic-update-slice.5\");\n auto dus5_alloc_slice = assignment->GetUniqueTopLevelSlice(dus5).value();\n EXPECT_EQ(dus9_alloc_slice.allocation(), dus5_alloc_slice.allocation());\n EXPECT_EQ(dus9_alloc_slice, dus5_alloc_slice);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/buffer_assignment.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/buffer_assignment_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1138,"cells":{"ID":{"kind":"string","value":"464b7a60-847b-4ac6-bf92-ce88e1c0c36b"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File 
Name":{"kind":"string","value":"conditional_canonicalizer"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/conditional_canonicalizer.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/conditional_canonicalizer_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/conditional_canonicalizer.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/status_macros.h\"\nnamespace xla {\nnamespace {\nabsl::Status CanonicalizeNonTupleConditional(HloInstruction* conditional) {\n TF_RET_CHECK(conditional->opcode() == HloOpcode::kConditional);\n for (auto* branch : conditional->called_computations()) {\n HloInstruction* root = branch->root_instruction();\n TF_RET_CHECK(!root->shape().IsTuple());\n HloInstruction* tuple =\n branch->AddInstruction(HloInstruction::CreateTuple({root}));\n branch->set_root_instruction(tuple, true);\n }\n auto parent = conditional->parent();\n const Shape& root_shape = conditional->shape();\n auto new_shape = ShapeUtil::MakeTupleShape(absl::MakeSpan(&root_shape, 1));\n auto new_conditional =\n parent->AddInstruction(conditional->CloneWithNewShape(new_shape));\n auto gte = parent->AddInstruction(\n HloInstruction::CreateGetTupleElement(root_shape, new_conditional, 0));\n TF_RETURN_IF_ERROR(parent->ReplaceInstruction(conditional, gte));\n return absl::OkStatus();\n}\n} \nabsl::StatusOr ConditionalCanonicalizer::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n XLA_VLOG_LINES(\n 2, \"ConditionalCanonicalizer::Run(), before:\\n\" + module->ToString());\n bool changed = false;\n for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {\n for (auto* inst : comp->MakeInstructionPostOrder()) {\n if (inst->opcode() == HloOpcode::kConditional &&\n !inst->shape().IsTuple()) {\n TF_RETURN_IF_ERROR(CanonicalizeNonTupleConditional(inst));\n changed = true;\n }\n }\n }\n XLA_VLOG_LINES(\n 2, 
\"ConditionalCanonicalizer::Run(), after:\\n\" + module->ToString());\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/conditional_canonicalizer.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tests/literal_test_util.h\"\n#include \"xla/tests/test_utils.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\n#include \"xla/types.h\"\n#include \"xla/util.h\"\nnamespace xla {\nnamespace {\nnamespace op = xla::testing::opcode_matchers;\nclass ConditionalCanonicalizerTest : public HloTestBase {\n protected:\n ConditionalCanonicalizerTest() {}\n};\nTEST_F(ConditionalCanonicalizerTest, DenseArrayConditionalRewrite) {\n auto module = ParseAndReturnVerifiedModule(R\"(\nHloModule _\ntrue_branch {\n true_param = (s32[3,2]) parameter(0)\n ROOT root = s32[] constant(0)\n}\nfalse_branch {\n false_param = (s32[3,2]) parameter(0)\n ROOT root = s32[] constant(1)\n}\nENTRY entry {\n param0 = s32[3,2] parameter(0)\n branch = pred[] constant(false)\n param_tuple = (s32[3 ,2]) tuple(param0)\n ROOT conditional = s32[] conditional(branch, param_tuple, param_tuple),\n true_computation=true_branch, false_computation=false_branch\n}\n)\")\n .value();\n ConditionalCanonicalizer pass;\n EXPECT_TRUE(pass.Run(module.get()).value());\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::GetTupleElement(op::Conditional()));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_canonicalizer.cc"},"Test Code 
Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_canonicalizer_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1139,"cells":{"ID":{"kind":"string","value":"62bf0644-5132-4e87-bff3-4b0982acdd07"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"infeed_token_propagation"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/infeed_token_propagation.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/infeed_token_propagation_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/infeed_token_propagation.h\"\n#include \n#include \n#include \n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/types/span.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/call_graph.h\"\n#include \"xla/service/hlo_dce.h\"\n#include \"xla/service/tuple_simplifier.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nbool IsDanglingInfeed(HloInstruction* infeed) {\n CHECK(infeed->opcode() == HloOpcode::kInfeed);\n if (infeed->has_sharding()) {\n return false;\n }\n if (const HloInstruction* after_all = infeed->operand(0);\n after_all->opcode() != HloOpcode::kAfterAll ||\n after_all->operand_count() != 0) {\n return false;\n }\n for (const HloInstruction* user : infeed->users()) {\n if (user->opcode() == HloOpcode::kGetTupleElement &&\n 
user->tuple_index() == 1) {\n return false;\n }\n }\n return true;\n}\nbool IsDanglingOutfeed(HloInstruction* outfeed) {\n CHECK(outfeed->opcode() == HloOpcode::kOutfeed);\n if (outfeed->has_sharding()) {\n return false;\n }\n if (const HloInstruction* after_all = outfeed->operand(1);\n after_all->opcode() != HloOpcode::kAfterAll ||\n after_all->operand_count() != 0) {\n return false;\n }\n if (outfeed->user_count() != 0) {\n return false;\n }\n return true;\n}\nHloInstruction* ReconstructTuple(HloInstruction* tuple) {\n CHECK(tuple->shape().IsTuple());\n HloComputation* computation = tuple->parent();\n std::vector gtes;\n gtes.resize(tuple->shape().tuple_shapes_size());\n for (int64_t idx = 0; idx < gtes.size(); ++idx) {\n gtes[idx] = computation->AddInstruction(\n HloInstruction::CreateGetTupleElement(tuple, idx));\n }\n return computation->AddInstruction(HloInstruction::CreateTuple(gtes));\n}\nabsl::StatusOr InsertTokenIntoTuple(HloInstruction* tuple,\n bool add_token_operand) {\n CHECK(tuple->shape().IsTuple());\n HloComputation* computation = tuple->parent();\n std::vector original_users = tuple->users();\n HloInstruction* original_tuple = ReconstructTuple(tuple);\n for (HloInstruction* original_user : original_users) {\n for (int64_t idx : original_user->operand_indices(tuple)) {\n TF_RETURN_IF_ERROR(\n original_user->ReplaceOperandWith(idx, original_tuple));\n }\n }\n *tuple->mutable_shape()->add_tuple_shapes() = ShapeUtil::MakeTokenShape();\n if (add_token_operand) {\n tuple->AppendOperand(\n computation->AddInstruction(HloInstruction::CreateToken()));\n }\n HloInstruction* input_token_gte =\n computation->AddInstruction(HloInstruction::CreateGetTupleElement(\n tuple, tuple->shape().tuple_shapes_size() - 1));\n return input_token_gte;\n}\n} \nabsl::Status CanonicalizeConditionalInstruction(HloInstruction* conditional) {\n CHECK_EQ(conditional->opcode(), HloOpcode::kConditional);\n for (HloComputation* branch : conditional->branch_computations()) {\n 
HloInstruction* parameter = branch->parameter_instruction(0);\n if (!parameter->shape().IsTuple()) {\n *parameter->mutable_shape() =\n ShapeUtil::MakeTupleShape({parameter->shape()});\n HloInstruction* original = branch->AddInstruction(\n HloInstruction::CreateGetTupleElement(parameter, 0));\n TF_RETURN_IF_ERROR(parameter->ReplaceAllUsesWithDifferentShape(original));\n }\n int64_t branch_operand_idx = conditional->branch_index(branch) + 1;\n HloInstruction* branch_tuple =\n conditional->mutable_operand(branch_operand_idx);\n if (!branch_tuple->shape().IsTuple()) {\n branch_tuple = conditional->parent()->AddInstruction(\n HloInstruction::CreateTuple({branch_tuple}));\n TF_RETURN_IF_ERROR(conditional->ReplaceOperandWithDifferentShape(\n branch_operand_idx, branch_tuple));\n }\n if (branch_tuple->opcode() == HloOpcode::kParameter) {\n branch_tuple = ReconstructTuple(branch_tuple);\n TF_RETURN_IF_ERROR(\n conditional->ReplaceOperandWith(branch_operand_idx, branch_tuple));\n }\n HloInstruction* root = branch->root_instruction();\n if (root->opcode() != HloOpcode::kTuple) {\n root = ReconstructTuple(root);\n branch->set_root_instruction(root);\n }\n }\n CHECK(conditional->shape().IsTuple());\n if (conditional->IsRoot()) {\n HloInstruction* new_root = ReconstructTuple(conditional);\n conditional->parent()->set_root_instruction(new_root);\n }\n return absl::OkStatus();\n}\nabsl::Status CanonicalizeWhileInstruction(HloInstruction* loop) {\n CHECK_EQ(loop->opcode(), HloOpcode::kWhile);\n HloComputation* body = loop->while_body();\n HloComputation* cond = loop->while_condition();\n HloInstruction* body_parameter = body->parameter_instruction(0);\n if (!body_parameter->shape().IsTuple()) {\n *body_parameter->mutable_shape() =\n ShapeUtil::MakeTupleShape({body_parameter->shape()});\n HloInstruction* original = body->AddInstruction(\n HloInstruction::CreateGetTupleElement(body_parameter, 0));\n TF_RETURN_IF_ERROR(\n body_parameter->ReplaceAllUsesWithDifferentShape(original));\n 
}\n HloInstruction* root = body->root_instruction();\n if (!root->shape().IsTuple()) {\n root = body->AddInstruction(HloInstruction::CreateTuple({root}));\n body->set_root_instruction(root, true);\n }\n HloInstruction* cond_parameter = cond->parameter_instruction(0);\n if (!cond_parameter->shape().IsTuple()) {\n *cond_parameter->mutable_shape() =\n ShapeUtil::MakeTupleShape({cond_parameter->shape()});\n HloInstruction* original = cond->AddInstruction(\n HloInstruction::CreateGetTupleElement(cond_parameter, 0));\n TF_RETURN_IF_ERROR(\n cond_parameter->ReplaceAllUsesWithDifferentShape(original));\n }\n if (!loop->shape().IsTuple()) {\n *loop->mutable_shape() = ShapeUtil::MakeTupleShape({loop->shape()});\n HloInstruction* original = loop->parent()->AddInstruction(\n HloInstruction::CreateGetTupleElement(loop, 0));\n TF_RETURN_IF_ERROR(loop->ReplaceAllUsesWithDifferentShape(original));\n }\n HloInstruction* loop_tuple = loop->mutable_operand(0);\n if (!loop_tuple->shape().IsTuple()) {\n loop_tuple = loop->parent()->AddInstruction(\n HloInstruction::CreateTuple({loop_tuple}));\n TF_RETURN_IF_ERROR(loop->ReplaceOperandWithDifferentShape(0, loop_tuple));\n }\n if (loop_tuple->opcode() == HloOpcode::kParameter) {\n loop_tuple = ReconstructTuple(loop_tuple);\n TF_RETURN_IF_ERROR(loop->ReplaceOperandWith(0, loop_tuple));\n }\n if (root->opcode() != HloOpcode::kTuple) {\n root = ReconstructTuple(root);\n body->set_root_instruction(root);\n }\n if (loop->IsRoot()) {\n HloInstruction* new_root = ReconstructTuple(loop);\n loop->parent()->set_root_instruction(new_root);\n }\n return absl::OkStatus();\n}\nabsl::Status InfeedTokenPropagation::PropagateTokenThroughConditionalBranch() {\n HloComputation* comp = dangling_instruction_->parent();\n dangling_instruction_ = call_graph_->GetComputationCallers(comp)[0];\n CHECK_EQ(dangling_instruction_->opcode(), HloOpcode::kConditional);\n for (HloComputation* branch : dangling_instruction_->branch_computations()) {\n HloInstruction* root 
= branch->root_instruction();\n if (branch == comp) {\n TF_RETURN_IF_ERROR(\n InsertTokenIntoTuple(root, false).status());\n root->AppendOperand(output_token_);\n } else {\n TF_RETURN_IF_ERROR(\n InsertTokenIntoTuple(root, true).status());\n }\n }\n HloInstruction* parameter = comp->parameter_instruction(0);\n TF_ASSIGN_OR_RETURN(\n HloInstruction * input_token_gte,\n InsertTokenIntoTuple(parameter, false));\n TF_RETURN_IF_ERROR(input_token_->ReplaceAllUsesWith(input_token_gte));\n int64_t branch_operand_idx = dangling_instruction_->branch_index(comp) + 1;\n HloInstruction* branch_tuple =\n dangling_instruction_->mutable_operand(branch_operand_idx);\n TF_ASSIGN_OR_RETURN(\n HloInstruction * next_input_token_gte,\n InsertTokenIntoTuple(branch_tuple, true));\n TF_RETURN_IF_ERROR(dangling_instruction_->ReplaceOperandWithDifferentShape(\n branch_operand_idx, branch_tuple));\n input_token_ =\n branch_tuple->mutable_operand(next_input_token_gte->tuple_index());\n TF_ASSIGN_OR_RETURN(\n output_token_,\n InsertTokenIntoTuple(dangling_instruction_, false));\n return absl::OkStatus();\n}\nabsl::Status InfeedTokenPropagation::PropagateTokenThroughWhileBody() {\n HloComputation* comp = dangling_instruction_->parent();\n dangling_instruction_ = call_graph_->GetComputationCallers(comp)[0];\n CHECK_EQ(dangling_instruction_->opcode(), HloOpcode::kWhile);\n HloInstruction* root = comp->root_instruction();\n TF_RETURN_IF_ERROR(\n InsertTokenIntoTuple(root, false).status());\n root->AppendOperand(output_token_);\n HloInstruction* body_parameter = comp->parameter_instruction(0);\n TF_ASSIGN_OR_RETURN(\n HloInstruction * input_token_gte,\n InsertTokenIntoTuple(body_parameter, false));\n TF_RETURN_IF_ERROR(input_token_->ReplaceAllUsesWith(input_token_gte));\n HloComputation* cond = dangling_instruction_->while_condition();\n HloInstruction* cond_parameter = cond->parameter_instruction(0);\n TF_RETURN_IF_ERROR(\n InsertTokenIntoTuple(cond_parameter, false)\n .status());\n HloInstruction* 
while_tuple = dangling_instruction_->mutable_operand(0);\n TF_ASSIGN_OR_RETURN(\n input_token_,\n InsertTokenIntoTuple(while_tuple, true));\n TF_RETURN_IF_ERROR(\n dangling_instruction_->ReplaceOperandWithDifferentShape(0, while_tuple));\n TF_ASSIGN_OR_RETURN(\n output_token_,\n InsertTokenIntoTuple(dangling_instruction_, false));\n return absl::OkStatus();\n}\nabsl::Status InfeedTokenPropagation::PropagateToken() {\n HloComputation* comp = dangling_instruction_->parent();\n if (comp->IsEntryComputation()) {\n return absl::OkStatus();\n }\n VLOG(2) << \"Propagating tokens for: \" << dangling_instruction_->name();\n HloInstruction* caller = call_graph_->GetComputationCallers(comp)[0];\n if (caller->has_sharding()) {\n return absl::OkStatus();\n }\n if (caller->opcode() == HloOpcode::kConditional) {\n TF_RETURN_IF_ERROR(CanonicalizeConditionalInstruction(caller));\n TF_RETURN_IF_ERROR(PropagateTokenThroughConditionalBranch());\n } else if (caller->opcode() == HloOpcode::kWhile &&\n comp == caller->while_body()) {\n TF_RETURN_IF_ERROR(CanonicalizeWhileInstruction(caller));\n TF_RETURN_IF_ERROR(PropagateTokenThroughWhileBody());\n } else {\n VLOG(2) << \"Unhandled computation: \" << comp->name();\n return absl::OkStatus();\n }\n return PropagateToken();\n}\nabsl::StatusOr InfeedTokenPropagation::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n VLOG(5) << \"Before InfeedTokenPropagation:\";\n XLA_VLOG_LINES(5, module->ToString());\n std::vector dangling_infeeds;\n std::vector dangling_outfeeds;\n for (HloComputation* computation :\n module->MakeNonfusionComputations(execution_threads)) {\n if (!computation->IsEntryComputation()) {\n for (HloInstruction* instruction : computation->instructions()) {\n if (instruction->opcode() == HloOpcode::kInfeed &&\n IsDanglingInfeed(instruction)) {\n VLOG(1) << \"Found dangling infeed: \" << instruction->ToString();\n dangling_infeeds.push_back(instruction);\n } else if (instruction->opcode() == 
HloOpcode::kOutfeed &&\n IsDanglingOutfeed(instruction)) {\n VLOG(1) << \"Found dangling outfeed: \" << instruction->ToString();\n dangling_outfeeds.push_back(instruction);\n }\n }\n }\n }\n bool changed = !dangling_infeeds.empty() || !dangling_outfeeds.empty();\n if (changed) {\n call_graph_ = CallGraph::Build(module);\n if (!call_graph_->IsFlattened()) {\n return FailedPrecondition(\n \"Call graph must be flattened before infeed token propagation.\");\n }\n }\n for (HloInstruction* dangling_infeed : dangling_infeeds) {\n dangling_instruction_ = dangling_infeed;\n input_token_ = dangling_infeed->mutable_operand(0);\n output_token_ = dangling_infeed->AddInstruction(\n HloInstruction::CreateGetTupleElement(dangling_infeed, 1));\n TF_RETURN_IF_ERROR(PropagateToken());\n }\n for (HloInstruction* dangling_outfeed : dangling_outfeeds) {\n dangling_instruction_ = dangling_outfeed;\n input_token_ = dangling_outfeed->mutable_operand(1);\n output_token_ = dangling_outfeed;\n TF_RETURN_IF_ERROR(PropagateToken());\n }\n if (changed) {\n TF_RETURN_IF_ERROR(\n TupleSimplifier().Run(module, execution_threads).status());\n TF_RETURN_IF_ERROR(HloDCE().Run(module, execution_threads).status());\n }\n VLOG(5) << \"After InfeedTokenPropagation:\";\n XLA_VLOG_LINES(5, module->ToString());\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/infeed_token_propagation.h\"\n#include \n#include \n#include \n#include \n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace op = xla::testing::opcode_matchers;\nnamespace xla {\nnamespace {\nclass InfeedTokenPropagationTest : public HloTestBase {\n protected:\n InfeedTokenPropagationTest() = default;\n};\nTEST_F(InfeedTokenPropagationTest, EntryComputationInfeed) {\n constexpr std::string_view hlo = R\"(\nHloModule main\nENTRY main {\n token.0 = after-all()\n infeed.0 = 
(s32[], token[]) infeed(token.0)\n ROOT gte.0 = get-tuple-element(infeed.0), index=0\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n InfeedTokenPropagation itp;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(InfeedTokenPropagationTest, EntryComputationOutfeed) {\n constexpr std::string_view hlo = R\"(\nHloModule main\nENTRY main {\n arg.0 = s32[] parameter(0)\n tuple.0 = tuple(arg.0)\n token.0 = after-all()\n outfeed.0 = token[] outfeed(tuple.0, token.0), outfeed_shape=(s32[])\n ROOT tuple.1 = tuple()\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n InfeedTokenPropagation itp;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(InfeedTokenPropagationTest, ConditionalInfeed) {\n constexpr std::string_view hlo = R\"(\nHloModule main\ntrue_comp {\n arg.0 = () parameter(0)\n token.0 = after-all()\n infeed.0 = (s32[], token[]) infeed(token.0)\n ROOT tuple.0 = tuple()\n}\nfalse_comp {\n arg.0 = () parameter(0)\n ROOT tuple.0 = tuple()\n}\nENTRY main {\n pred.0 = pred[] constant(true)\n true_tuple.0 = tuple()\n false_tuple.0 = tuple()\n ROOT cond.0 = () conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n InfeedTokenPropagation itp;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* cond = FindInstruction(module.get(), \"cond.0\");\n EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());\n HloInstruction* true_tuple = FindInstruction(module.get(), \"true_tuple.0\");\n EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(true_tuple->shape().tuple_shapes()[0].IsToken());\n HloInstruction* false_tuple = FindInstruction(module.get(), 
\"false_tuple.0\");\n EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);\n HloComputation* true_comp = FindComputation(module.get(), \"true_comp\");\n EXPECT_THAT(true_comp->root_instruction(),\n op::Tuple(op::GetTupleElement(op::Infeed(), 1)));\n HloComputation* false_comp = FindComputation(module.get(), \"false_comp\");\n EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));\n}\nTEST_F(InfeedTokenPropagationTest, ConditionalOutfeed) {\n constexpr std::string_view hlo = R\"(\nHloModule main\ntrue_comp {\n arg.0 = (s32[]) parameter(0)\n token.0 = after-all()\n outfeed.0 = token[] outfeed(arg.0, token.0), outfeed_shape=(s32[])\n ROOT tuple.0 = tuple()\n}\nfalse_comp {\n arg.0 = () parameter(0)\n ROOT tuple.0 = tuple()\n}\nENTRY main {\n arg.0 = s32[] parameter(0)\n pred.0 = pred[] constant(true)\n true_tuple.0 = tuple(arg.0)\n false_tuple.0 = tuple()\n ROOT cond.0 = () conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n InfeedTokenPropagation itp;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* cond = FindInstruction(module.get(), \"cond.0\");\n EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());\n HloInstruction* true_tuple = FindInstruction(module.get(), \"true_tuple.0\");\n EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 2);\n EXPECT_TRUE(true_tuple->shape().tuple_shapes()[1].IsToken());\n HloInstruction* false_tuple = FindInstruction(module.get(), \"false_tuple.0\");\n EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);\n HloComputation* true_comp = FindComputation(module.get(), \"true_comp\");\n EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed()));\n HloComputation* false_comp = FindComputation(module.get(), \"false_comp\");\n 
EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));\n}\nTEST_F(InfeedTokenPropagationTest, ConditionalDuplicateOperand) {\n constexpr std::string_view hlo = R\"(\nHloModule main\ntrue_comp {\n arg.0 = () parameter(0)\n token.0 = after-all()\n infeed.0 = (s32[], token[]) infeed(token.0)\n ROOT tuple.0 = tuple()\n}\nfalse_comp {\n arg.0 = () parameter(0)\n ROOT tuple.0 = tuple()\n}\nENTRY main {\n pred.0 = pred[] constant(true)\n tuple.0 = tuple()\n ROOT cond.0 = () conditional(pred.0, tuple.0, tuple.0), true_computation=true_comp, false_computation=false_comp\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n InfeedTokenPropagation itp;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* cond = FindInstruction(module.get(), \"cond.0\");\n EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());\n const HloInstruction* true_tuple = cond->operand(1);\n EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(true_tuple->shape().tuple_shapes()[0].IsToken());\n const HloInstruction* false_tuple = cond->operand(2);\n EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);\n HloComputation* true_comp = FindComputation(module.get(), \"true_comp\");\n EXPECT_THAT(true_comp->root_instruction(),\n op::Tuple(op::GetTupleElement(op::Infeed(), 1)));\n HloComputation* false_comp = FindComputation(module.get(), \"false_comp\");\n EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));\n}\nTEST_F(InfeedTokenPropagationTest, NonTupleConditional) {\n constexpr std::string_view hlo = R\"(\nHloModule main\ntrue_comp {\n arg.0 = s32[] parameter(0)\n outfeed_tuple.0 = tuple(arg.0)\n token.0 = after-all()\n outfeed.0 = token[] outfeed(outfeed_tuple.0, token.0), outfeed_shape=(s32[])\n ROOT tuple.0 = tuple()\n}\nfalse_comp {\n arg.0 = () parameter(0)\n ROOT tuple.0 = tuple()\n}\nENTRY main {\n arg.0 = 
s32[] parameter(0)\n pred.0 = pred[] constant(true)\n false_tuple.0 = tuple()\n ROOT cond.0 = () conditional(pred.0, arg.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n InfeedTokenPropagation itp;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* cond = FindInstruction(module.get(), \"cond.0\");\n EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());\n HloInstruction* true_tuple = cond->mutable_operand(1);\n EXPECT_TRUE(true_tuple->shape().IsTuple());\n EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 2);\n EXPECT_TRUE(true_tuple->shape().tuple_shapes()[1].IsToken());\n HloInstruction* false_tuple = FindInstruction(module.get(), \"false_tuple.0\");\n EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);\n HloComputation* true_comp = FindComputation(module.get(), \"true_comp\");\n EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed()));\n HloComputation* false_comp = FindComputation(module.get(), \"false_comp\");\n EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));\n}\nTEST_F(InfeedTokenPropagationTest, DisjointConditionalOutfeed) {\n constexpr std::string_view hlo = R\"(\nHloModule main\ntrue_comp {\n ROOT arg.0 = () parameter(0)\n one.0 = s32[] constant(1)\n outfeed_tuple.0 = tuple(one.0)\n token.0 = after-all()\n outfeed.0 = token[] outfeed(outfeed_tuple.0, token.0), outfeed_shape=(s32[])\n}\nfalse_comp {\n arg.0 = () parameter(0)\n ROOT tuple.0 = tuple()\n}\nENTRY main {\n pred.0 = pred[] constant(true)\n true_tuple.0 = tuple()\n false_tuple.0 = tuple()\n ROOT cond.0 = () conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n InfeedTokenPropagation itp;\n 
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* cond = FindInstruction(module.get(), \"cond.0\");\n EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());\n HloInstruction* true_tuple = FindInstruction(module.get(), \"true_tuple.0\");\n EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(true_tuple->shape().tuple_shapes()[0].IsToken());\n HloInstruction* false_tuple = FindInstruction(module.get(), \"false_tuple.0\");\n EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);\n HloComputation* true_comp = FindComputation(module.get(), \"true_comp\");\n EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed()));\n HloComputation* false_comp = FindComputation(module.get(), \"false_comp\");\n EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));\n}\nTEST_F(InfeedTokenPropagationTest, WhileInfeed) {\n constexpr std::string_view hlo = R\"(\nHloModule main\ncomp {\n arg.0 = () parameter(0)\n token.0 = after-all()\n infeed.0 = (s32[], token[]) infeed(token.0)\n ROOT tuple.0 = tuple()\n}\ncond {\n arg.0 = () parameter(0)\n ROOT true.0 = pred[] constant(true)\n}\nENTRY main {\n while_tuple.0 = tuple()\n ROOT while.0 = () while(while_tuple.0), condition=cond, body=comp\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n InfeedTokenPropagation itp;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* loop = FindInstruction(module.get(), \"while.0\");\n EXPECT_EQ(loop->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(loop->shape().tuple_shapes()[0].IsToken());\n HloInstruction* loop_tuple = FindInstruction(module.get(), \"while_tuple.0\");\n EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[0].IsToken());\n HloComputation* body_comp = FindComputation(module.get(), \"comp\");\n 
EXPECT_THAT(body_comp->root_instruction(),\n op::Tuple(op::GetTupleElement(op::Infeed(), 1)));\n HloInstruction* body_param = body_comp->parameter_instruction(0);\n EXPECT_EQ(body_param->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(body_param->shape().tuple_shapes()[0].IsToken());\n HloComputation* cond_comp = FindComputation(module.get(), \"cond\");\n HloInstruction* cond_param = cond_comp->parameter_instruction(0);\n EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(cond_param->shape().tuple_shapes()[0].IsToken());\n}\nTEST_F(InfeedTokenPropagationTest, WhileOutfeed) {\n constexpr std::string_view hlo = R\"(\nHloModule main\ncomp {\n arg.0 = (s32[]) parameter(0)\n token.0 = after-all()\n outfeed.0 = token[] outfeed(arg.0, token.0), outfeed_shape=(s32[])\n gte.0 = get-tuple-element(arg.0), index=0\n ROOT tuple.0 = tuple(gte.0)\n}\ncond {\n arg.0 = (s32[]) parameter(0)\n ROOT true.0 = pred[] constant(true)\n}\nENTRY main {\n arg.0 = s32[] parameter(0)\n while_tuple.0 = tuple(arg.0)\n ROOT while.0 = (s32[]) while(while_tuple.0), condition=cond, body=comp\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n InfeedTokenPropagation itp;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* loop = FindInstruction(module.get(), \"while.0\");\n EXPECT_EQ(loop->shape().tuple_shapes_size(), 2);\n EXPECT_TRUE(loop->shape().tuple_shapes()[1].IsToken());\n HloInstruction* loop_tuple = FindInstruction(module.get(), \"while_tuple.0\");\n EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 2);\n EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[1].IsToken());\n HloComputation* body_comp = FindComputation(module.get(), \"comp\");\n EXPECT_THAT(body_comp->root_instruction(),\n op::Tuple(op::GetTupleElement(), op::Outfeed()));\n HloInstruction* body_param = body_comp->parameter_instruction(0);\n EXPECT_EQ(body_param->shape().tuple_shapes_size(), 2);\n 
EXPECT_TRUE(body_param->shape().tuple_shapes()[1].IsToken());\n HloComputation* cond_comp = FindComputation(module.get(), \"cond\");\n HloInstruction* cond_param = cond_comp->parameter_instruction(0);\n EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 2);\n EXPECT_TRUE(cond_param->shape().tuple_shapes()[1].IsToken());\n}\nTEST_F(InfeedTokenPropagationTest, DisjointWhileOutfeed) {\n constexpr std::string_view hlo = R\"(\nHloModule main\ncomp {\n ROOT arg.0 = () parameter(0)\n one.0 = s32[] constant(1)\n outfeed_tuple.0 = tuple(one.0)\n token.0 = after-all()\n outfeed.0 = token[] outfeed(outfeed_tuple.0, token.0), outfeed_shape=(s32[])\n}\ncond {\n arg.0 = () parameter(0)\n ROOT true.0 = pred[] constant(true)\n}\nENTRY main {\n while_tuple.0 = tuple()\n ROOT while.0 = () while(while_tuple.0), condition=cond, body=comp\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n InfeedTokenPropagation itp;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* loop = FindInstruction(module.get(), \"while.0\");\n EXPECT_EQ(loop->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(loop->shape().tuple_shapes()[0].IsToken());\n HloInstruction* loop_tuple = FindInstruction(module.get(), \"while_tuple.0\");\n EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[0].IsToken());\n HloComputation* body_comp = FindComputation(module.get(), \"comp\");\n EXPECT_THAT(body_comp->root_instruction(), op::Tuple(op::Outfeed()));\n HloInstruction* body_param = body_comp->parameter_instruction(0);\n EXPECT_EQ(body_param->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(body_param->shape().tuple_shapes()[0].IsToken());\n HloComputation* cond_comp = FindComputation(module.get(), \"cond\");\n HloInstruction* cond_param = cond_comp->parameter_instruction(0);\n EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 1);\n 
EXPECT_TRUE(cond_param->shape().tuple_shapes()[0].IsToken());\n}\nTEST_F(InfeedTokenPropagationTest, NonTupleWhile) {\n constexpr std::string_view hlo = R\"(\nHloModule main\ncomp {\n ROOT arg.0 = s32[] parameter(0)\n tuple.0 = tuple(arg.0)\n token.0 = after-all()\n outfeed.0 = token[] outfeed(tuple.0, token.0), outfeed_shape=(s32[])\n}\ncond {\n arg.0 = s32[] parameter(0)\n ROOT true.0 = pred[] constant(true)\n}\nENTRY main {\n arg.0 = s32[] parameter(0)\n ROOT while.0 = s32[] while(arg.0), condition=cond, body=comp\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n InfeedTokenPropagation itp;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* loop = FindInstruction(module.get(), \"while.0\");\n EXPECT_TRUE(loop->shape().IsTuple());\n EXPECT_EQ(loop->shape().tuple_shapes_size(), 2);\n EXPECT_TRUE(loop->shape().tuple_shapes()[1].IsToken());\n EXPECT_THAT(loop->operand(0), op::Tuple(op::Parameter(), op::AfterAll()));\n HloComputation* body_comp = FindComputation(module.get(), \"comp\");\n EXPECT_THAT(body_comp->root_instruction(),\n op::Tuple(op::GetTupleElement(), op::Outfeed()));\n HloInstruction* body_param = body_comp->parameter_instruction(0);\n EXPECT_EQ(body_param->shape().tuple_shapes_size(), 2);\n EXPECT_TRUE(body_param->shape().tuple_shapes()[1].IsToken());\n HloComputation* cond_comp = FindComputation(module.get(), \"cond\");\n HloInstruction* cond_param = cond_comp->parameter_instruction(0);\n EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 2);\n EXPECT_TRUE(cond_param->shape().tuple_shapes()[1].IsToken());\n}\nTEST_F(InfeedTokenPropagationTest, NestedInfeedOutfeed) {\n constexpr std::string_view hlo = R\"(\nHloModule main\ntrue_comp {\n arg.0 = (s32[]) parameter(0)\n token.0 = after-all()\n outfeed.0 = token[] outfeed(arg.0, token.0), outfeed_shape=(s32[])\n ROOT tuple.0 = tuple()\n}\nfalse_comp {\n arg.0 = () parameter(0)\n ROOT tuple.0 = tuple()\n}\ncomp {\n 
arg.0 = () parameter(0)\n token.0 = after-all()\n infeed.0 = (s32[], token[]) infeed(token.0)\n gte.0 = get-tuple-element(infeed.0), index=0\n pred.0 = pred[] constant(true)\n true_tuple.0 = tuple(gte.0)\n false_tuple.0 = tuple()\n ROOT cond.0 = () conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp\n}\ncond {\n arg.0 = () parameter(0)\n ROOT true.0 = pred[] constant(true)\n}\nENTRY main {\n while_tuple.0 = tuple()\n ROOT while.0 = () while(while_tuple.0), condition=cond, body=comp\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n InfeedTokenPropagation itp;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* loop = FindInstruction(module.get(), \"while.0\");\n EXPECT_EQ(loop->shape().tuple_shapes_size(), 2);\n EXPECT_TRUE(loop->shape().tuple_shapes()[0].IsToken());\n EXPECT_TRUE(loop->shape().tuple_shapes()[1].IsToken());\n HloInstruction* loop_tuple = FindInstruction(module.get(), \"while_tuple.0\");\n EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 2);\n EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[0].IsToken());\n EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[1].IsToken());\n HloComputation* body_comp = FindComputation(module.get(), \"comp\");\n EXPECT_THAT(body_comp->root_instruction(),\n op::Tuple(op::GetTupleElement(op::Infeed(), 1),\n op::GetTupleElement(op::Conditional(), 0)));\n HloInstruction* cond = FindInstruction(module.get(), \"cond.0\");\n EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());\n HloInstruction* true_tuple = FindInstruction(module.get(), \"true_tuple.0\");\n EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 2);\n EXPECT_TRUE(true_tuple->shape().tuple_shapes()[1].IsToken());\n HloInstruction* false_tuple = FindInstruction(module.get(), \"false_tuple.0\");\n EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);\n HloComputation* 
true_comp = FindComputation(module.get(), \"true_comp\");\n EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed()));\n HloComputation* false_comp = FindComputation(module.get(), \"false_comp\");\n EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/infeed_token_propagation.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/infeed_token_propagation_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1140,"cells":{"ID":{"kind":"string","value":"a9a26e45-6caf-4a8e-84d6-4f99d3d89f0a"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"while_loop_constant_sinking"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/while_loop_constant_sinking.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/while_loop_constant_sinking_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/while_loop_constant_sinking.h\"\n#include \"absl/algorithm/container.h\"\n#include \"absl/container/inlined_vector.h\"\n#include \"xla/service/while_util.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/util.h\"\nnamespace xla {\nnamespace {\nabsl::Status ReplaceUsesWhileKeepingLoopInvariance(\n HloInstruction* old_instr, HloInstruction* new_instr,\n HloInstruction* while_body_root, int64_t tuple_index) {\n CHECK_EQ(while_body_root->opcode(), HloOpcode::kTuple);\n std::vector users;\n users.reserve(old_instr->user_count());\n absl::c_copy(old_instr->users(), std::back_inserter(users));\n for (auto* user : users) {\n for (int64_t i = 0, e = user->operand_count(); i < e; i++) 
{\n if (user->operand(i) == old_instr &&\n !(user == while_body_root && i == tuple_index)) {\n TF_RETURN_IF_ERROR(user->ReplaceOperandWith(i, new_instr));\n }\n }\n }\n return absl::OkStatus();\n}\nHloInstruction* CloneHelper(const HloInstruction* instruction,\n HloComputation* computation) {\n if (instruction->opcode() == HloOpcode::kConstant) {\n return computation->AddInstruction(instruction->Clone(\".sunk\"));\n }\n if (instruction->opcode() == HloOpcode::kBroadcast) {\n return computation->AddInstruction(instruction->CloneWithNewOperands(\n instruction->shape(),\n {CloneHelper(instruction->operand(0), computation)}));\n }\n LOG(FATAL) << \"Unexpected instruction.\";\n}\n} \nabsl::StatusOr WhileLoopConstantSinking::TrySinkingConstantsIntoWhileLoop(\n HloInstruction* while_instr) {\n HloComputation* while_cond = while_instr->while_condition();\n HloComputation* while_body = while_instr->while_body();\n const HloInstruction& init_value = *while_instr->operand(0);\n if (init_value.opcode() != HloOpcode::kTuple) {\n return false;\n }\n bool changed = false;\n absl::flat_hash_map>\n conditional_gte_index_to_insts =\n WhileUtil::GetGTEsMapForWhileConditional(*while_cond);\n std::vector invariant_body_gtes =\n WhileUtil::GetInvariantGTEsForWhileBody(*while_body);\n for (HloInstruction* invariant_body_gte : invariant_body_gtes) {\n int64_t index = invariant_body_gte->tuple_index();\n const HloInstruction& invariant_value = *init_value.operand(index);\n if (invariant_value.opcode() != HloOpcode::kConstant &&\n (!sink_broadcast_of_constants_ ||\n invariant_value.opcode() != HloOpcode::kBroadcast ||\n invariant_value.operand(0)->opcode() != HloOpcode::kConstant)) {\n continue;\n }\n if (sink_only_scalar_constants_) {\n if (!ShapeUtil::IsScalar(init_value.operand(index)->shape())) {\n continue;\n }\n }\n if (invariant_body_gte->user_count() > 1) {\n HloInstruction* constant_instr =\n CloneHelper(&invariant_value, while_body);\n 
TF_RETURN_IF_ERROR(ReplaceUsesWhileKeepingLoopInvariance(\n invariant_body_gte, constant_instr, while_body->root_instruction(),\n index));\n changed = true;\n }\n auto it = conditional_gte_index_to_insts.find(index);\n if (it == conditional_gte_index_to_insts.end()) {\n continue;\n }\n for (HloInstruction* invariant_cond_gte : it->second) {\n if (invariant_cond_gte->user_count() > 0) {\n HloInstruction* constant_instr =\n CloneHelper(&invariant_value, while_cond);\n TF_RETURN_IF_ERROR(\n invariant_cond_gte->ReplaceAllUsesWith(constant_instr));\n changed = true;\n }\n }\n }\n return changed;\n}\nabsl::StatusOr WhileLoopConstantSinking::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n VLOG(2) << \"HLO module before WhileLoopConstantSinking:\";\n XLA_VLOG_LINES(2, module->ToString());\n bool changed = false;\n std::vector while_instrs;\n for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {\n absl::c_copy_if(comp->instructions(), std::back_inserter(while_instrs),\n HloPredicateIsOp);\n }\n for (HloInstruction* while_instr : while_instrs) {\n TF_ASSIGN_OR_RETURN(bool result,\n TrySinkingConstantsIntoWhileLoop(while_instr));\n changed |= result;\n }\n if (changed) {\n VLOG(2) << \"HLO module after WhileLoopConstantSinking:\";\n XLA_VLOG_LINES(2, module->ToString());\n } else {\n VLOG(2) << \"HLO module unchanged after WhileLoopConstantSinking\";\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/while_loop_constant_sinking.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/test.h\"\n#include \"xla/tests/hlo_test_base.h\"\nnamespace xla {\nnamespace {\nnamespace op = xla::testing::opcode_matchers;\nusing ::testing::_;\nusing WhileLoopConstantSinkingTest = HloTestBase;\nTEST_F(WhileLoopConstantSinkingTest, SinkOneConstant) {\n const char* const hlo_string = R\"(\nHloModule ModuleWithWhile\nbody {\n p_body = (f32[2],f32[2]) parameter(0)\n p_body.0 = f32[2] 
get-tuple-element((f32[2],f32[2]) p_body), index=0\n p_body.1 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=1\n add.0 = f32[2] add(p_body.0, p_body.1)\n ROOT root = (f32[2],f32[2]) tuple(add.0, p_body.1)\n}\ncondition {\n p_cond = (f32[2],f32[2]) parameter(0)\n ROOT result = pred[] constant(true)\n}\nENTRY entry {\n const_0 = f32[2] constant({1, 2})\n const_1 = f32[2] constant({2, 1})\n while_init = (f32[2],f32[2]) tuple(const_0, const_1)\n ROOT while = (f32[2],f32[2]) while(while_init), condition=condition, body=body\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n WhileLoopConstantSinking(false,\n true)\n .Run(module.get()));\n ASSERT_FALSE(changed);\n TF_ASSERT_OK_AND_ASSIGN(\n changed, WhileLoopConstantSinking(false,\n false)\n .Run(module.get()));\n ASSERT_TRUE(changed);\n auto* while_body = module->GetComputationWithName(\"body\");\n EXPECT_THAT(while_body->root_instruction(),\n op::Tuple(op::Add(_, op::Constant()), _));\n}\nTEST_F(WhileLoopConstantSinkingTest, SinkBroadcastOfConstant) {\n const char* const hlo_string = R\"(\nHloModule ModuleWithWhile\nbody {\n p_body = (f32[16],f32[16]) parameter(0)\n p_body.0 = get-tuple-element(p_body), index=0\n p_body.1 = get-tuple-element(p_body), index=1\n add.0 = add(p_body.0, p_body.1)\n ROOT root = tuple(add.0, p_body.1)\n}\ncondition {\n p_cond = (f32[16],f32[16]) parameter(0)\n ROOT result = pred[] constant(true)\n}\nENTRY entry {\n const_0 = f32[] constant(1)\n const_1 = f32[] constant(2)\n broadcast_0 = f32[16] broadcast(const_0), dimensions={}\n broadcast_1 = f32[16] broadcast(const_1), dimensions={}\n while_init = tuple(broadcast_0, broadcast_1)\n ROOT while = while(while_init), condition=condition, body=body\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n WhileLoopConstantSinking(false)\n .Run(module.get()));\n 
ASSERT_FALSE(changed);\n TF_ASSERT_OK_AND_ASSIGN(\n changed, WhileLoopConstantSinking(true)\n .Run(module.get()));\n ASSERT_TRUE(changed);\n auto* while_body = module->GetComputationWithName(\"body\");\n EXPECT_THAT(while_body->root_instruction(),\n op::Tuple(op::Add(_, op::Broadcast(op::Constant())), _));\n}\nTEST_F(WhileLoopConstantSinkingTest, KeepConstantsLoopInvariant) {\n const char* const hlo_string = R\"(\nHloModule ModuleWithWhile\nbody {\n p_body = (f32[2],f32[2],f32[2]) parameter(0)\n p_body.0 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_body), index=0\n p_body.1 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_body), index=1\n p_body.2 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_body), index=2\n add.0 = f32[2] add(p_body.1, p_body.2)\n ROOT root = (f32[2],f32[2],f32[2]) tuple(add.0, p_body.1, p_body.2)\n}\ncondition {\n p_cond = (f32[2],f32[2],f32[2]) parameter(0)\n ROOT result = pred[] constant(true)\n}\nENTRY entry {\n const_0 = f32[2] constant({1, 2})\n const_1 = f32[2] constant({2, 1})\n const_2 = f32[2] constant({3, 1})\n while_init = (f32[2],f32[2],f32[2]) tuple(const_0, const_1, const_2)\n ROOT while = (f32[2],f32[2],f32[2]) while(while_init), condition=condition, body=body\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n WhileLoopConstantSinking{}.Run(module.get()));\n ASSERT_TRUE(changed);\n auto* while_body = module->GetComputationWithName(\"body\");\n EXPECT_THAT(while_body->root_instruction(),\n op::Tuple(op::Add(op::Constant(), op::Constant()),\n op::GetTupleElement(op::Parameter(0)),\n op::GetTupleElement(op::Parameter(0))));\n}\nTEST_F(WhileLoopConstantSinkingTest, TupleShapedConstants) {\n const char* const hlo_string = R\"(\nHloModule ModuleWithWhile\nbody {\n p_b = (f32[2],(f32[2],f32[2])) parameter(0)\n p_b.0 = f32[2] get-tuple-element((f32[2],(f32[2],f32[2])) p_b), index=0\n p_b.1 = (f32[2],f32[2]) 
get-tuple-element((f32[2],(f32[2],f32[2])) p_b), index=1\n p_b.1.1 = f32[2] get-tuple-element(p_b.1), index=0\n ROOT root = (f32[2],(f32[2],f32[2])) tuple(p_b.1.1, p_b.1)\n}\ncondition {\n p_cond = (f32[2],(f32[2],f32[2])) parameter(0)\n ROOT result = pred[] constant(true)\n}\nENTRY entry {\n const_0 = f32[2] constant({1, 2})\n const_1 = (f32[2], f32[2]) constant(({2, 1},{3,1}))\n while_init = (f32[2],(f32[2],f32[2])) tuple(const_0, const_1)\n ROOT while = (f32[2],(f32[2],f32[2])) while(while_init), condition=condition, body=body\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n WhileLoopConstantSinking{}.Run(module.get()));\n ASSERT_TRUE(changed);\n auto* while_body = module->GetComputationWithName(\"body\");\n EXPECT_THAT(while_body->root_instruction(),\n op::Tuple(op::GetTupleElement(op::Constant(), 0),\n op::GetTupleElement(op::Parameter(0))));\n}\nTEST_F(WhileLoopConstantSinkingTest, DuplicateGTEs) {\n const char* const hlo_string = R\"(\nHloModule ModuleWithWhile\nbody {\n p_b = (f32[2],f32[2],f32[2]) parameter(0)\n p_b.1 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_b), index=1\n p_b.2 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_b), index=2\n p_b.2.dup = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_b), index=2\n add.0 = f32[2] add(p_b.1, p_b.2.dup)\n ROOT root = (f32[2],f32[2],f32[2]) tuple(add.0, p_b.1, p_b.2)\n}\ncondition {\n p_cond = (f32[2],f32[2],f32[2]) parameter(0)\n ROOT result = pred[] constant(true)\n}\nENTRY entry {\n const_0 = f32[2] constant({1, 2})\n const_1 = f32[2] constant({2, 1})\n const_2 = f32[2] constant({3, 1})\n while_init = (f32[2],f32[2],f32[2]) tuple(const_0, const_1, const_2)\n ROOT while = (f32[2],f32[2],f32[2]) while(while_init), condition=condition, body=body\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n 
WhileLoopConstantSinking{}.Run(module.get()));\n ASSERT_TRUE(changed);\n auto* while_body = module->GetComputationWithName(\"body\");\n EXPECT_THAT(while_body->root_instruction(),\n op::Tuple(op::Add(op::Constant(), ::testing::Not(op::Constant())),\n op::GetTupleElement(op::Parameter(0)),\n op::GetTupleElement(op::Parameter(0))));\n}\nTEST_F(WhileLoopConstantSinkingTest, DontCreateDeadConstant) {\n const char* const hlo_string = R\"(\nHloModule ModuleWithWhile\nbody {\n p_body = (f32[2],f32[2]) parameter(0)\n p_body.0 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=0\n p_body.1 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=1\n token0 = token[] after-all()\n outfeed = token[] outfeed(p_body.0, token0)\n ROOT root = (f32[2],f32[2],f32[2]) tuple(p_body.0, p_body.1, p_body.1)\n}\ncondition {\n p_cond = (f32[2],f32[2]) parameter(0)\n ROOT result = pred[] constant(true)\n}\nENTRY entry {\n const_0 = f32[2] constant({1, 2})\n const_1 = f32[2] constant({2, 1})\n while_init = (f32[2],f32[2]) tuple(const_0, const_1)\n ROOT while = (f32[2],f32[2],f32[2]) while(while_init), condition=condition,\n body=body\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n WhileLoopConstantSinking{}.Run(module.get()));\n ASSERT_TRUE(changed);\n auto* while_body = module->GetComputationWithName(\"body\");\n EXPECT_THAT(while_body->root_instruction(),\n op::Tuple(op::GetTupleElement(), op::GetTupleElement(),\n op::GetTupleElement()));\n for (const HloInstruction* inst : while_body->instructions()) {\n if (inst->opcode() == HloOpcode::kConstant) {\n EXPECT_GT(inst->user_count(), 0);\n }\n }\n}\nTEST_F(WhileLoopConstantSinkingTest, ConditionalSinkConstant) {\n const char* const hlo_string = R\"(\nHloModule ModuleWithWhile\nbody {\n p_body = (f32[],f32[]) parameter(0)\n p_body.0 = f32[] get-tuple-element((f32[],f32[]) p_body), index=0\n const = f32[] constant(1)\n add = f32[] add(p_body.0, 
const)\n p_body.1 = f32[] get-tuple-element((f32[],f32[]) p_body), index=1\n ROOT root = (f32[],f32[]) tuple(add, p_body.1)\n}\ncondition {\n p_cond = (f32[],f32[]) parameter(0)\n p_cond.0 = f32[] get-tuple-element((f32[],f32[]) p_cond), index=0\n p_cond.1 = f32[] get-tuple-element((f32[],f32[]) p_cond), index=1\n ROOT result = pred[] compare(p_cond.0, p_cond.1), direction=LT\n}\nENTRY entry {\n const_0 = f32[] constant(0)\n const_1 = f32[] constant(10)\n while_init = (f32[],f32[]) tuple(const_0, const_1)\n ROOT while = (f32[],f32[]) while(while_init), condition=condition, body=body\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n WhileLoopConstantSinking{}.Run(module.get()));\n ASSERT_TRUE(changed);\n auto* while_condition = module->GetComputationWithName(\"condition\");\n EXPECT_THAT(while_condition->root_instruction(), op::Lt(_, op::Constant()));\n}\nTEST_F(WhileLoopConstantSinkingTest, ConditionalTupleShapedConstants) {\n const char* const hlo_string = R\"(\nHloModule ModuleWithWhile\nbody {\n p_b = (f32[],(f32[],f32[])) parameter(0)\n p_b.0 = f32[] get-tuple-element((f32[],(f32[],f32[])) p_b), index=0\n p_b.1 = (f32[],f32[]) get-tuple-element((f32[],(f32[],f32[])) p_b), index=1\n p_b.1.0 = f32[] get-tuple-element((f32[],f32[]) p_b.1), index=0\n add = f32[] add(p_b.0, p_b.1.0)\n ROOT root = (f32[],(f32[],f32[])) tuple(add, p_b.1)\n}\ncondition {\n p_c = (f32[],(f32[],f32[])) parameter(0)\n p_c.0 = f32[] get-tuple-element((f32[],(f32[],f32[])) p_c), index=0\n p_c.1 = (f32[],f32[]) get-tuple-element((f32[],(f32[],f32[])) p_c), index=1\n p_c.1.1 = f32[] get-tuple-element((f32[],f32[]) p_c.1), index=1\n ROOT result = pred[] compare(p_c.0, p_c.1.1), direction=LT\n}\nENTRY entry {\n const_0 = f32[] constant(0)\n const_1 = (f32[], f32[]) constant((1, 10))\n while_init = (f32[],(f32[],f32[])) tuple(const_0, const_1)\n ROOT while = (f32[],(f32[],f32[])) while(while_init), 
condition=condition, body=body\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n WhileLoopConstantSinking{}.Run(module.get()));\n ASSERT_TRUE(changed);\n auto* while_condition = module->GetComputationWithName(\"condition\");\n EXPECT_THAT(while_condition->root_instruction(),\n op::Lt(_, op::GetTupleElement(op::Constant())));\n}\nTEST_F(WhileLoopConstantSinkingTest, ConditionalDontCreateDeadConstant) {\n const char* const hlo_string = R\"(\nHloModule ModuleWithWhile\nbody {\n p_body = (f32[],f32[],f32[]) parameter(0)\n p_body.0 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=0\n const = f32[] constant(1)\n add = f32[] add(p_body.0, const)\n p_body.1 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=1\n p_body.2 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=2\n ROOT root = (f32[],f32[],f32[]) tuple(add, p_body.1, p_body.2)\n}\ncondition {\n p_cond = (f32[],f32[],f32[]) parameter(0)\n p_cond.0 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=0\n p_cond.1 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=1\n p_cond.2 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=2\n ROOT result = pred[] compare(p_cond.0, p_cond.1), direction=LT\n}\nENTRY entry {\n const_0 = f32[] constant(0)\n const_1 = f32[] constant(10)\n const_2 = f32[] constant(12)\n while_init = (f32[],f32[],f32[]) tuple(const_0, const_1, const_2)\n ROOT while = (f32[],f32[],f32[]) while(while_init), condition=condition, body=body\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n WhileLoopConstantSinking{}.Run(module.get()));\n ASSERT_TRUE(changed);\n auto* while_condition = module->GetComputationWithName(\"condition\");\n EXPECT_THAT(while_condition->root_instruction(), op::Lt(_, op::Constant()));\n for (const HloInstruction* inst : while_condition->instructions()) {\n 
if (inst->opcode() == HloOpcode::kConstant) {\n EXPECT_GT(inst->user_count(), 0);\n }\n }\n}\nTEST_F(WhileLoopConstantSinkingTest, ConditionalMultipleSameIndexGTEs) {\n const char* const hlo_string = R\"(\nHloModule ModuleWithWhile\nbody {\n p_body = (f32[],f32[],f32[]) parameter(0)\n p_body.0 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=0\n const = f32[] constant(1)\n add.0 = f32[] add(p_body.0, const)\n p_body.1 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=1\n add.1 = f32[] add(p_body.1, const)\n p_body.2 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=2\n ROOT root = (f32[],f32[],f32[]) tuple(add.0, add.1, p_body.2)\n}\ncondition {\n p_cond = (f32[],f32[],f32[]) parameter(0)\n p_cond.0 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=0\n p_cond.2 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=2\n lt.0 = pred[] compare(p_cond.0, p_cond.2), direction=LT\n p_cond.1 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=1\n p_cond.2.c = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=2\n lt.1 = pred[] compare(p_cond.1, p_cond.2.c), direction=LT\n ROOT result = pred[] and(lt.0, lt.1)\n}\nENTRY entry {\n const_0 = f32[] constant(0)\n const_1 = f32[] constant(0)\n const_2 = f32[] constant(12)\n while_init = (f32[],f32[],f32[]) tuple(const_0, const_1, const_2)\n ROOT while = (f32[],f32[],f32[]) while(while_init), condition=condition, body=body\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n WhileLoopConstantSinking{}.Run(module.get()));\n ASSERT_TRUE(changed);\n auto* while_condition = module->GetComputationWithName(\"condition\");\n EXPECT_THAT(while_condition->root_instruction(),\n op::And(op::Lt(_, op::Constant()), op::Lt(_, op::Constant())));\n}\n} \n} "},"Code 
Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_constant_sinking.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_constant_sinking_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1141,"cells":{"ID":{"kind":"string","value":"9131b767-6a50-48e5-8e87-4d8d59db6611"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"rendezvous"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/framework/rendezvous.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/framework/rendezvous_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/core/framework/rendezvous.h\"\n#include \n#include \n#include \n#include \n#include \"tensorflow/core/framework/local_rendezvous.h\"\n#include \"tensorflow/core/lib/core/errors.h\"\n#include \"tensorflow/core/lib/core/notification.h\"\n#include \"tensorflow/core/lib/gtl/flatmap.h\"\n#include \"tensorflow/core/lib/gtl/manual_constructor.h\"\n#include \"tensorflow/core/lib/hash/hash.h\"\n#include \"tensorflow/core/lib/strings/str_util.h\"\n#include \"tensorflow/core/platform/logging.h\"\n#include \"tensorflow/core/platform/macros.h\"\n#include \"tensorflow/core/platform/mutex.h\"\n#include \"tensorflow/core/platform/thread_annotations.h\"\n#include \"tensorflow/core/platform/types.h\"\nnamespace tensorflow {\nRendezvous::ParsedKey& Rendezvous::ParsedKey::operator=(const ParsedKey& b) {\n const char* b_base = b.buf_.data();\n buf_ = b.buf_;\n src_device = StringPiece(buf_.data() + (b.src_device.data() - b_base),\n b.src_device.size());\n src = b.src;\n src_incarnation = b.src_incarnation;\n dst_device = 
StringPiece(buf_.data() + (b.dst_device.data() - b_base),\n b.dst_device.size());\n dst = b.dst;\n edge_name = StringPiece(buf_.data() + (b.edge_name.data() - b_base),\n b.edge_name.size());\n return *this;\n}\nstring Rendezvous::CreateKey(const string& src_device, uint64 src_incarnation,\n const string& dst_device, const string& name,\n const FrameAndIter& frame_iter) {\n char buf[strings::kFastToBufferSize];\n return strings::StrCat(\n src_device, \";\", strings::Uint64ToHexString(src_incarnation, buf), \";\",\n dst_device, \";\", name, \";\", frame_iter.frame_id, \":\", frame_iter.iter_id);\n}\nstatic StringPiece ConsumeNextPart(StringPiece* s, char delim) {\n for (size_t offset = 0; offset < s->size(); offset++) {\n if ((*s)[offset] == delim) {\n StringPiece result(s->data(), offset);\n s->remove_prefix(offset + 1); \n return result;\n }\n }\n StringPiece result(s->data(), s->size());\n s->remove_prefix(s->size());\n return result;\n}\nStatus Rendezvous::ParseKey(StringPiece key, ParsedKey* out) {\n if (key.data() == out->buf_.data()) {\n DCHECK_EQ(key.size(), out->buf_.size());\n } else {\n out->buf_.assign(key.data(), key.size());\n }\n StringPiece s(out->buf_);\n StringPiece parts[5];\n for (int i = 0; i < 5; i++) {\n parts[i] = ConsumeNextPart(&s, ';');\n }\n if (s.empty() && \n !parts[4].empty() && \n DeviceNameUtils::ParseFullName(parts[0], &out->src) &&\n strings::HexStringToUint64(parts[1], &out->src_incarnation) &&\n DeviceNameUtils::ParseFullName(parts[2], &out->dst) &&\n !parts[3].empty()) {\n out->src_device = StringPiece(parts[0].data(), parts[0].size());\n out->dst_device = StringPiece(parts[2].data(), parts[2].size());\n out->edge_name = StringPiece(parts[3].data(), parts[3].size());\n return absl::OkStatus();\n }\n return errors::InvalidArgument(\"Invalid rendezvous key: \", key);\n}\nRendezvousInterface::~RendezvousInterface() {}\nStatus RendezvousInterface::Recv(const ParsedKey& key, const Args& recv_args,\n Tensor* val, bool* is_dead,\n 
int64_t timeout_ms) {\n Status ret;\n Notification n;\n RecvAsync(key, recv_args,\n [&ret, &n, val, is_dead](const Status& s, const Args& send_args,\n const Args& recv_args, const Tensor& v,\n const bool dead) {\n ret = s;\n *val = v;\n *is_dead = dead;\n n.Notify();\n });\n if (timeout_ms > 0) {\n int64_t timeout_us = timeout_ms * 1000;\n bool notified = WaitForNotificationWithTimeout(&n, timeout_us);\n if (!notified) {\n return Status(absl::StatusCode::kDeadlineExceeded,\n \"Timed out waiting for notification\");\n }\n } else {\n n.WaitForNotification();\n }\n return ret;\n}\nStatus RendezvousInterface::Recv(const ParsedKey& key, const Args& args,\n Tensor* val, bool* is_dead) {\n const int64_t no_timeout = 0;\n return Recv(key, args, val, is_dead, no_timeout);\n}\nnamespace {\nclass LocalRendezvousWrapper : public Rendezvous {\n public:\n LocalRendezvousWrapper(int num_shards) : impl_(this, num_shards) {}\n Status Send(const ParsedKey& key, const Args& send_args, const Tensor& val,\n const bool is_dead) override {\n return impl_.Send(key, send_args, val, is_dead);\n }\n void RecvAsync(const ParsedKey& key, const Args& recv_args,\n DoneCallback done) override {\n impl_.RecvAsync(key, recv_args, std::move(done));\n }\n void StartAbort(const Status& status) override { impl_.StartAbort(status); }\n private:\n LocalRendezvous impl_;\n LocalRendezvousWrapper(const LocalRendezvousWrapper&) = delete;\n void operator=(const LocalRendezvousWrapper&) = delete;\n};\n} \nRendezvous* NewLocalRendezvous(int num_shards) {\n return new LocalRendezvousWrapper(num_shards);\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/core/framework/rendezvous.h\"\n#include \"absl/status/status.h\"\n#include \"unsupported/Eigen/CXX11/Tensor\" \n#include \"tensorflow/core/framework/cancellation.h\"\n#include \"tensorflow/core/framework/tensor.h\"\n#include \"tensorflow/core/framework/tensor_shape.h\"\n#include 
\"tensorflow/core/framework/tensor_types.h\"\n#include \"tensorflow/core/framework/types.pb.h\"\n#include \"tensorflow/core/lib/core/errors.h\"\n#include \"tensorflow/core/lib/core/notification.h\"\n#include \"tensorflow/core/lib/core/status_test_util.h\"\n#include \"tensorflow/core/lib/core/threadpool.h\"\n#include \"tensorflow/core/lib/random/simple_philox.h\"\n#include \"tensorflow/core/lib/strings/strcat.h\"\n#include \"tensorflow/core/platform/env.h\"\n#include \"tensorflow/core/platform/logging.h\"\n#include \"tensorflow/core/platform/mutex.h\"\n#include \"tensorflow/core/platform/notification.h\"\n#include \"tensorflow/core/platform/test.h\"\n#include \"tensorflow/core/platform/test_benchmark.h\"\n#include \"tensorflow/core/platform/types.h\"\nnamespace tensorflow {\nnamespace {\nTEST(RendezvousTest, Key) {\n const string key = Rendezvous::CreateKey(\n \"/job:mnist/replica:1/task:2/CPU:0\", 7890,\n \"/job:mnist/replica:1/task:2/device:GPU:0\", \"var0\", FrameAndIter(0, 0));\n EXPECT_EQ(key,\n \"/job:mnist/replica:1/task:2/CPU:0;\"\n \"0000000000001ed2;\" \n \"/job:mnist/replica:1/task:2/device:GPU:0;\"\n \"var0;\"\n \"0:0\");\n Rendezvous::ParsedKey parsed;\n TF_EXPECT_OK(Rendezvous::ParseKey(key, &parsed));\n EXPECT_EQ(parsed.src_device, \"/job:mnist/replica:1/task:2/CPU:0\");\n EXPECT_EQ(parsed.src_incarnation, 7890);\n EXPECT_EQ(parsed.src.type, \"CPU\");\n EXPECT_EQ(parsed.dst_device, \"/job:mnist/replica:1/task:2/device:GPU:0\");\n EXPECT_EQ(parsed.dst.type, \"GPU\");\n EXPECT_FALSE(Rendezvous::ParseKey(\"foo;bar;baz\", &parsed).ok());\n EXPECT_FALSE(Rendezvous::ParseKey(\"/job:mnist/replica:1/task:2/CPU:0;\"\n \"/job:mnist/replica:1/task:2/device:GPU:0;\",\n &parsed)\n .ok());\n EXPECT_FALSE(\n Rendezvous::ParseKey(strings::StrCat(key, \";\", key), &parsed).ok());\n}\nclass LocalRendezvousTest : public ::testing::Test {\n public:\n LocalRendezvousTest() : threads_(Env::Default(), \"test\", 16) {\n rendez_ = NewLocalRendezvous();\n }\n 
~LocalRendezvousTest() override { rendez_->Unref(); }\n void SchedClosure(std::function fn) {\n threads_.Schedule(std::move(fn));\n }\n Rendezvous* rendez_;\n private:\n thread::ThreadPool threads_;\n};\nTensor V(const string& content) {\n Tensor tensor(DT_STRING, TensorShape({}));\n tensor.scalar()() = content;\n return tensor;\n}\nstring V(const Tensor& tensor) {\n CHECK_EQ(tensor.dtype(), DT_STRING);\n CHECK(TensorShapeUtils::IsScalar(tensor.shape()));\n return tensor.scalar()();\n}\nRendezvous::ParsedKey MakeKey(const string& name) {\n string s = Rendezvous::CreateKey(\"/job:mnist/replica:1/task:2/CPU:0\", 7890,\n \"/job:mnist/replica:1/task:2/device:GPU:0\",\n name, FrameAndIter(0, 0));\n Rendezvous::ParsedKey k;\n TF_EXPECT_OK(Rendezvous::ParseKey(s, &k));\n return k;\n}\nconst Rendezvous::ParsedKey& KeyFoo() {\n static auto* key = new Rendezvous::ParsedKey(MakeKey(\"foo\"));\n return *key;\n}\nconst Rendezvous::ParsedKey& KeyBar() {\n static auto* key = new Rendezvous::ParsedKey(MakeKey(\"bar\"));\n return *key;\n}\nTEST_F(LocalRendezvousTest, SendRecv) {\n Rendezvous::Args args;\n TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V(\"hello\"), false));\n Tensor val(DT_STRING);\n bool is_dead = false;\n TF_ASSERT_OK(rendez_->Recv(KeyFoo(), args, &val, &is_dead));\n EXPECT_EQ(\"hello\", V(val));\n}\nTEST_F(LocalRendezvousTest, RecvSend) {\n SchedClosure([this]() {\n Env::Default()->SleepForMicroseconds(10000);\n Rendezvous::Args args;\n TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V(\"hello\"), false));\n });\n Tensor val(DT_STRING);\n bool is_dead = false;\n Rendezvous::Args args;\n TF_ASSERT_OK(rendez_->Recv(KeyFoo(), args, &val, &is_dead));\n EXPECT_EQ(\"hello\", V(val));\n}\nTEST_F(LocalRendezvousTest, PingPong) {\n SchedClosure([this]() {\n Tensor t(DT_STRING);\n bool is_dead = false;\n Rendezvous::Args args;\n TF_ASSERT_OK(rendez_->Recv(KeyFoo(), args, &t, &is_dead));\n TF_ASSERT_OK(rendez_->Send(KeyBar(), args, t, is_dead));\n });\n 
Env::Default()->SleepForMicroseconds(1000000);\n Tensor val(DT_STRING);\n bool val_dead = false;\n Rendezvous::Args args;\n TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V(\"secret msg\"), val_dead));\n TF_ASSERT_OK(rendez_->Recv(KeyBar(), args, &val, &val_dead));\n EXPECT_EQ(\"secret msg\", V(val));\n}\nTEST_F(LocalRendezvousTest, CancelBeforeRecv) {\n auto* cm = new CancellationManager();\n Tensor val(DT_STRING);\n bool is_dead = false;\n Rendezvous::Args args;\n args.cancellation_manager = cm;\n cm->StartCancel();\n auto s = rendez_->Recv(KeyFoo(), args, &val, &is_dead);\n EXPECT_FALSE(s.ok());\n EXPECT_TRUE(absl::IsCancelled(s));\n EXPECT_EQ(\"RecvAsync is cancelled.\", s.message());\n delete cm;\n}\nTEST_F(LocalRendezvousTest, CancelAfterRecv) {\n auto* cm = new CancellationManager();\n Notification n;\n SchedClosure([cm, &n]() {\n Env::Default()->SleepForMicroseconds(10000);\n cm->StartCancel();\n n.Notify();\n });\n Tensor val(DT_STRING);\n bool is_dead = false;\n Rendezvous::Args args;\n args.cancellation_manager = cm;\n auto s = rendez_->Recv(KeyFoo(), args, &val, &is_dead);\n EXPECT_FALSE(s.ok());\n EXPECT_TRUE(absl::IsCancelled(s));\n EXPECT_EQ(\"RecvAsync is cancelled.\", s.message());\n n.WaitForNotification();\n delete cm;\n}\nTEST_F(LocalRendezvousTest, CancelEmptyQueue) {\n auto* cm = new CancellationManager();\n Notification n;\n SchedClosure([this, cm, &n]() {\n Env::Default()->SleepForMicroseconds(10000);\n Rendezvous::Args args;\n TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V(\"hello\"), false));\n cm->StartCancel();\n n.Notify();\n });\n Tensor val(DT_STRING);\n bool is_dead = false;\n Rendezvous::Args args;\n args.cancellation_manager = cm;\n TF_ASSERT_OK(rendez_->Recv(KeyFoo(), args, &val, &is_dead));\n EXPECT_EQ(\"hello\", V(val));\n n.WaitForNotification();\n delete cm;\n}\nTEST_F(LocalRendezvousTest, CancelMultiple) {\n auto* cm = new CancellationManager();\n SchedClosure([this, cm]() {\n Env::Default()->SleepForMicroseconds(10000);\n 
Rendezvous::Args args;\n cm->StartCancel();\n TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V(\"hello\"), false));\n TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V(\"hello\"), false));\n });\n Tensor val(DT_STRING);\n Rendezvous::Args args;\n Rendezvous::Args args_with_cancellation;\n args_with_cancellation.cancellation_manager = cm;\n Notification n0;\n Notification n1;\n Notification n2;\n Notification n3;\n Status s0;\n Status s1;\n Status s2;\n Status s3;\n rendez_->RecvAsync(\n KeyFoo(), args,\n [&n0, &s0](const Status& s, const Rendezvous::Args& send_args,\n const Rendezvous::Args& recv_args, const Tensor& v,\n const bool dead) {\n s0.Update(s);\n n0.Notify();\n });\n rendez_->RecvAsync(\n KeyFoo(), args_with_cancellation,\n [&n1, &s1](const Status& s, const Rendezvous::Args& send_args,\n const Rendezvous::Args& recv_args, const Tensor& v,\n const bool dead) {\n s1.Update(s);\n n1.Notify();\n });\n rendez_->RecvAsync(\n KeyFoo(), args,\n [&n2, &s2](const Status& s, const Rendezvous::Args& send_args,\n const Rendezvous::Args& recv_args, const Tensor& v,\n const bool dead) {\n s2.Update(s);\n n2.Notify();\n });\n rendez_->RecvAsync(\n KeyFoo(), args_with_cancellation,\n [&n3, &s3](const Status& s, const Rendezvous::Args& send_args,\n const Rendezvous::Args& recv_args, const Tensor& v,\n const bool dead) {\n s3.Update(s);\n n3.Notify();\n });\n n0.WaitForNotification();\n n1.WaitForNotification();\n n2.WaitForNotification();\n n3.WaitForNotification();\n TF_ASSERT_OK(s0);\n TF_ASSERT_OK(s2);\n EXPECT_FALSE(s1.ok());\n EXPECT_FALSE(s3.ok());\n delete cm;\n}\nstruct BlockingState {\n mutex lock;\n int counter = 0;\n Notification done;\n};\nTEST_F(LocalRendezvousTest, RandomSendRecv) {\n static const int N = 100;\n random::PhiloxRandom philox(testing::RandomSeed(), 17);\n random::SimplePhilox rnd(&philox);\n BlockingState state;\n state.counter = N;\n for (int i = 0; i < N; ++i) {\n int micros = 100 + rnd.Uniform(1000);\n SchedClosure([this, i, micros]() {\n 
Env::Default()->SleepForMicroseconds(micros);\n Rendezvous::Args args;\n TF_ASSERT_OK(rendez_->Send(MakeKey(strings::StrCat(i)), args,\n V(strings::StrCat(i)), false));\n });\n auto recv_done = [this, &state, i](const Status& status,\n const Rendezvous::Args& sender_args,\n const Rendezvous::Args& recver_args,\n const Tensor& val, const bool val_dead) {\n EXPECT_EQ(strings::StrCat(i), V(val));\n bool done = false;\n {\n mutex_lock l(state.lock);\n state.counter--;\n if (state.counter == 0) {\n done = true;\n }\n }\n if (done) {\n state.done.Notify();\n }\n };\n micros = 100 + rnd.Uniform(1000);\n SchedClosure([this, i, micros, recv_done]() {\n Env::Default()->SleepForMicroseconds(micros);\n rendez_->RecvAsync(MakeKey(strings::StrCat(i)), Rendezvous::Args(),\n recv_done);\n });\n }\n state.done.WaitForNotification();\n}\nvoid RandomSleep() {\n if (std::rand() % 10 == 0) {\n Env::Default()->SleepForMicroseconds(1000);\n }\n}\nTEST_F(LocalRendezvousTest, MultiSends) {\n static const int N = 100;\n const auto& key_foo = KeyFoo();\n Rendezvous::Args args;\n SchedClosure([=]() {\n for (int i = 0; i < N; ++i) {\n TF_ASSERT_OK(rendez_->Send(key_foo, args, V(strings::StrCat(i)), false));\n RandomSleep();\n }\n });\n Tensor val;\n bool val_dead;\n for (int i = 0; i < N; ++i) {\n TF_ASSERT_OK(rendez_->Recv(key_foo, args, &val, &val_dead));\n RandomSleep();\n }\n}\nTEST_F(LocalRendezvousTest, RecvAbort) {\n rendez_->Ref();\n SchedClosure([this]() {\n rendez_->StartAbort(errors::Aborted(\"\")); \n rendez_->Unref();\n });\n Tensor val(DT_STRING);\n bool val_dead = false;\n Rendezvous::Args args;\n Status status = rendez_->Recv(KeyFoo(), args, &val, &val_dead);\n EXPECT_TRUE(absl::IsAborted(status));\n}\nTEST_F(LocalRendezvousTest, RecvSleepAbort) {\n rendez_->Ref();\n SchedClosure([this]() {\n Env::Default()->SleepForMicroseconds(1000000);\n rendez_->StartAbort(errors::Aborted(\"\")); \n rendez_->Unref();\n });\n Tensor val(DT_STRING);\n bool val_dead = false;\n Rendezvous::Args 
args;\n Status status = rendez_->Recv(KeyFoo(), args, &val, &val_dead);\n EXPECT_TRUE(absl::IsAborted(status));\n}\nTEST_F(LocalRendezvousTest, AbortThenRecvOrSend) {\n rendez_->StartAbort(errors::Aborted(\"\"));\n Tensor val(DT_STRING);\n bool val_dead = false;\n Rendezvous::Args args;\n EXPECT_TRUE(absl::IsAborted(rendez_->Send(KeyFoo(), args, val, val_dead)));\n EXPECT_TRUE(absl::IsAborted(rendez_->Recv(KeyFoo(), args, &val, &val_dead)));\n}\nclass DummyDeviceContext : public DeviceContext {\n public:\n explicit DummyDeviceContext(int stream_id) : stream_id_(stream_id) {}\n ~DummyDeviceContext() override {}\n int stream_id() const { return stream_id_; }\n void CopyTensorInSameDevice(const Tensor* input_tensor, Device* device,\n Tensor* output_tensor,\n StatusCallback done) const override {\n done(absl::OkStatus());\n }\n private:\n const int stream_id_;\n};\nTEST_F(LocalRendezvousTest, TransferDummyDeviceContext) {\n Rendezvous::Args args;\n args.device_context = new DummyDeviceContext(123);\n TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V(\"hello\"), false));\n Notification n;\n Rendezvous::Args args1;\n args1.device_context = new DummyDeviceContext(1);\n rendez_->RecvAsync(\n KeyFoo(), args1,\n [&n](const Status& s, const Rendezvous::Args& send_args,\n const Rendezvous::Args& recv_args, const Tensor& val, bool is_dead) {\n CHECK_EQ(123, dynamic_cast(\n send_args.device_context)\n ->stream_id());\n n.Notify();\n });\n n.WaitForNotification();\n args.device_context->Unref();\n args1.device_context->Unref();\n}\nvoid BM_SendRecv(::testing::benchmark::State& state) {\n Rendezvous* rendez = NewLocalRendezvous();\n Tensor orig = V(\"val\");\n Tensor val(DT_STRING, TensorShape({}));\n bool is_dead = false;\n Rendezvous::Args args;\n for (auto s : state) {\n TF_CHECK_OK(rendez->Send(KeyFoo(), args, orig, is_dead));\n TF_CHECK_OK(rendez->Recv(KeyFoo(), args, &val, &is_dead));\n }\n CHECK_EQ(V(val), V(orig));\n rendez->Unref();\n}\nBENCHMARK(BM_SendRecv);\nvoid 
BM_RecvSend(::testing::benchmark::State& state) {\n Rendezvous* rendez = NewLocalRendezvous();\n Tensor orig = V(\"val\");\n Tensor val(DT_STRING, TensorShape({}));\n bool is_dead = false;\n Rendezvous::Args args;\n for (auto s : state) {\n bool received = false;\n rendez->RecvAsync(\n KeyFoo(), args,\n [&val, &received](const Status& ,\n const Rendezvous::Args& ,\n const Rendezvous::Args& ,\n const Tensor& tensor, bool ) {\n val = tensor;\n received = true;\n });\n TF_CHECK_OK(rendez->Send(KeyFoo(), args, orig, is_dead));\n CHECK(received);\n }\n CHECK_EQ(V(val), V(orig));\n rendez->Unref();\n}\nBENCHMARK(BM_RecvSend);\nvoid BM_PingPong(::testing::benchmark::State& state) {\n const int messages_count = state.range(0);\n auto* cm = new CancellationManager();\n thread::ThreadPool* pool = new thread::ThreadPool(Env::Default(), \"test\", 1);\n for (auto s : state) {\n Rendezvous* rendez = NewLocalRendezvous();\n pool->Schedule([rendez, messages_count]() {\n Tensor bar = V(\"bar\");\n Tensor foo(DT_STRING, TensorShape({}));\n bool is_dead = false;\n Rendezvous::Args args;\n for (int i = 0; i < messages_count; ++i) {\n TF_CHECK_OK(rendez->Recv(KeyFoo(), args, &foo, &is_dead));\n TF_CHECK_OK(rendez->Send(KeyBar(), args, bar, is_dead));\n }\n CHECK_EQ(\"foo\", V(foo));\n });\n Tensor foo = V(\"foo\");\n Tensor bar(DT_STRING, TensorShape({}));\n bool is_dead = false;\n Rendezvous::Args args;\n args.cancellation_manager = cm;\n for (int i = 0; i < messages_count; ++i) {\n TF_CHECK_OK(rendez->Send(KeyFoo(), args, foo, is_dead));\n TF_CHECK_OK(rendez->Recv(KeyBar(), args, &bar, &is_dead));\n }\n CHECK_EQ(\"bar\", V(bar));\n rendez->Unref();\n }\n state.SetItemsProcessed(messages_count * state.iterations());\n delete pool;\n delete cm;\n}\nBENCHMARK(BM_PingPong)->Arg(100)->Arg(200)->Arg(300);\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/rendezvous.cc"},"Test Code 
Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/rendezvous_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1142,"cells":{"ID":{"kind":"string","value":"ecfc7b9e-9c2a-42a9-b642-6e26f02538cb"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"operand_upcaster"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/operand_upcaster.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/operand_upcaster_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/operand_upcaster.h\"\n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/status/statusor.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/hlo_creation_utils.h\"\n#include \"xla/service/shape_inference.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nabsl::StatusOr> MaybeInferShape(\n const HloInstruction* instruction) {\n switch (instruction->opcode()) {\n case HloOpcode::kDot:\n return ShapeInference::InferDotOpShape(\n instruction->operand(0)->shape(), instruction->operand(1)->shape(),\n instruction->dot_dimension_numbers(),\n std::nullopt,\n Cast(instruction)->sparsity());\n case HloOpcode::kConvolution:\n return ShapeInference::InferConvolveShape(\n instruction->operand(0)->shape(), instruction->operand(1)->shape(),\n instruction->feature_group_count(), instruction->batch_group_count(),\n instruction->window(), instruction->convolution_dimension_numbers(),\n std::nullopt);\n default:\n 
return std::optional(std::nullopt);\n }\n}\n} \nbool OperandUpcaster::InstructionMatchesPattern(HloInstruction* instruction) {\n auto status_or_inferred_shape = MaybeInferShape(instruction);\n if (!status_or_inferred_shape.ok() ||\n !status_or_inferred_shape->has_value()) {\n return false;\n }\n if (absl::c_count(instruction->precision_config().operand_precision(),\n PrecisionConfig::PACKED_NIBBLE) == 2) {\n return true;\n }\n PrimitiveType inferred_type = (*status_or_inferred_shape)->element_type();\n if (instruction->shape().element_type() == inferred_type &&\n instruction->operand(0)->shape().element_type() == inferred_type &&\n instruction->operand(1)->shape().element_type() == inferred_type) {\n return false;\n }\n return ShapeUtil::ElementCanUpcast(**status_or_inferred_shape,\n instruction->shape());\n}\nabsl::StatusOr OperandUpcaster::ExpandInstruction(\n HloInstruction* instruction) {\n const bool packed_nibble =\n absl::c_count(instruction->precision_config().operand_precision(),\n PrecisionConfig::PACKED_NIBBLE) == 2;\n auto type = instruction->shape().element_type();\n if (packed_nibble) {\n HloInstruction *lhs_n0 = instruction->mutable_operand(0), *lhs_n1 = lhs_n0,\n *rhs_n0 = instruction->mutable_operand(1), *rhs_n1 = rhs_n0;\n TF_ASSIGN_OR_RETURN(lhs_n0, MakeBinaryHlo(HloOpcode::kShiftLeft, lhs_n0,\n MakeScalarLike(lhs_n0, 4)));\n HloOpcode lhs_shift = ShapeUtil::ElementIsSigned(lhs_n0->shape())\n ? HloOpcode::kShiftRightArithmetic\n : HloOpcode::kShiftRightLogical;\n TF_ASSIGN_OR_RETURN(\n lhs_n0, MakeBinaryHlo(lhs_shift, lhs_n0, MakeScalarLike(lhs_n0, 4)));\n lhs_n0 = MakeConvertToHlo(lhs_n0, type);\n TF_ASSIGN_OR_RETURN(\n lhs_n1, MakeBinaryHlo(lhs_shift, lhs_n1, MakeScalarLike(lhs_n1, 4)));\n lhs_n1 = MakeConvertToHlo(lhs_n1, type);\n TF_ASSIGN_OR_RETURN(rhs_n0, MakeBinaryHlo(HloOpcode::kShiftLeft, rhs_n0,\n MakeScalarLike(rhs_n0, 4)));\n HloOpcode rhs_shift = ShapeUtil::ElementIsSigned(rhs_n0->shape())\n ? 
HloOpcode::kShiftRightArithmetic\n : HloOpcode::kShiftRightLogical;\n TF_ASSIGN_OR_RETURN(\n rhs_n0, MakeBinaryHlo(rhs_shift, rhs_n0, MakeScalarLike(rhs_n0, 4)));\n rhs_n0 = MakeConvertToHlo(rhs_n0, type);\n TF_ASSIGN_OR_RETURN(\n rhs_n1, MakeBinaryHlo(rhs_shift, rhs_n1, MakeScalarLike(rhs_n1, 4)));\n rhs_n1 = MakeConvertToHlo(rhs_n1, type);\n HloInstruction* linear_n0 =\n instruction->parent()->AddInstruction(instruction->CloneWithNewOperands(\n instruction->shape(), {lhs_n0, rhs_n0}));\n linear_n0->mutable_precision_config()->mutable_operand_precision()->Set(\n 0, PrecisionConfig::DEFAULT);\n linear_n0->mutable_precision_config()->mutable_operand_precision()->Set(\n 1, PrecisionConfig::DEFAULT);\n HloInstruction* linear_n1 =\n instruction->parent()->AddInstruction(linear_n0->CloneWithNewOperands(\n instruction->shape(), {lhs_n1, rhs_n1}));\n return MakeBinaryHlo(HloOpcode::kAdd, linear_n0, linear_n1);\n }\n for (int i = 0; i < HloDotInstruction::kOperands; ++i) {\n auto* operand = instruction->mutable_operand(i);\n if (operand->shape().element_type() == type) {\n continue;\n }\n auto upcast_shape = operand->shape();\n upcast_shape.set_element_type(type);\n auto* convert_inst = instruction->AddInstruction(\n HloInstruction::CreateConvert(upcast_shape, operand));\n TF_RETURN_IF_ERROR(\n instruction->ReplaceOperandWithDifferentShape(i, convert_inst));\n }\n return nullptr;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/operand_upcaster.h\"\n#include \n#include \n#include \"absl/strings/string_view.h\"\n#include \"absl/strings/substitute.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/primitive_util.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nnamespace op = ::xla::testing::opcode_matchers;\nclass OperandUpcasterTest\n : public HloTestBase,\n public ::testing::WithParamInterface<\n std::tuple> {};\nbool 
ShouldUpcast(PrimitiveType operand_type, PrimitiveType result_type) {\n return operand_type != result_type &&\n primitive_util::HigherPrecisionType(operand_type, result_type) ==\n result_type;\n}\nTEST_P(OperandUpcasterTest, ConvertInserted) {\n PrimitiveType lhs_type, rhs_type, result_type;\n std::tie(lhs_type, rhs_type, result_type) = GetParam();\n absl::string_view module_tmpl = R\"(\n HloModule module\n ENTRY main {\n p0 = $0[2,3]{1,0} parameter(0)\n p1 = $1[3,2]{1,0} parameter(1)\n ROOT dot = $2[2,2]{1,0} dot(p0, p1), lhs_contracting_dims={1},\n rhs_contracting_dims={0}\n })\";\n auto module_string = absl::Substitute(\n module_tmpl, primitive_util::LowercasePrimitiveTypeName(lhs_type),\n primitive_util::LowercasePrimitiveTypeName(rhs_type),\n primitive_util::LowercasePrimitiveTypeName(result_type));\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n TF_ASSERT_OK_AND_ASSIGN(bool upcasted, OperandUpcaster().Run(module.get()));\n EXPECT_EQ(upcasted, ShouldUpcast(lhs_type, result_type) ||\n ShouldUpcast(rhs_type, result_type));\n auto original_lhs = op::Parameter(0);\n auto original_rhs = op::Parameter(1);\n auto upcasted_lhs =\n ShouldUpcast(lhs_type, result_type)\n ? AllOf(op::Convert(original_lhs),\n op::Shape(absl::Substitute(\n \"$0[2,3]{1,0}\",\n primitive_util::LowercasePrimitiveTypeName(result_type))))\n : original_lhs;\n auto upcasted_rhs =\n ShouldUpcast(rhs_type, result_type)\n ? 
AllOf(op::Convert(original_rhs),\n op::Shape(absl::Substitute(\n \"$0[3,2]{1,0}\",\n primitive_util::LowercasePrimitiveTypeName(result_type))))\n : original_rhs;\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n AllOf(op::Dot(upcasted_lhs, upcasted_rhs),\n op::Shape(absl::Substitute(\n \"$0[2,2]{1,0}\",\n primitive_util::LowercasePrimitiveTypeName(result_type)))));\n}\nINSTANTIATE_TEST_SUITE_P(S16U16, OperandUpcasterTest,\n ::testing::Values(std::make_tuple(S8, S8, S16),\n std::make_tuple(U8, U8, U16)));\nINSTANTIATE_TEST_SUITE_P(S32, OperandUpcasterTest,\n ::testing::Combine(::testing::Values(S8, U8, S16),\n ::testing::Values(S8, U8, S16),\n ::testing::Values(S32)));\nINSTANTIATE_TEST_SUITE_P(U32, OperandUpcasterTest,\n ::testing::Combine(::testing::Values(U8, U16),\n ::testing::Values(U8, U16),\n ::testing::Values(U32)));\nINSTANTIATE_TEST_SUITE_P(BF16, OperandUpcasterTest,\n ::testing::Combine(::testing::Values(BF16, S8, U8),\n ::testing::Values(BF16, S8, U8),\n ::testing::Values(BF16)));\nINSTANTIATE_TEST_SUITE_P(F32, OperandUpcasterTest,\n ::testing::Combine(::testing::Values(BF16, F16),\n ::testing::Values(BF16, F16),\n ::testing::Values(F32)));\nINSTANTIATE_TEST_SUITE_P(NoUpcast, OperandUpcasterTest,\n ::testing::Values(std::make_tuple(F32, F32, BF16),\n std::make_tuple(S32, S32, U32)));\nTEST_F(OperandUpcasterTest, SparseDot) {\n absl::string_view kHlo = R\"(\n HloModule module\n ENTRY main {\n p0 = bf16[2,16]{1,0} parameter(0)\n p1 = bf16[32,2]{1,0} parameter(1)\n meta = u16[2,2]{1,0} parameter(2)\n ROOT dot = f32[2,2]{1,0} dot(p0, p1, meta),\n lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHlo));\n TF_ASSERT_OK_AND_ASSIGN(bool upcasted, OperandUpcaster().Run(module.get()));\n EXPECT_TRUE(upcasted);\n auto upcasted_lhs =\n AllOf(op::Convert(op::Parameter(0)), op::Shape(\"f32[2,16]{1,0}\"));\n auto upcasted_rhs =\n 
AllOf(op::Convert(op::Parameter(1)), op::Shape(\"f32[32,2]{1,0}\"));\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n AllOf(::testing::MakeMatcher(new ::xla::testing::HloMatcher(\n HloOpcode::kDot,\n {upcasted_lhs, upcasted_rhs, op::Parameter(2)})),\n op::Shape(\"f32[2,2]{1,0}\")));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/operand_upcaster.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/operand_upcaster_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1143,"cells":{"ID":{"kind":"string","value":"b969274a-0550-41b7-9010-91181907524b"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"dot_merger"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/dot_merger.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/dot_merger_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/dot_merger.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/container/inlined_vector.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/protobuf_util.h\"\n#include \"xla/service/graphcycles/graphcycles.h\"\n#include \"xla/service/shape_inference.h\"\n#include \"xla/shape.h\"\n#include 
\"xla/shape_util.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nabsl::StatusOr TryMergeSameOperand(HloInstruction* a,\n HloInstruction* b) {\n if (a->shape().layout() != b->shape().layout()) {\n VLOG(3) << \"Can't merge dots because they have a different layout:\\n\"\n << \"\\t\" << a->ToString() << \"\\n\"\n << \"\\t\" << b->ToString();\n return nullptr;\n }\n if (a->operand(0) != b->operand(0) && a->operand(1) != b->operand(1)) {\n VLOG(4) << \"Can't merge dots because they don't share an operand.\\n\"\n << \"\\t\" << a->ToString() << \"\\n\"\n << \"\\t\" << b->ToString();\n return nullptr;\n }\n if (a->operand(0)->shape().element_type() !=\n b->operand(0)->shape().element_type() ||\n a->operand(1)->shape().element_type() !=\n b->operand(1)->shape().element_type() ||\n a->shape().element_type() != b->shape().element_type()) {\n VLOG(3)\n << \"Can't merge dots because their lhs/rhs/return-types don't match.\\n\"\n << \"\\t\" << a->ToString() << \"\\n\"\n << \"\\t\" << b->ToString();\n return nullptr;\n }\n const DotDimensionNumbers& dnums_a = a->dot_dimension_numbers();\n const DotDimensionNumbers& dnums_b = b->dot_dimension_numbers();\n if (!absl::c_equal(dnums_a.lhs_batch_dimensions(),\n dnums_b.lhs_batch_dimensions()) ||\n !absl::c_equal(dnums_a.rhs_batch_dimensions(),\n dnums_b.rhs_batch_dimensions()) ||\n !absl::c_equal(dnums_a.lhs_contracting_dimensions(),\n dnums_b.lhs_contracting_dimensions()) ||\n !absl::c_equal(dnums_a.rhs_contracting_dimensions(),\n dnums_b.rhs_contracting_dimensions())) {\n VLOG(3) << \"Can't merge dots because they have mismatching dnums.\\n\"\n << \"\\t\" << a->ToString() << \"\\n\"\n << \"\\t\" << b->ToString() << \"\\n\"\n << absl::c_equal(dnums_a.lhs_batch_dimensions(),\n dnums_b.lhs_batch_dimensions())\n << \", \"\n << absl::c_equal(dnums_a.rhs_batch_dimensions(),\n dnums_b.rhs_batch_dimensions())\n << \", \"\n << 
absl::c_equal(dnums_a.lhs_contracting_dimensions(),\n dnums_b.lhs_contracting_dimensions())\n << \", \"\n << absl::c_equal(dnums_a.rhs_contracting_dimensions(),\n dnums_b.rhs_contracting_dimensions());\n return nullptr;\n }\n if (!absl::c_equal(a->precision_config().operand_precision(),\n b->precision_config().operand_precision())) {\n VLOG(3) << \"Can't merge dots because they have mismatching operand \"\n \"precisions:\\n\"\n << \"\\t\" << a->ToString() << \"\\n\"\n << \"\\t\" << b->ToString();\n return nullptr;\n }\n HloDotInstruction* dot_a = Cast(a);\n HloDotInstruction* dot_b = Cast(b);\n if (!absl::c_equal(dot_a->sparsity(), dot_b->sparsity(),\n protobuf_util::ProtobufEquals)) {\n VLOG(3) << \"Can't merge dots because they have mismatching sparsity \"\n \"descriptors:\\n\"\n << \"\\t\" << a->ToString() << \"\\n\"\n << \"\\t\" << b->ToString();\n return nullptr;\n }\n VLOG(2) << \"Merging dots sharing an operand:\\n\"\n << \"\\t\" << a->ToString() << \"\\n\"\n << \"\\t\" << b->ToString();\n const DotDimensionNumbers& dnums = a->dot_dimension_numbers();\n bool lhs_same = a->operand(0) == b->operand(0);\n HloInstruction* shared_op = a->mutable_operand(lhs_same ? 0 : 1);\n HloInstruction* diff_op_a = a->mutable_operand(lhs_same ? 1 : 0);\n HloInstruction* diff_op_b = b->mutable_operand(lhs_same ? 
1 : 0);\n if (diff_op_a->shape().layout() != diff_op_b->shape().layout()) {\n VLOG(3) << \"Can't merge dots because the different operands have a \"\n \"different layout:\\n\"\n << \"\\t\" << diff_op_a->ToString() << \"\\n\"\n << \"\\t\" << diff_op_b->ToString();\n return nullptr;\n }\n CHECK_EQ(dnums.lhs_batch_dimensions_size(),\n dnums.rhs_batch_dimensions_size());\n std::set used_dims;\n int64_t shared_op_num_non_contracting_dims =\n shared_op->shape().rank() - dnums.lhs_batch_dimensions_size();\n if (lhs_same) {\n shared_op_num_non_contracting_dims -=\n dnums.lhs_contracting_dimensions_size();\n used_dims.insert(dnums.rhs_contracting_dimensions().begin(),\n dnums.rhs_contracting_dimensions().end());\n used_dims.insert(dnums.rhs_batch_dimensions().begin(),\n dnums.rhs_batch_dimensions().end());\n } else {\n shared_op_num_non_contracting_dims -=\n dnums.rhs_contracting_dimensions_size();\n used_dims.insert(dnums.lhs_contracting_dimensions().begin(),\n dnums.lhs_contracting_dimensions().end());\n used_dims.insert(dnums.lhs_batch_dimensions().begin(),\n dnums.lhs_batch_dimensions().end());\n }\n if (used_dims.size() + 1 != diff_op_a->shape().rank()) {\n VLOG(3)\n << \"Can't merge dots because the different operands don't have exactly \"\n \"one non-contracting dimension:\\n\"\n << \"\\t\" << a->ToString() << \"\\n\"\n << \"\\t\" << b->ToString();\n return nullptr;\n }\n int64_t outer_dim = 0;\n for (auto used_dim : used_dims) {\n if (used_dim != outer_dim) {\n break;\n }\n ++outer_dim;\n }\n std::vector sparsity(dot_a->sparsity().begin(),\n dot_a->sparsity().end());\n std::vector sparse_meta(sparsity.size());\n for (int i = 0; i < sparsity.size(); ++i) {\n HloInstruction* meta = a->mutable_operand(HloDotInstruction::kOperands + i);\n HloInstruction* other_meta =\n b->mutable_operand(HloDotInstruction::kOperands + i);\n if (sparsity[i].index() == (lhs_same ? 
1 : 0)) {\n TF_ASSIGN_OR_RETURN(\n Shape meta_concat_shape,\n ShapeInference::InferConcatOpShape(\n {&meta->shape(), &other_meta->shape()}, outer_dim));\n meta = meta->AddInstruction(HloInstruction::CreateConcatenate(\n meta_concat_shape, {meta, other_meta}, outer_dim));\n } else {\n if (other_meta != meta) {\n VLOG(3)\n << \"Can't merge dots because the sparsity metadata is different:\\n\"\n << \"\\t\" << a->ToString() << \"\\n\"\n << \"\\t\" << b->ToString();\n return nullptr;\n }\n }\n sparse_meta[i] = meta;\n }\n TF_ASSIGN_OR_RETURN(\n Shape concat_shape,\n ShapeInference::InferConcatOpShape(\n {&diff_op_a->shape(), &diff_op_b->shape()}, outer_dim));\n *concat_shape.mutable_layout() = diff_op_a->shape().layout();\n HloInstruction* concat_op =\n diff_op_a->AddInstruction(HloInstruction::CreateConcatenate(\n concat_shape, {diff_op_a, diff_op_b}, outer_dim));\n HloInstruction* dot_lhs = lhs_same ? shared_op : concat_op;\n HloInstruction* dot_rhs = lhs_same ? concat_op : shared_op;\n TF_ASSIGN_OR_RETURN(\n Shape new_dot_shape,\n ShapeInference::InferDotOpShape(\n dot_lhs->shape(), dot_rhs->shape(), dnums,\n a->shape().element_type(), sparsity));\n *new_dot_shape.mutable_layout() = a->shape().layout();\n HloInstruction* new_dot = a->AddInstruction(\n HloInstruction::CreateDot(new_dot_shape, dot_lhs, dot_rhs, dnums,\n a->precision_config(), sparsity, sparse_meta));\n if (!a->metadata().op_name().empty()) {\n new_dot->set_metadata(a->metadata());\n } else if (!b->metadata().op_name().empty()) {\n new_dot->set_metadata(b->metadata());\n }\n DimensionVector start_indices(new_dot_shape.dimensions_size(), 0);\n DimensionVector limit_indices(new_dot_shape.dimensions().begin(),\n new_dot_shape.dimensions().end());\n DimensionVector strides(new_dot_shape.dimensions_size(), 1);\n int64_t slice_dim = new_dot_shape.dimensions_size() -\n (lhs_same ? 
1 : 1 + shared_op_num_non_contracting_dims);\n limit_indices[slice_dim] = a->shape().dimensions(slice_dim);\n HloInstruction* new_a = a->AddInstruction(HloInstruction::CreateSlice(\n a->shape(), new_dot, start_indices, limit_indices, strides));\n TF_RETURN_IF_ERROR(a->ReplaceAllUsesWith(new_a));\n start_indices[slice_dim] = limit_indices[slice_dim];\n limit_indices[slice_dim] = new_dot_shape.dimensions(slice_dim);\n HloInstruction* new_b = b->AddInstruction(HloInstruction::CreateSlice(\n b->shape(), new_dot, start_indices, limit_indices, strides));\n TF_RETURN_IF_ERROR(b->ReplaceAllUsesWith(new_b));\n return new_dot;\n}\nabsl::StatusOr MergeDots(HloComputation* comp,\n int64_t max_size_to_merge) {\n auto is_merge_candidate = [&](HloInstruction* instr) {\n int64_t bytes = ShapeUtil::ByteSizeOfElements(instr->shape());\n for (const HloInstruction* operand : instr->operands()) {\n bytes += ShapeUtil::ByteSizeOfElements(operand->shape());\n }\n return bytes <= max_size_to_merge;\n };\n absl::flat_hash_map>\n equivalence_classes;\n for (HloInstruction* instr : comp->instructions()) {\n if (instr->opcode() != HloOpcode::kDot ||\n !instr->control_predecessors().empty() ||\n !instr->control_successors().empty()) {\n continue;\n }\n for (HloInstruction* operand : instr->operands()) {\n equivalence_classes[operand].insert(instr);\n }\n }\n absl::erase_if(\n equivalence_classes,\n [&](const std::pair>& kv) {\n const auto& v = kv.second;\n return v.size() < 2 || absl::c_none_of(v, is_merge_candidate);\n });\n if (equivalence_classes.empty()) {\n return false;\n }\n GraphCycles graph;\n absl::flat_hash_map graph_ids_map;\n auto graph_id = [&](HloInstruction* instr) {\n auto it_and_inserted = graph_ids_map.emplace(instr, -1);\n auto it = it_and_inserted.first;\n auto inserted = it_and_inserted.second;\n if (inserted) {\n it->second = graph.NewNode();\n }\n return it->second;\n };\n for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {\n int32_t id = graph_id(instr);\n 
for (HloInstruction* operand : instr->operands()) {\n CHECK(graph.InsertEdge(graph_id(operand), id));\n }\n for (HloInstruction* control_pred : instr->control_predecessors()) {\n CHECK(graph.InsertEdge(graph_id(control_pred), id));\n }\n }\n absl::flat_hash_set dead_instrs;\n std::vector keys;\n keys.reserve(equivalence_classes.size());\n for (auto& kv : equivalence_classes) {\n keys.push_back(kv.first);\n }\n absl::c_sort(keys, [](const HloInstruction* a, const HloInstruction* b) {\n return a->unique_id() < b->unique_id();\n });\n for (auto key : keys) {\n const auto& values = equivalence_classes[key];\n absl::InlinedVector dots(values.begin(), values.end());\n absl::c_sort(dots, [](const HloInstruction* a, const HloInstruction* b) {\n return a->unique_id() < b->unique_id();\n });\n for (int64_t i = 0; i < dots.size(); i++) {\n HloInstruction*& a = dots[i];\n if (a == nullptr) {\n continue;\n }\n for (int64_t j = i + 1; j < dots.size(); j++) {\n HloInstruction* b = dots[j];\n if (b == nullptr) {\n continue;\n }\n int32_t a_id = graph_id(a);\n int32_t b_id = graph_id(b);\n if (dead_instrs.contains(a) || dead_instrs.contains(b) ||\n (!is_merge_candidate(a) && !is_merge_candidate(b)) ||\n graph.IsReachableNonConst(a_id, b_id) ||\n graph.IsReachableNonConst(b_id, a_id)) {\n continue;\n }\n TF_ASSIGN_OR_RETURN(HloInstruction * merged, TryMergeSameOperand(a, b));\n if (merged != nullptr) {\n int32_t merged_id = graph_id(merged);\n graph.InsertEdge(a_id, merged_id);\n graph.InsertEdge(b_id, merged_id);\n for (int32_t succ : graph.SuccessorsCopy(a_id)) {\n graph.InsertEdge(merged_id, succ);\n }\n for (int32_t succ : graph.SuccessorsCopy(b_id)) {\n graph.InsertEdge(merged_id, succ);\n }\n dead_instrs.insert(a);\n dead_instrs.insert(b);\n dots[i] = merged;\n dots[j] = nullptr;\n }\n }\n }\n }\n for (HloInstruction* instr : dead_instrs) {\n TF_RETURN_IF_ERROR(comp->RemoveInstruction(instr));\n }\n return !dead_instrs.empty();\n}\n} \nabsl::StatusOr DotMerger::Run(\n 
HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n bool changed = false;\n for (HloComputation* comp :\n module->MakeNonfusionComputations(execution_threads)) {\n TF_ASSIGN_OR_RETURN(bool changed_computation,\n MergeDots(comp, max_size_to_merge_));\n changed |= changed_computation;\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/dot_merger.h\"\n#include \n#include \n#include \n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/algebraic_simplifier.h\"\n#include \"xla/service/pattern_matcher.h\"\n#include \"xla/service/pattern_matcher_gmock.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nnamespace m = ::xla::match;\nclass DotMergerTest : public HloTestBase {\n public:\n DotMergerTest()\n : HloTestBase(false,\n false) {}\n};\nTEST_F(DotMergerTest, MergeRHS) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs = f32[200,100] parameter(0)\n rhs0 = f32[100, 10] parameter(1)\n rhs1 = f32[100, 50] parameter(2)\n dot0 = f32[200, 10] dot(lhs, rhs0), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n dot1 = f32[200, 50] dot(lhs, rhs1), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n ROOT tuple = (f32[200,10], f32[200,50]) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_TRUE(changed);\n const HloInstruction* dot0 = nullptr;\n const HloInstruction* dot1 = nullptr;\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n 
GmockMatch(m::Tuple(m::Slice(m::Op(&dot0)), m::Slice(m::Op(&dot1)))));\n EXPECT_EQ(dot0, dot1);\n EXPECT_THAT(dot0,\n GmockMatch(m::Dot(m::Parameter(0),\n m::Concatenate().WithBinaryOperandsAnyOrder(\n m::Parameter(1), m::Parameter(2)))));\n}\nTEST_F(DotMergerTest, MergeRHSWithLayouts) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs = f32[200,100] parameter(0)\n rhs0 = f32[100, 10]{0,1} parameter(1)\n rhs1 = f32[100, 50]{0,1} parameter(2)\n dot0 = f32[200, 10] dot(lhs, rhs0), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n dot1 = f32[200, 50] dot(lhs, rhs1), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n ROOT tuple = (f32[200,10], f32[200,50]) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_TRUE(changed);\n const HloInstruction* dot0 = nullptr;\n const HloInstruction* dot1 = nullptr;\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n GmockMatch(m::Tuple(m::Slice(m::Op(&dot0)), m::Slice(m::Op(&dot1)))));\n EXPECT_EQ(dot0, dot1);\n Shape expected_concat_shape =\n ShapeUtil::MakeShapeWithDenseLayout(F32, {100, 60}, {0, 1});\n EXPECT_THAT(\n dot0, GmockMatch(m::Dot(m::Parameter(0),\n m::Concatenate()\n .WithBinaryOperandsAnyOrder(m::Parameter(1),\n m::Parameter(2))\n .WithShapeEqualTo(&expected_concat_shape))));\n}\nTEST_F(DotMergerTest, NoMergeDifferentLayoutRHS) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs = f32[200,100] parameter(0)\n rhs0 = f32[100, 10]{0,1} parameter(1)\n rhs1 = f32[100, 50]{1,0} parameter(2)\n dot0 = f32[200, 10] dot(lhs, rhs0), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n dot1 = f32[200, 50] dot(lhs, rhs1), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n ROOT tuple = (f32[200,10], f32[200,50]) tuple(dot0, dot1)\n })\";\n 
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(DotMergerTest, MergeLHS) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs0 = f32[100,200] parameter(0)\n lhs1 = f32[300,200] parameter(1)\n rhs = f32[200, 50] parameter(2)\n dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n ROOT tuple = (f32[100,50], f32[300,50]) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::Tuple(m::Slice(), m::Slice())));\n}\nTEST_F(DotMergerTest, MergeLHSDotsWithNonDefaultLayout) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs0 = f32[100,200] parameter(0)\n lhs1 = f32[300,200] parameter(1)\n rhs = f32[200, 50] parameter(2)\n dot0 = f32[100, 50]{0,1} dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n dot1 = f32[300, 50]{0,1} dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n ROOT tuple = (f32[100,50]{0,1}, f32[300,50]{0,1}) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_TRUE(changed);\n Shape expected_dot_shape =\n ShapeUtil::MakeShapeWithDenseLayout(F32, {400, 50}, {0, 1});\n const HloInstruction* dot0 = nullptr;\n const HloInstruction* dot1 = nullptr;\n 
EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n GmockMatch(m::Tuple(m::Slice(m::Dot(&dot0, m::Op(), m::Op())\n .WithShapeEqualTo(&expected_dot_shape)),\n m::Slice(m::Dot(&dot1, m::Op(), m::Op())))));\n EXPECT_EQ(dot0, dot1);\n}\nTEST_F(DotMergerTest, NoMergeDifferentLayoutLHS) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs0 = f32[100,200]{1,0} parameter(0)\n lhs1 = f32[300,200]{0,1} parameter(1)\n rhs = f32[200, 50] parameter(2)\n dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n ROOT tuple = (f32[100,50], f32[300,50]) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(DotMergerTest, NoMergeDifferentDotLayout) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs0 = f32[100,200] parameter(0)\n lhs1 = f32[300,200] parameter(1)\n rhs = f32[200, 50] parameter(2)\n dot0 = f32[100, 50]{0,1} dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n dot1 = f32[300, 50]{1,0} dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n ROOT tuple = (f32[100,50]{0,1}, f32[300,50]{1,0}) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(DotMergerTest, MergeThree) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs0 = f32[100,200] parameter(0)\n lhs1 = f32[300,200] parameter(1)\n lhs2 = f32[500,200] parameter(2)\n rhs = f32[200, 50] parameter(3)\n dot0 = 
f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n dot2 = f32[500, 50] dot(lhs2, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n ROOT tuple = (f32[100,50], f32[300,50], f32[500,50]) tuple(dot0, dot1, dot2)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_TRUE(changed);\n AlgebraicSimplifier algsimp{AlgebraicSimplifierOptions{}};\n TF_ASSERT_OK(this->RunHloPass(&algsimp, module.get()).status());\n const HloInstruction* s0 = nullptr;\n const HloInstruction* s1 = nullptr;\n const HloInstruction* s2 = nullptr;\n SCOPED_TRACE(module->ToString());\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n GmockMatch(m::Tuple(\n m::Slice(m::Dot(\n &s0,\n m::Concatenate(m::Parameter(0), m::Parameter(1), m::Parameter(2)),\n m::Parameter(3))),\n m::Slice(m::Op(&s1)), m::Slice(m::Op(&s2)))));\n EXPECT_EQ(s0, s1);\n EXPECT_EQ(s1, s2);\n}\nTEST_F(DotMergerTest, NoMergeThreeDueToCycle) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs0 = f32[100,200] parameter(0)\n lhs1 = f32[300,200] parameter(1)\n rhs = f32[200, 50] parameter(2)\n dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n zero = f32[] constant(0)\n lhs2 = f32[500,200] pad(dot0, zero), padding=400_0x150_0\n dot2 = f32[500, 50] dot(lhs2, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n ROOT tuple = (f32[100,50], f32[300,50], f32[500,50]) tuple(dot0, dot1, dot2)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n 
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_TRUE(changed);\n AlgebraicSimplifier algsimp{AlgebraicSimplifierOptions{}};\n TF_ASSERT_OK(this->RunHloPass(&algsimp, module.get()).status());\n const HloInstruction* s0 = nullptr;\n const HloInstruction* s1 = nullptr;\n const HloInstruction* s2 = nullptr;\n SCOPED_TRACE(module->ToString());\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n GmockMatch(m::Tuple(\n m::Slice(m::Dot(&s0, m::Concatenate(m::Parameter(0), m::Parameter(1)),\n m::Parameter(2))),\n m::Slice(m::Op(&s1)), \n m::Dot(&s2, m::Op(), m::Parameter(2)))));\n EXPECT_EQ(s0, s1);\n EXPECT_NE(s0, s2);\n}\nTEST_F(DotMergerTest, NoMergeDataDependency) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs0 = f32[100,200] parameter(0)\n rhs = f32[200, 50] parameter(1)\n dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n zero = f32[] constant(0)\n lhs1 = f32[300,200] pad(dot0, zero), padding=200_0x150_0\n dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n ROOT tuple = (f32[100,50], f32[300,50]) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(DotMergerTest, MergeSameContractingDimsOnBothSides) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs0 = f32[100,200] parameter(0)\n lhs1 = f32[300,200] parameter(1)\n rhs = f32[50, 200] parameter(2)\n dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={1}\n dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={1}\n ROOT tuple = (f32[100,50], f32[300,50]) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n 
ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::Tuple(m::Slice(), m::Slice())));\n}\nTEST_F(DotMergerTest, MergeWithBatchDims) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs0 = f32[2,4,100,200] parameter(0)\n lhs1 = f32[2,4,300,200] parameter(1)\n rhs = f32[2,4,200, 50] parameter(2)\n dot0 = f32[2,4,100, 50] dot(lhs0, rhs), lhs_batch_dims={0,1}, rhs_batch_dims={0,1},\n lhs_contracting_dims={3}, rhs_contracting_dims={2}\n dot1 = f32[2,4,300, 50] dot(lhs1, rhs), lhs_batch_dims={0,1}, rhs_batch_dims={0,1},\n lhs_contracting_dims={3}, rhs_contracting_dims={2}\n ROOT tuple = (f32[2,4,100,50], f32[2,4,300,50]) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::Tuple(m::Slice(), m::Slice())));\n}\nTEST_F(DotMergerTest, MergeWithBatchDimsAndMultipleContractingDims) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs = f32[2,3,4,5] parameter(0)\n rhs0 = f32[2,6,3,4,5] parameter(1)\n rhs1 = f32[2,7,3,4,5] parameter(2)\n dot0 = f32[2,4,6] dot(lhs, rhs0), lhs_batch_dims={0,2}, rhs_batch_dims={0,3},\n lhs_contracting_dims={1,3}, rhs_contracting_dims={2,4}\n dot1 = f32[2,4,7] dot(lhs, rhs1), lhs_batch_dims={0,2}, rhs_batch_dims={0,3},\n lhs_contracting_dims={1,3}, rhs_contracting_dims={2,4}\n ROOT tuple = (f32[2,4,6], f32[2,4,7]) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger 
pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_TRUE(changed);\n TF_ASSERT_OK(verifier().Run(module.get()).status());\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::Tuple(m::Slice(), m::Slice())));\n}\nTEST_F(DotMergerTest, MergeWithUnsortedBatchDims) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs0 = f32[2,4,100,200] parameter(0)\n lhs1 = f32[2,4,300,200] parameter(1)\n rhs = f32[2,4,200, 50] parameter(2)\n dot0 = f32[4,2,100, 50] dot(lhs0, rhs), lhs_batch_dims={1,0}, rhs_batch_dims={1,0},\n lhs_contracting_dims={3}, rhs_contracting_dims={2}\n dot1 = f32[4,2,300, 50] dot(lhs1, rhs), lhs_batch_dims={1,0}, rhs_batch_dims={1,0},\n lhs_contracting_dims={3}, rhs_contracting_dims={2}\n ROOT tuple = (f32[4,2,100,50], f32[4,2,300,50]) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::Tuple(m::Slice(), m::Slice())));\n}\nTEST_F(DotMergerTest, NoMergeDueToIsMergeCandidate) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs0 = f32[100,200] parameter(0)\n lhs1 = f32[300,200] parameter(1)\n lhs2 = f32[500,200] parameter(2)\n rhs = f32[200, 50] parameter(3)\n dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n dot2 = f32[500, 50] dot(lhs2, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n ROOT tuple = (f32[100,50], f32[300,50], f32[500,50]) tuple(dot0, dot1, dot2)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n 
DotMerger pass((100 * 50 + 100 * 200 + 200 * 50) *\n sizeof(float));\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_TRUE(changed);\n const HloInstruction* s0 = nullptr;\n const HloInstruction* s1 = nullptr;\n const HloInstruction* s2 = nullptr;\n SCOPED_TRACE(module->ToString());\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n GmockMatch(m::Tuple(\n m::Slice(m::Dot(&s0, m::Concatenate(m::Parameter(0), m::Parameter(1)),\n m::Parameter(3))),\n m::Slice(m::Op(&s1)),\n m::Dot(&s2, m::Parameter(2), m::Parameter(3)))));\n EXPECT_EQ(s0, s1);\n EXPECT_NE(s0, s2);\n}\nTEST_F(DotMergerTest, NoMergeDifferentLhsBatchDims) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs0 = f32[10,10,10,10] parameter(0)\n lhs1 = f32[10,10,10,10] parameter(1)\n rhs = f32[10,10,10,10] parameter(2)\n dot0 = f32[10,10,10,10] dot(lhs0, rhs), lhs_batch_dims={0,1}, rhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_contracting_dims={2}\n dot1 = f32[10,10,10,10] dot(lhs1, rhs), lhs_batch_dims={0,2}, rhs_batch_dims={0,1}, lhs_contracting_dims={1}, rhs_contracting_dims={2}\n ROOT tuple = (f32[10,10,10,10], f32[10,10,10,10]) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(DotMergerTest, NoMergeDifferentRhsBatchDims) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs0 = f32[10,10,10,10] parameter(0)\n lhs1 = f32[10,10,10,10] parameter(1)\n rhs = f32[10,10,10,10] parameter(2)\n dot0 = f32[10,10,10,10] dot(lhs0, rhs), lhs_batch_dims={0,1}, rhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_contracting_dims={2}\n dot1 = f32[10,10,10,10] dot(lhs1, rhs), lhs_batch_dims={0,1}, rhs_batch_dims={0,2}, lhs_contracting_dims={2}, rhs_contracting_dims={1}\n 
ROOT tuple = (f32[10,10,10,10], f32[10,10,10,10]) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(DotMergerTest, MergeMultipleContractingDims) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs0 = f32[10,10,10] parameter(0)\n lhs1 = f32[10,10,10] parameter(1)\n rhs = f32[10,10,10] parameter(2)\n dot0 = f32[10,10] dot(lhs0, rhs), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}\n dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}\n ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_TRUE(changed);\n const HloInstruction* s0 = nullptr;\n const HloInstruction* s1 = nullptr;\n SCOPED_TRACE(module->ToString());\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n GmockMatch(m::Tuple(\n m::Slice(m::Dot(&s0, m::Concatenate(m::Parameter(0), m::Parameter(1)),\n m::Parameter(2))),\n m::Slice(m::Op(&s1)))));\n EXPECT_EQ(s0, s1);\n}\nTEST_F(DotMergerTest, MergeMultipleNonContractingDimsInRhsSharedOperand) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs0 = f32[8,9,10] parameter(0)\n lhs1 = f32[8,9,11] parameter(1)\n rhs = f32[8,9,12,13] parameter(2)\n dot0 = f32[10,12,13] dot(lhs0, rhs), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}\n dot1 = f32[11,12,13] dot(lhs1, rhs), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}\n ROOT tuple = (f32[10,12,13], f32[11,12,13]) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n 
ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_TRUE(changed);\n TF_ASSERT_OK(verifier().Run(module.get()).status());\n const HloInstruction* s0 = nullptr;\n const HloInstruction* s1 = nullptr;\n SCOPED_TRACE(module->ToString());\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n GmockMatch(m::Tuple(\n m::Slice(m::Dot(&s0, m::Concatenate(m::Parameter(0), m::Parameter(1)),\n m::Parameter(2))),\n m::Slice(m::Op(&s1)))));\n EXPECT_EQ(s0, s1);\n}\nTEST_F(DotMergerTest, NoMergeMultipleOuterDims) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs0 = f32[10,10,10] parameter(0)\n lhs1 = f32[10,10,10] parameter(1)\n rhs = f32[10,10,10] parameter(2)\n dot0 = f32[10,10,10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}\n dot1 = f32[10,10,10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}\n ROOT tuple = (f32[10,10,10,10], f32[10,10,10,10]) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(DotMergerTest, NoMergeDifferentLhsContractingDims) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs0 = f32[10,10] parameter(0)\n lhs1 = f32[10,10] parameter(1)\n rhs = f32[10,10] parameter(2)\n dot0 = f32[10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}\n dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool 
changed, this->RunHloPass(&pass, module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(DotMergerTest, NoMergeDifferentRhsContractingDims) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs0 = f32[10,10] parameter(0)\n lhs1 = f32[10,10] parameter(1)\n rhs = f32[10,10] parameter(2)\n dot0 = f32[10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}\n dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={1}\n ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(DotMergerTest, NoMergeControlPredecessor) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs0 = f32[10,10] parameter(0)\n lhs1 = f32[10,10] parameter(1)\n rhs = f32[10,10] parameter(2)\n dot0 = f32[10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}\n dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}\n dot2 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}, control-predecessors={dot1}\n ROOT tuple = (f32[10,10], f32[10,10], f32[10,10]) tuple(dot0, dot1, dot2)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(DotMergerTest, NoMergeDifferentLhsTypes) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs0 = f32[10,10] parameter(0)\n lhs1 = f16[10,10] parameter(1)\n rhs = f32[10,10] parameter(2)\n dot0 = f32[10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}\n dot1 = f32[10,10] dot(lhs1, 
rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}\n ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(DotMergerTest, NoMergeDifferentRhsTypes) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs = f32[10,10] parameter(0)\n rhs0 = f32[10,10] parameter(1)\n rhs1 = f16[10,10] parameter(2)\n dot0 = f32[10,10] dot(lhs, rhs0), lhs_contracting_dims={0}, rhs_contracting_dims={0}\n dot1 = f32[10,10] dot(lhs, rhs1), lhs_contracting_dims={0}, rhs_contracting_dims={0}\n ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(DotMergerTest, NoMergeDifferentReturnTypes) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs0 = f16[10,10] parameter(0)\n lhs1 = f16[10,10] parameter(1)\n rhs = f16[10,10] parameter(2)\n dot0 = f16[10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}\n dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}\n ROOT tuple = (f16[10,10], f32[10,10]) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(DotMergerTest, MergeWithTypeUpgrade) {\n absl::string_view module_string = R\"(\n HloModule module\n ENTRY main {\n lhs0 = f16[10,10] parameter(0)\n lhs1 = f16[10,10] parameter(1)\n 
rhs = f16[10,10] parameter(2)\n dot0 = f32[10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}\n dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}\n ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_string));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n SCOPED_TRACE(module->ToString());\n EXPECT_TRUE(changed);\n const HloInstruction* d0 = nullptr;\n const HloInstruction* d1 = nullptr;\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n GmockMatch(m::Tuple(\n m::Slice(m::Dot(&d0, m::Concatenate(m::Parameter(0), m::Parameter(1)),\n m::Parameter(2))\n .WithShape(F32, {20, 10})),\n m::Slice(m::Op(&d1)))));\n EXPECT_EQ(d0, d1);\n}\nTEST_F(DotMergerTest, MergeSparseDotsSameMetadata) {\n absl::string_view kHlo = R\"(\n HloModule test\n ENTRY main {\n lhs0 = f16[5,10,32] parameter(0)\n lhs1 = f16[5,10,32] parameter(1)\n rhs = f16[5,10,16] parameter(2)\n meta = u16[5,10,2] parameter(3)\n dot0 = f32[5,10,10] dot(lhs0, rhs, meta), sparsity=R.2@2:4,\n lhs_batch_dims={0}, rhs_batch_dims={0},\n lhs_contracting_dims={2}, rhs_contracting_dims={2}\n dot1 = f32[5,10,10] dot(lhs1, rhs, meta), sparsity=R.2@2:4,\n lhs_batch_dims={0}, rhs_batch_dims={0},\n lhs_contracting_dims={2}, rhs_contracting_dims={2}\n ROOT tuple = (f32[5,10,10], f32[5,10,10]) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHlo));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_TRUE(changed);\n const HloInstruction *d0, *d1;\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::Tuple(\n m::Slice(m::Op(&d0)\n .WithOpcode(HloOpcode::kDot)\n .WithOperand(0, m::Concatenate(m::Parameter(0),\n 
m::Parameter(1)))\n .WithOperand(1, m::Parameter(2))\n .WithOperand(2, m::Parameter(3))\n .WithShape(F32, {5, 20, 10})),\n m::Slice(m::Op(&d1)))));\n EXPECT_EQ(d0, d1);\n EXPECT_EQ(d0->operand(2)->shape(), ShapeUtil::MakeShape(U16, {5, 10, 2}));\n}\nTEST_F(DotMergerTest, MergeSparseDotsConcatMetadata) {\n absl::string_view kHlo = R\"(\n HloModule test\n ENTRY main {\n lhs0 = f16[5,10,16] parameter(0)\n lhs1 = f16[5,10,16] parameter(1)\n rhs = f16[5,10,32] parameter(2)\n meta0 = u16[5,10,2] parameter(3)\n meta1 = u16[5,10,2] parameter(4)\n dot0 = f32[5,10,10] dot(lhs0, rhs, meta0), sparsity=L.2@2:4,\n lhs_batch_dims={0}, rhs_batch_dims={0},\n lhs_contracting_dims={2}, rhs_contracting_dims={2}\n dot1 = f32[5,10,10] dot(lhs1, rhs, meta1), sparsity=L.2@2:4,\n lhs_batch_dims={0}, rhs_batch_dims={0},\n lhs_contracting_dims={2}, rhs_contracting_dims={2}\n ROOT tuple = (f32[5,10,10], f32[5,10,10]) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHlo));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_TRUE(changed);\n const HloInstruction *d0, *d1;\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::Tuple(\n m::Slice(m::Op(&d0)\n .WithOpcode(HloOpcode::kDot)\n .WithOperand(0, m::Concatenate(m::Parameter(0),\n m::Parameter(1)))\n .WithOperand(1, m::Parameter(2))\n .WithOperand(2, m::Concatenate(m::Parameter(3),\n m::Parameter(4)))\n .WithShape(F32, {5, 20, 10})),\n m::Slice(m::Op(&d1)))));\n EXPECT_EQ(d0, d1);\n EXPECT_EQ(d0->operand(2)->shape(), ShapeUtil::MakeShape(U16, {5, 20, 2}));\n}\nTEST_F(DotMergerTest, MergeSparseDotsDifferentMetadata) {\n absl::string_view kHlo = R\"(\n HloModule test\n ENTRY main {\n lhs0 = f16[5,10,32] parameter(0)\n lhs1 = f16[5,10,32] parameter(1)\n rhs = f16[5,10,16] parameter(2)\n meta1 = u16[5,10,2] parameter(3)\n meta2 = u16[5,10,2] parameter(4)\n dot0 = f32[5,10,10] 
dot(lhs0, rhs, meta1), sparsity=R.2@2:4,\n lhs_batch_dims={0}, rhs_batch_dims={0},\n lhs_contracting_dims={2}, rhs_contracting_dims={2}\n dot1 = f32[5,10,10] dot(lhs1, rhs, meta2), sparsity=R.2@2:4,\n lhs_batch_dims={0}, rhs_batch_dims={0},\n lhs_contracting_dims={2}, rhs_contracting_dims={2}\n ROOT tuple = (f32[5,10,10], f32[5,10,10]) tuple(dot0, dot1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHlo));\n DotMerger pass(std::numeric_limits::max());\n TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));\n EXPECT_FALSE(changed);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dot_merger.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dot_merger_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1144,"cells":{"ID":{"kind":"string","value":"bcbda2a7-93ed-47d7-b14f-5c2b1c80a977"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"host_offloading_prepare"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/host_offloading_prepare.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/host_offloading_prepare_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/host_offloading_prepare.h\"\n#include \n#include \n#include \n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include 
\"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/call_graph.h\"\n#include \"xla/service/host_memory_offload_annotations.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nusing xla::host_memory_offload_annotations::kMoveToHostCustomCallTarget;\nbool IsHostAsyncStart(const HloInstruction* instruction) {\n return instruction->opcode() == HloOpcode::kAsyncStart &&\n instruction->async_execution_thread() == HloInstruction::kHostThread &&\n instruction->async_wrapped_instruction()->opcode() == HloOpcode::kCall;\n}\nabsl::StatusOr RemoveSurroundingMoveCustomCalls(\n HloInstruction* async_start) {\n bool removed = false;\n for (HloInstruction* operand : async_start->operands()) {\n if (operand->IsCustomCall(kMoveToHostCustomCallTarget)) {\n CHECK_EQ(operand->operands().size(), 1);\n VLOG(1) << \"Replacing \" << operand->ToString() << \" with \"\n << operand->operands().at(0)->ToString();\n TF_RETURN_IF_ERROR(\n operand->ReplaceAllUsesWith(operand->mutable_operand(0)));\n TF_RETURN_IF_ERROR(async_start->parent()->RemoveInstruction(operand));\n removed = true;\n }\n }\n return removed;\n}\nabsl::StatusOr ElideMoveCustomCalls(HloModule* module) {\n bool changed = false;\n std::unique_ptr call_graph = CallGraph::Build(module);\n for (HloComputation* computation : module->computations()) {\n if (computation->execution_thread() != HloInstruction::kHostThread) {\n continue;\n }\n std::vector callers =\n call_graph->GetComputationCallers(computation);\n for (HloInstruction* caller : callers) {\n VLOG(2) << \"Hlo computation \" << computation->name()\n << \" is offloaded to host and has caller \" << caller->ToString();\n if (caller->parent()->execution_thread() == HloInstruction::kHostThread) {\n VLOG(3) << \"Nested host computation, must be a async-wrapper\";\n continue;\n }\n VLOG(2) << \"Going to adjust before and after \" << caller->name();\n }\n }\n for (HloComputation* computation : module->computations()) {\n for 
(HloInstruction* instruction : computation->instructions()) {\n if (IsHostAsyncStart(instruction)) {\n VLOG(2) << \"Found async start of host computation: \"\n << instruction->ToString() << \" done must be \"\n << instruction->users().at(0)->ToString();\n TF_ASSIGN_OR_RETURN(bool removed,\n RemoveSurroundingMoveCustomCalls(instruction));\n changed = changed || removed;\n }\n }\n }\n return changed;\n}\nabsl::StatusOr ConvertToCustomCall(HloModule* module) {\n bool changed = false;\n for (HloComputation* computation : module->computations()) {\n for (HloInstruction* instruction : computation->instructions()) {\n if (IsHostAsyncStart(instruction)) {\n auto* call_start = Cast(instruction);\n auto* call = call_start->async_wrapped_instruction();\n auto custom_call = HloInstruction::CreateCustomCall(\n call->shape(), call->operands(), call->called_computations().at(0),\n \"HostExecute\");\n custom_call->set_output_to_operand_aliasing(\n call->output_operand_aliasing());\n HloComputation* async_computation =\n call_start->async_wrapped_computation();\n async_computation->set_root_instruction(\n async_computation->AddInstruction(std::move(custom_call)));\n TF_RETURN_IF_ERROR(async_computation->RemoveInstruction(call));\n changed = true;\n }\n }\n }\n return changed;\n}\n} \nabsl::StatusOr HostOffloadingPrepare::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n switch (rewrite_) {\n case Rewrite::kElideMoveToHost:\n return ElideMoveCustomCalls(module);\n case Rewrite::kConvertToCustomCall:\n return ConvertToCustomCall(module);\n }\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/host_offloading_prepare.h\"\n#include \n#include \n#include \n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/host_memory_offload_annotations.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include 
\"xla/tsl/lib/core/status_test_util.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nusing Rewrite = HostOffloadingPrepare::Rewrite;\nclass HostOffloadingPrepareTest : public HloTestBase {\n protected:\n absl::StatusOr RunRewrite(HloModule* module, Rewrite rewrite) {\n TF_EXPECT_OK(verifier().Run(module).status());\n if (module->has_schedule()) {\n return absl::InternalError(\"Expected a non-scheduled module\");\n }\n HostOffloadingPrepare pass(rewrite);\n TF_ASSIGN_OR_RETURN(bool changed, pass.Run(module));\n return changed;\n }\n std::vector GetHostOffloadAsyncStartInstructions(\n const HloModule* module) {\n std::vector result;\n for (const HloComputation* computation : module->computations()) {\n for (const HloInstruction* instruction : computation->instructions()) {\n if (instruction->opcode() == HloOpcode::kAsyncStart &&\n instruction->async_execution_thread() ==\n HloInstruction::kHostThread) {\n result.push_back(instruction);\n }\n }\n }\n return result;\n }\n};\nTEST_F(HostOffloadingPrepareTest, SingleInputHasMoveToHost) {\n const std::string& hlo_string = R\"(\nHloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}\nhost_computation {\n Arg_0.0 = s32[32]{0} parameter(0)\n ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.0)\n}, execution_thread=\"host\"\nasync_computation {\n param_0 = s32[32]{0} parameter(0)\n ROOT call = s32[32]{0} call(param_0), to_apply=host_computation, frontend_attributes={_xla_compute_type=\"host\"}\n}, execution_thread=\"host\"\nENTRY main {\n Arg_0.1 = s32[32]{0:T(128)} parameter(0)\n constant.2 = s32[]{:T(128)} constant(2)\n broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}\n multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)\n move_to_host = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target=\"MoveToHost\"\n start = ((s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_host), 
async_execution_thread=\"host\", calls=async_computation\n ROOT done = s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type=\"host\"}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunRewrite(module.get(), Rewrite::kElideMoveToHost));\n EXPECT_TRUE(changed);\n for (const HloInstruction* instruction :\n GetHostOffloadAsyncStartInstructions(module.get())) {\n for (const HloInstruction* operand : instruction->operands()) {\n EXPECT_FALSE(operand->IsCustomCall(\n {host_memory_offload_annotations::kMoveToHostCustomCallTarget}));\n }\n for (const HloInstruction* user : instruction->users()) {\n EXPECT_FALSE(user->IsCustomCall(\n {host_memory_offload_annotations::kMoveToDeviceCustomCallTarget}));\n }\n }\n}\nTEST_F(HostOffloadingPrepareTest, MultipleInputHasOneMoveToHost) {\n const std::string& hlo_string = R\"(\nHloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}\nhost_computation {\n Arg_0.0 = s32[32]{0} parameter(0)\n Arg_0.1 = s32[32]{0} parameter(1)\n ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.1)\n}, execution_thread=\"host\"\nasync_computation {\n param_0 = s32[32]{0} parameter(0)\n param_1 = s32[32]{0} parameter(1)\n ROOT call = s32[32]{0} call(param_0, param_1), to_apply=host_computation, frontend_attributes={_xla_compute_type=\"host\"}\n}, execution_thread=\"host\"\nENTRY main {\n Arg_0.1 = s32[32]{0:T(128)} parameter(0)\n constant.2 = s32[]{:T(128)} constant(2)\n broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}\n multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)\n move_to_host = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target=\"MoveToHost\"\n start = ((s32[32]{0:T(128)}, s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_host, move_to_host), async_execution_thread=\"host\", calls=async_computation\n ROOT done = s32[32]{0:T(128)} 
async-done(start), frontend_attributes={_xla_compute_type=\"host\"}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunRewrite(module.get(), Rewrite::kElideMoveToHost));\n EXPECT_TRUE(changed);\n for (const HloInstruction* instruction :\n GetHostOffloadAsyncStartInstructions(module.get())) {\n for (const HloInstruction* operand : instruction->operands()) {\n EXPECT_FALSE(operand->IsCustomCall(\n {host_memory_offload_annotations::kMoveToHostCustomCallTarget}));\n }\n for (const HloInstruction* user : instruction->users()) {\n EXPECT_FALSE(user->IsCustomCall(\n {host_memory_offload_annotations::kMoveToDeviceCustomCallTarget}));\n }\n }\n}\nTEST_F(HostOffloadingPrepareTest, MultipleInputHasMultipleMoveToHost) {\n const std::string& hlo_string = R\"(\nHloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}\nhost_computation {\n Arg_0.0 = s32[32]{0} parameter(0)\n Arg_0.1 = s32[32]{0} parameter(1)\n ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.1)\n}, execution_thread=\"host\"\nasync_computation {\n param_0 = s32[32]{0} parameter(0)\n param_1 = s32[32]{0} parameter(1)\n ROOT call = s32[32]{0} call(param_0, param_1), to_apply=host_computation, frontend_attributes={_xla_compute_type=\"host\"}\n}, execution_thread=\"host\"\nENTRY main {\n Arg_0.1 = s32[32]{0:T(128)} parameter(0)\n constant.2 = s32[]{:T(128)} constant(2)\n broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}\n multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)\n move_to_host.1 = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target=\"MoveToHost\"\n move_to_host.2 = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target=\"MoveToHost\"\n start = ((s32[32]{0:T(128)}, s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_host.1, move_to_host.2), async_execution_thread=\"host\", calls=async_computation\n ROOT done = 
s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type=\"host\"}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunRewrite(module.get(), Rewrite::kElideMoveToHost));\n EXPECT_TRUE(changed);\n for (const HloInstruction* instruction :\n GetHostOffloadAsyncStartInstructions(module.get())) {\n for (const HloInstruction* operand : instruction->operands()) {\n EXPECT_FALSE(operand->IsCustomCall(\n {host_memory_offload_annotations::kMoveToHostCustomCallTarget}));\n }\n for (const HloInstruction* user : instruction->users()) {\n EXPECT_FALSE(user->IsCustomCall(\n {host_memory_offload_annotations::kMoveToDeviceCustomCallTarget}));\n }\n }\n}\nTEST_F(HostOffloadingPrepareTest, SingleInputHasMoveToDevice) {\n const std::string& hlo_string = R\"(\nHloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}\nhost_computation {\n Arg_0.0 = s32[32]{0} parameter(0)\n ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.0)\n}, execution_thread=\"host\"\nasync_computation {\n param_0 = s32[32]{0} parameter(0)\n ROOT call = s32[32]{0} call(param_0), to_apply=host_computation, frontend_attributes={_xla_compute_type=\"host\"}\n}, execution_thread=\"host\"\nENTRY main {\n Arg_0.1 = s32[32]{0:T(128)} parameter(0)\n constant.2 = s32[]{:T(128)} constant(2)\n broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}\n multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)\n move_to_device = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target=\"MoveToDevice\"\n start = ((s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_device), async_execution_thread=\"host\", calls=async_computation\n ROOT done = s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type=\"host\"}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool 
changed,\n RunRewrite(module.get(), Rewrite::kElideMoveToHost));\n EXPECT_FALSE(changed);\n}\nTEST_F(HostOffloadingPrepareTest, MultipleInputHasOneMoveToDevice) {\n const std::string& hlo_string = R\"(\nHloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}\nhost_computation {\n Arg_0.0 = s32[32]{0} parameter(0)\n Arg_0.1 = s32[32]{0} parameter(1)\n ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.1)\n}, execution_thread=\"host\"\nasync_computation {\n param_0 = s32[32]{0} parameter(0)\n param_1 = s32[32]{0} parameter(1)\n ROOT call = s32[32]{0} call(param_0, param_1), to_apply=host_computation, frontend_attributes={_xla_compute_type=\"host\"}\n}, execution_thread=\"host\"\nENTRY main {\n Arg_0.1 = s32[32]{0:T(128)} parameter(0)\n constant.2 = s32[]{:T(128)} constant(2)\n broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}\n multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)\n move_to_device = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target=\"MoveToDevice\"\n custom-call.cloned.call-start = ((s32[32]{0:T(128)}, s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_device, move_to_device), async_execution_thread=\"host\", calls=async_computation\n ROOT custom-call.cloned.call-done = s32[32]{0:T(128)} async-done(custom-call.cloned.call-start), frontend_attributes={_xla_compute_type=\"host\"}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunRewrite(module.get(), Rewrite::kElideMoveToHost));\n EXPECT_FALSE(changed);\n}\nTEST_F(HostOffloadingPrepareTest, MultipleInputHasMultipleMoveToDevice) {\n const std::string& hlo_string = R\"(\nHloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}\nhost_computation {\n Arg_0.0 = s32[32]{0} parameter(0)\n Arg_0.1 = s32[32]{0} parameter(1)\n ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.1)\n}, 
execution_thread=\"host\"\nasync_computation {\n param_0 = s32[32]{0} parameter(0)\n param_1 = s32[32]{0} parameter(1)\n ROOT call = s32[32]{0} call(param_0, param_1), to_apply=host_computation, frontend_attributes={_xla_compute_type=\"host\"}\n}, execution_thread=\"host\"\nENTRY main {\n Arg_0.1 = s32[32]{0:T(128)} parameter(0)\n constant.2 = s32[]{:T(128)} constant(2)\n broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}\n multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)\n move_to_device.1 = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target=\"MoveToDevice\"\n move_to_device.2 = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target=\"MoveToDevice\"\n start = ((s32[32]{0:T(128)}, s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_device.1, move_to_device.2), async_execution_thread=\"host\", calls=async_computation\n ROOT done = s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type=\"host\"}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunRewrite(module.get(), Rewrite::kElideMoveToHost));\n EXPECT_FALSE(changed);\n}\nTEST_F(HostOffloadingPrepareTest, ConvertToCustomCall) {\n const char* hlo = R\"(\nHloModule my_module\nhost_computation {\n Arg_0.0 = s32[32] parameter(0)\n ROOT multiply.0 = s32[32] multiply(Arg_0.0, Arg_0.0)\n}, execution_thread=\"host\"\nasync_computation {\n param_0 = s32[32] parameter(0)\n ROOT call = s32[32] call(param_0), to_apply=host_computation\n}, execution_thread=\"host\"\nENTRY main {\n Arg_0.1 = s32[32] parameter(0)\n start = ((s32[32]), s32[32], u32[]) async-start(Arg_0.1),\n async_execution_thread=\"host\", calls=async_computation\n ROOT done = s32[32] async-done(start)\n}\n)\";\n const char* expected = R\"(\n)\";\n RunAndFilecheckHloRewrite(\n hlo, HostOffloadingPrepare(Rewrite::kConvertToCustomCall), expected);\n}\n} \n} "},"Code 
Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_offloading_prepare.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_offloading_prepare_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1145,"cells":{"ID":{"kind":"string","value":"b9f15842-1826-48a4-bfa0-e2fbdeeca04f"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"convert_async_collectives_to_sync"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/transforms/convert_async_collectives_to_sync.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/transforms/convert_async_collectives_to_sync_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/transforms/convert_async_collectives_to_sync.h\"\n#include \n#include \n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/status/status.h\"\n#include \"absl/types/span.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_schedule.h\"\n#include \"xla/service/gpu/backend_configs.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace gpu {\nabsl::Status GpuConvertAsyncCollectivesToSync::ConvertAsyncInstructionsToSync(\n HloComputation* computation,\n absl::Span> async_pairs)\n const {\n absl::flat_hash_map replaced_ops;\n CollectiveBackendConfig sync_config;\n sync_config.set_is_sync(true);\n for (auto& [async_start, async_done] : async_pairs) {\n TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,\n async_start->backend_config());\n 
*gpu_config.mutable_collective_backend_config() = sync_config;\n TF_RETURN_IF_ERROR(async_start->set_backend_config(gpu_config));\n replaced_ops[async_start] = nullptr;\n replaced_ops[async_done] = async_start;\n }\n HloModule* module = computation->parent();\n const HloInstructionSequence& sequence =\n module->schedule().sequence(computation);\n std::vector new_sequence;\n new_sequence.reserve(sequence.size());\n for (HloInstruction* instr : sequence.instructions()) {\n auto it = replaced_ops.find(instr);\n if (it == replaced_ops.end()) {\n new_sequence.push_back(instr);\n continue;\n }\n if (it->second == nullptr) {\n continue;\n }\n new_sequence.push_back(it->second);\n new_sequence.push_back(instr);\n }\n module->schedule().set_sequence(computation, new_sequence);\n return absl::OkStatus();\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/gpu/transforms/convert_async_collectives_to_sync.h\"\n#include \n#include \n#include \n#include \"absl/status/status.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/gpu/backend_configs.pb.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nusing ::testing::IsFalse;\nusing ::testing::IsTrue;\nclass GpuConvertAsyncCollectivesToSyncTest : public HloTestBase {\n public:\n absl::Status RunPass(HloModule *module, bool expect_change,\n HloPredicate is_nop = {}) {\n TF_ASSIGN_OR_RETURN(bool changed,\n GpuConvertAsyncCollectivesToSync{is_nop}.Run(module));\n EXPECT_EQ(changed, expect_change);\n return absl::OkStatus();\n }\n bool IsSync(HloModule *module, std::string_view name) {\n const HloInstruction *inst = FindInstruction(module, name);\n if (inst == nullptr) {\n return false;\n }\n auto backend_config = inst->backend_config()\n .value()\n 
.collective_backend_config();\n return backend_config.is_sync();\n }\n HloPredicate is_nop_simple_ =\n HloPredicateIsOp;\n};\nTEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllReduce) {\n const absl::string_view hlo_string = R\"(\n HloModule test, is_scheduled=true\n apply_op {\n x = u32[] parameter(0)\n y = u32[] parameter(1)\n ROOT apply_op = u32[] add(x, y)\n }\n ENTRY test_computation {\n id = u32[] replica-id()\n start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3\n ROOT done = u32[] all-reduce-done(start)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(module.get(), true));\n EXPECT_THAT(IsSync(module.get(), \"start\"), IsTrue());\n}\nTEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllReduceWithNop) {\n const absl::string_view hlo_string = R\"(\n HloModule test, is_scheduled=true\n apply_op {\n x = u32[] parameter(0)\n y = u32[] parameter(1)\n ROOT apply_op = u32[] add(x, y)\n }\n ENTRY test_computation {\n id = u32[] replica-id()\n start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3, replica_groups={{0,1}, {2,3}}\n id2 = f32[] bitcast(id)\n ROOT done = u32[] all-reduce-done(start)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(module.get(), true, is_nop_simple_));\n EXPECT_THAT(IsSync(module.get(), \"start\"), IsTrue());\n}\nTEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleCollectiveBroadcast) {\n const absl::string_view hlo_string = R\"(\n HloModule test, is_scheduled=true\n collective_broadcast {\n p0 = u32[8] parameter(0)\n ROOT result = u32[8] collective-broadcast(p0), replica_groups={{0,1}, {2,3}}\n }\n ENTRY main {\n data = u32[8] parameter(0)\n cb-start = ((u32[8]{0}), u32[8]{0}) async-start(u32[8]{0} %data), calls=collective_broadcast\n ROOT %ars = u32[8]{0} async-done(((u32[8]{0}), u32[8]{0}) %cb-start), calls=collective_broadcast\n }\n )\";\n 
TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(module.get(), true));\n EXPECT_THAT(IsSync(module.get(), \"cb-start\"), IsTrue());\n}\nTEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllReduceWithNonNop) {\n const absl::string_view hlo_string = R\"(\n HloModule test, is_scheduled=true\n apply_op {\n x = u32[] parameter(0)\n y = u32[] parameter(1)\n ROOT apply_op = u32[] add(x, y)\n }\n ENTRY test_computation {\n id = u32[] replica-id()\n start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3\n id2 = u32[] add(id, id)\n ROOT done = u32[] all-reduce-done(start)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(module.get(), false));\n}\nTEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllGather) {\n const absl::string_view hlo_string = R\"(\n HloModule test, is_scheduled=true\n ENTRY test_computation {\n a1 = u32[1, 2] parameter(0)\n ags = (u32[1, 2], u32[2, 2]) all-gather-start(a1), dimensions={0}, channel_id=3\n ROOT allgather = u32[2,2] all-gather-done(ags)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(module.get(), true));\n EXPECT_THAT(IsSync(module.get(), \"ags\"), IsTrue());\n}\nTEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleCollectivePermute) {\n const absl::string_view hlo_string = R\"(\n HloModule test, is_scheduled=true\n ENTRY test_computation {\n p = u32[2] parameter(0)\n start = (u32[2], u32[2], u32[], u32[]) collective-permute-start(p), source_target_pairs={{0,1}, {1,0}}\n ROOT done = u32[2] collective-permute-done(start)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(module.get(), true));\n EXPECT_THAT(IsSync(module.get(), \"start\"), IsTrue());\n}\nTEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleReduceScatter) {\n const absl::string_view hlo_string = R\"(\n HloModule 
test, is_scheduled=true\n add {\n lhs = u32[] parameter(0)\n rhs = u32[] parameter(1)\n ROOT add = u32[] add(lhs, rhs)\n }\n reduce_scatter {\n p0 = u32[8] parameter(0)\n ROOT result = u32[4] reduce-scatter(p0), replica_groups={{0,3}, {1,2}},\n dimensions={0}, to_apply=add\n }\n ENTRY main {\n data = u32[8] parameter(0)\n rs-start = ((u32[8]{0}), u32[4]{0}) async-start(u32[8]{0} %data), calls=reduce_scatter\n ROOT %ars = u32[4]{0} async-done(((u32[8]{0}), u32[4]{0}) %rs-start), calls=reduce_scatter\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(module.get(), true));\n EXPECT_THAT(IsSync(module.get(), \"rs-start\"), IsTrue());\n}\nTEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllToAll) {\n const absl::string_view hlo_string = R\"(\n HloModule test, is_scheduled=true\n all_to_all {\n p0 = u32[2] parameter(0)\n ROOT result = u32[2] all-to-all(p0), dimensions={0}, replica_groups={{0,1},{2,3}}\n }\n ENTRY test_computation {\n a1 = u32[2] parameter(0)\n a2a-start = ((u32[2]), u32[2]) async-start(u32[2] a1), calls=all_to_all\n ROOT a2s = u32[2] async-done(a2a-start), calls=all_to_all\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(module.get(), true));\n EXPECT_THAT(IsSync(module.get(), \"a2a-start\"), IsTrue());\n}\nTEST_F(GpuConvertAsyncCollectivesToSyncTest, ControlDeps) {\n const absl::string_view hlo_string = R\"(\n HloModule test, is_scheduled=true\n apply_op {\n x = u32[] parameter(0)\n y = u32[] parameter(1)\n ROOT apply_op = u32[] add(x, y)\n }\n ENTRY test_computation {\n id = u32[] replica-id()\n start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3\n done1 = u32[] all-reduce-done(start1)\n start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4, control-predecessors={done1}\n done2 = u32[] all-reduce-done(start2)\n ROOT x = u32[] add(done1, done2)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto 
module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(module.get(), true));\n EXPECT_THAT(IsSync(module.get(), \"start1\"), IsTrue());\n EXPECT_THAT(IsSync(module.get(), \"start2\"), IsTrue());\n}\nTEST_F(GpuConvertAsyncCollectivesToSyncTest, MultipleInFlightStreaming) {\n const absl::string_view hlo_string = R\"(\n HloModule test, is_scheduled=true\n apply_op {\n x = u32[] parameter(0)\n y = u32[] parameter(1)\n ROOT apply_op = u32[] add(x, y)\n }\n ENTRY test_computation {\n id = u32[] replica-id()\n start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3\n start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4\n done1 = u32[] all-reduce-done(start1)\n done2 = u32[] all-reduce-done(start2)\n ROOT x = u32[] add(done1, done2)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(module.get(), true));\n EXPECT_THAT(IsSync(module.get(), \"start1\"), IsTrue());\n EXPECT_THAT(IsSync(module.get(), \"start2\"), IsTrue());\n}\nTEST_F(GpuConvertAsyncCollectivesToSyncTest, MultipleInFlightNested) {\n const absl::string_view hlo_string = R\"(\n HloModule test, is_scheduled=true\n apply_op {\n x = u32[] parameter(0)\n y = u32[] parameter(1)\n ROOT apply_op = u32[] add(x, y)\n }\n ENTRY test_computation {\n id = u32[] replica-id()\n start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3\n start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4\n done2 = u32[] all-reduce-done(start2)\n done1 = u32[] all-reduce-done(start1)\n ROOT x = u32[] add(done1, done2)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(module.get(), true));\n EXPECT_THAT(IsSync(module.get(), \"start1\"), IsTrue());\n EXPECT_THAT(IsSync(module.get(), \"start2\"), IsTrue());\n}\nTEST_F(GpuConvertAsyncCollectivesToSyncTest, MultipleInFlightNestedPartial) {\n const absl::string_view hlo_string = R\"(\n 
HloModule test, is_scheduled=true\n apply_op {\n x = u32[] parameter(0)\n y = u32[] parameter(1)\n ROOT apply_op = u32[] add(x, y)\n }\n ENTRY test_computation {\n id = u32[] replica-id()\n start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3\n start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4\n done2 = u32[] all-reduce-done(start2)\n id2 = u32[] add(done2, done2)\n done1 = u32[] all-reduce-done(start1)\n ROOT x = u32[] add(done1, done2)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(module.get(), true));\n EXPECT_THAT(IsSync(module.get(), \"start1\"), IsFalse());\n EXPECT_THAT(IsSync(module.get(), \"start2\"), IsTrue());\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/convert_async_collectives_to_sync.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/convert_async_collectives_to_sync_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1146,"cells":{"ID":{"kind":"string","value":"13af8cfa-a80f-4abd-b903-d4e5aa058635"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"while_loop_trip_count_annotator"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/while_loop_trip_count_annotator.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/while_loop_trip_count_annotator_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/while_loop_trip_count_annotator.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/status/statusor.h\"\n#include 
\"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/while_loop_analysis.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\nnamespace xla {\nabsl::StatusOr WhileLoopTripCountAnnotator::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n bool changed = false;\n for (const HloComputation* comp : module->computations(execution_threads)) {\n for (HloInstruction* instr : comp->instructions()) {\n if (instr->opcode() != HloOpcode::kWhile) {\n continue;\n }\n if (auto trip_count = ComputeWhileLoopTripCount(instr)) {\n WhileLoopBackendConfig config;\n config.mutable_known_trip_count()->set_n(*trip_count);\n TF_RETURN_IF_ERROR(instr->set_backend_config(config));\n changed = true;\n }\n }\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/while_loop_trip_count_annotator.h\"\n#include \"xla/test.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nclass TripCountAnnotatorTest : public HloTestBase {};\nTEST_F(TripCountAnnotatorTest, KnownSmallTripCount) {\n const char* kModuleStr = R\"(\n HloModule test\n Body {\n param = (s32[]) parameter(0)\n i = s32[] get-tuple-element(param), index=0\n one = s32[] constant(1)\n i_plus_one = s32[] add(i, one)\n ROOT tuple = (s32[]) tuple(i_plus_one)\n }\n Cond {\n param = (s32[]) parameter(0)\n i = s32[] get-tuple-element(param), index=0\n trip_count = s32[] constant(10)\n ROOT done = pred[] compare(i, trip_count), direction=LT\n }\n ENTRY test {\n i_start = s32[] constant(0)\n initial_tuple = (s32[]) tuple(i_start)\n ROOT while = (s32[]) while(initial_tuple), condition=Cond, body=Body\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));\n 
WhileLoopTripCountAnnotator pass;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));\n ASSERT_TRUE(changed);\n TF_ASSERT_OK_AND_ASSIGN(auto config,\n m->entry_computation()\n ->root_instruction()\n ->backend_config());\n EXPECT_EQ(10, config.known_trip_count().n());\n}\nTEST_F(TripCountAnnotatorTest, KnownLargeTripCount) {\n const char* kModuleStr = R\"(\n HloModule test\n Body {\n param = (s32[]) parameter(0)\n i = s32[] get-tuple-element(param), index=0\n one = s32[] constant(1)\n i_plus_one = s32[] add(i, one)\n ROOT tuple = (s32[]) tuple(i_plus_one)\n }\n Cond {\n param = (s32[]) parameter(0)\n i = s32[] get-tuple-element(param), index=0\n trip_count = s32[] constant(1000000)\n ROOT done = pred[] compare(i, trip_count), direction=LT\n }\n ENTRY test {\n i_start = s32[] constant(0)\n initial_tuple = (s32[]) tuple(i_start)\n ROOT while = (s32[]) while(initial_tuple), condition=Cond, body=Body\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));\n WhileLoopTripCountAnnotator pass;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));\n ASSERT_TRUE(changed);\n TF_ASSERT_OK_AND_ASSIGN(auto config,\n m->entry_computation()\n ->root_instruction()\n ->backend_config());\n EXPECT_EQ(1000000, config.known_trip_count().n());\n}\nTEST_F(TripCountAnnotatorTest, NonzeroStart) {\n const char* kModuleStr = R\"(\n HloModule test\n Body {\n param = (s32[]) parameter(0)\n i = s32[] get-tuple-element(param), index=0\n one = s32[] constant(1)\n i_plus_one = s32[] add(i, one)\n ROOT tuple = (s32[]) tuple(i_plus_one)\n }\n Cond {\n param = (s32[]) parameter(0)\n i = s32[] get-tuple-element(param), index=0\n trip_count = s32[] constant(1000000)\n ROOT done = pred[] compare(i, trip_count), direction=LT\n }\n ENTRY test {\n i_start = s32[] constant(10)\n initial_tuple = (s32[]) tuple(i_start)\n ROOT while = (s32[]) while(initial_tuple), condition=Cond, body=Body\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, 
ParseAndReturnVerifiedModule(kModuleStr));\n WhileLoopTripCountAnnotator pass;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));\n ASSERT_TRUE(changed);\n TF_ASSERT_OK_AND_ASSIGN(auto config,\n m->entry_computation()\n ->root_instruction()\n ->backend_config());\n EXPECT_EQ(999990, config.known_trip_count().n());\n}\nTEST_F(TripCountAnnotatorTest, LessThanOrEqualTo) {\n const char* kModuleStr = R\"(\n HloModule test\n Body {\n param = (s32[]) parameter(0)\n i = s32[] get-tuple-element(param), index=0\n one = s32[] constant(1)\n i_plus_one = s32[] add(i, one)\n ROOT tuple = (s32[]) tuple(i_plus_one)\n }\n Cond {\n param = (s32[]) parameter(0)\n i = s32[] get-tuple-element(param), index=0\n trip_count = s32[] constant(1000000)\n ROOT done = pred[] compare(i, trip_count), direction=LE\n }\n ENTRY test {\n i_start = s32[] constant(10)\n initial_tuple = (s32[]) tuple(i_start)\n ROOT while = (s32[]) while(initial_tuple), condition=Cond, body=Body\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));\n WhileLoopTripCountAnnotator pass;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));\n ASSERT_TRUE(changed);\n TF_ASSERT_OK_AND_ASSIGN(auto config,\n m->entry_computation()\n ->root_instruction()\n ->backend_config());\n EXPECT_EQ(999991, config.known_trip_count().n());\n}\nTEST_F(TripCountAnnotatorTest, Int64Overflow) {\n const char* kModuleStr = R\"(\n HloModule test\n Body {\n param = (s64[]) parameter(0)\n i = s64[] get-tuple-element(param), index=0\n one = s64[] constant(1)\n i_plus_one = s64[] add(i, one)\n ROOT tuple = (s64[]) tuple(i_plus_one)\n }\n Cond {\n param = (s64[]) parameter(0)\n i = s64[] get-tuple-element(param), index=0\n trip_count = s64[] constant(9223372036854775807) \n ROOT done = pred[] compare(i, trip_count), direction=LE\n }\n ENTRY test {\n i_start = s64[] constant(-9223372036854775808) \n initial_tuple = (s64[]) tuple(i_start)\n ROOT while = (s64[]) while(initial_tuple), 
condition=Cond, body=Body\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));\n WhileLoopTripCountAnnotator pass;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));\n EXPECT_FALSE(changed);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_trip_count_annotator.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_trip_count_annotator_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1147,"cells":{"ID":{"kind":"string","value":"53602328-c7cf-4c6d-8ee9-b9779b3bed8b"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"ar_crs_combiner"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/ar_crs_combiner.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/ar_crs_combiner_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/ar_crs_combiner.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_query.h\"\n#include \"xla/literal.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/service/call_graph.h\"\n#include 
\"xla/service/hlo_replication_analysis.h\"\n#include \"xla/service/pattern_matcher.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/status_macros.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/status.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nabsl::StatusOr ReplaceReplicatedAllReduce(HloModule* module,\n int64_t partition_count) {\n TF_ASSIGN_OR_RETURN(\n auto replication_analysis,\n HloReplicationAnalysis::Run(module, true));\n bool changed = false;\n int64_t next_channel = hlo_query::NextChannelId(*module);\n for (auto computation : module->computations()) {\n for (auto instruction : computation->instructions()) {\n if (auto ar = DynCast(instruction)) {\n const Shape& shape = ar->shape();\n if (ar->channel_id()) {\n continue;\n }\n if (ar->replica_groups().size() > 1) {\n continue;\n }\n if (shape.IsTuple() || shape.element_type() != F32) {\n continue;\n }\n if (module->config().replica_count() < 8 * partition_count) {\n continue;\n }\n if (replication_analysis->HloInstructionIsReplicatedAt(ar, {})) {\n VLOG(2) << \"Replaced replicated all-reduce:\" << ar->ToString();\n ar->set_channel_id(next_channel++);\n auto divisor =\n computation->AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR0(partition_count)));\n auto bcast = computation->AddInstruction(\n HloInstruction::CreateBroadcast(shape, divisor, {}));\n auto div = computation->AddInstruction(HloInstruction::CreateBinary(\n ar->shape(), HloOpcode::kDivide, ar, bcast));\n TF_RETURN_IF_ERROR(ar->ReplaceAllUsesWith(div));\n changed = true;\n }\n }\n }\n }\n return changed;\n}\nbool HasCombinableReplicaGroup(HloInstruction* hlo, int64_t num_partitions) {\n auto all_reduce = Cast(hlo);\n auto replica_groups = all_reduce->replica_groups();\n const int64_t replica_count = hlo->GetModule()->config().replica_count();\n CHECK(all_reduce->IsCrossModuleAllReduce());\n if 
(all_reduce->use_global_device_ids()) {\n if (replica_groups.size() != replica_count) {\n return false;\n }\n for (const auto& group : replica_groups) {\n if (group.replica_ids_size() != num_partitions) {\n return false;\n }\n absl::flat_hash_set partition_ids;\n int64_t replica_id = group.replica_ids(0) / num_partitions;\n for (int64_t i = 0; i < num_partitions; ++i) {\n if (group.replica_ids(i) / num_partitions != replica_id) {\n return false;\n }\n partition_ids.insert(group.replica_ids(i) % num_partitions);\n }\n if (partition_ids.size() != num_partitions) {\n return false;\n }\n }\n return true;\n }\n return replica_groups.size() == replica_count;\n}\n} \nnamespace m = match;\nstd::optional ArCrsCombiner::MatchesArCrsPattern(\n HloInstruction* instruction) {\n auto can_ar_move_past_instruction = [](HloInstruction* instruction) -> bool {\n if (instruction->user_count() != 1) {\n return false;\n }\n switch (instruction->opcode()) {\n case HloOpcode::kBitcast:\n case HloOpcode::kTranspose:\n case HloOpcode::kReshape:\n return true;\n case HloOpcode::kConvert:\n return ShapeUtil::ElementIsFloating(instruction->shape()) ==\n ShapeUtil::ElementIsFloating(instruction->operand(0)->shape());\n case HloOpcode::kAdd:\n case HloOpcode::kSubtract:\n case HloOpcode::kMultiply:\n return ShapeUtil::ElementIsFloating(instruction->shape());\n default:\n return false;\n }\n };\n auto computation_is_addition = [](HloComputation* c) {\n return c->instruction_count() == 3 &&\n Match(c->root_instruction(), m::Add(m::Parameter(), m::Parameter()));\n };\n if (instruction->IsCrossModuleAllReduce() &&\n HasCombinableReplicaGroup(instruction, num_spatial_partitions_) &&\n computation_is_addition(instruction->called_computations()[0]) &&\n instruction->user_count() == 1) {\n auto next = instruction->users()[0];\n int64_t distance = 1;\n while (!next->IsCrossReplicaAllReduce()) {\n if (can_ar_move_past_instruction(next)) {\n next = next->users()[0];\n } else {\n return std::nullopt;\n }\n 
++distance;\n }\n if (!Cast(next)->IsNoop() &&\n computation_is_addition(next->called_computations()[0])) {\n ArCrsPair pair(instruction, next, distance);\n VLOG(2) << \"ArCrsPair matching pattern: \" << pair.ToString();\n return pair;\n }\n }\n return std::nullopt;\n}\nstd::optional ArCrsCombiner::WhileFromBodyParameter(\n HloInstruction* instruction) {\n CHECK_EQ(HloOpcode::kParameter, instruction->opcode());\n HloComputation* computation = instruction->parent();\n auto caller_instructions = call_graph_->GetComputationCallers(computation);\n if (caller_instructions.size() == 1) {\n auto caller_instruction = caller_instructions[0];\n if (caller_instruction->opcode() == HloOpcode::kWhile) {\n return caller_instruction;\n }\n }\n return std::nullopt;\n}\nstd::optional ArCrsCombiner::ConditionalFromBodyParameter(\n HloInstruction* instruction) {\n CHECK_EQ(HloOpcode::kParameter, instruction->opcode());\n HloComputation* computation = instruction->parent();\n auto caller_instructions = call_graph_->GetComputationCallers(computation);\n if (caller_instructions.size() == 1) {\n auto caller_instruction = caller_instructions[0];\n if (caller_instruction->opcode() == HloOpcode::kConditional) {\n return caller_instruction;\n }\n }\n return std::nullopt;\n}\nstd::optional> ArCrsCombiner::GetAllTuples(\n HloInstruction* instruction,\n absl::flat_hash_set* visited) {\n if (visited->find(instruction) != visited->end()) {\n return std::vector();\n }\n visited->insert(instruction);\n switch (instruction->opcode()) {\n case HloOpcode::kTuple: {\n return std::vector({instruction});\n }\n case HloOpcode::kDomain: {\n return GetAllTuples(instruction->operands()[0], visited);\n }\n case HloOpcode::kParameter: {\n auto maybe_while = WhileFromBodyParameter(instruction);\n if (maybe_while) {\n auto while_instr = *maybe_while;\n auto init_tuples = GetAllTuples(while_instr->while_init(), visited);\n auto body_tuples = GetAllTuples(\n while_instr->while_body()->root_instruction(), 
visited);\n if (!init_tuples || !body_tuples) {\n return std::nullopt;\n }\n auto result = *init_tuples;\n result.insert(result.end(), body_tuples->begin(), body_tuples->end());\n return result;\n }\n auto maybe_conditional = ConditionalFromBodyParameter(instruction);\n if (maybe_conditional) {\n auto cond_instr = *maybe_conditional;\n std::vector tuples;\n for (int64_t i = 0; i < cond_instr->branch_computations().size(); ++i) {\n if (cond_instr->branch_computation(i)->parameter_instruction(0) ==\n instruction) {\n auto branch_tuples =\n GetAllTuples(cond_instr->mutable_operand(i + 1), visited);\n if (!branch_tuples) {\n return std::nullopt;\n }\n tuples.insert(tuples.end(), branch_tuples->begin(),\n branch_tuples->end());\n }\n }\n return tuples;\n }\n return std::nullopt;\n }\n case HloOpcode::kGetTupleElement: {\n std::vector result_tuples;\n auto tuples = GetAllTuples(instruction->operands()[0], visited);\n if (!tuples) {\n return std::nullopt;\n }\n for (auto tuple : *tuples) {\n auto tmp_tuples = GetAllTuples(\n tuple->mutable_operand(instruction->tuple_index()), visited);\n if (!tmp_tuples) {\n return std::nullopt;\n }\n result_tuples.insert(result_tuples.end(), tmp_tuples->begin(),\n tmp_tuples->end());\n }\n return result_tuples;\n }\n case HloOpcode::kConditional: {\n std::vector result_tuples;\n const auto& branch_computations = instruction->branch_computations();\n result_tuples.reserve(branch_computations.size());\n for (HloComputation* body : branch_computations) {\n if (body->root_instruction()->opcode() != HloOpcode::kTuple) {\n return std::nullopt;\n }\n result_tuples.push_back(body->root_instruction());\n }\n return result_tuples;\n }\n case HloOpcode::kWhile: {\n auto init_tuples = GetAllTuples(instruction->while_init(), visited);\n auto body_tuples =\n GetAllTuples(instruction->while_body()->root_instruction(), visited);\n if (!init_tuples || !body_tuples) {\n return std::nullopt;\n }\n auto result = *init_tuples;\n result.insert(result.end(), 
body_tuples->begin(), body_tuples->end());\n return result;\n }\n default:\n return std::nullopt;\n }\n}\nbool ArCrsCombiner::TupleElementsComputeSameValue(\n HloInstruction* tuple_shaped_instruction, int64_t i1, int64_t i2,\n absl::flat_hash_map* visited_pairs) {\n absl::flat_hash_set visited;\n auto tuples = GetAllTuples(tuple_shaped_instruction, &visited);\n if (!tuples) {\n return false;\n }\n for (auto tuple : *tuples) {\n CHECK_EQ(tuple->opcode(), HloOpcode::kTuple);\n if (!InstructionsComputeSameValue(tuple->mutable_operand(i1),\n tuple->mutable_operand(i2),\n visited_pairs)) {\n return false;\n }\n }\n return true;\n}\nbool ArCrsCombiner::TestInstructionsComputeSameValue(HloInstruction* i1,\n HloInstruction* i2) {\n ArCrsCombiner combiner(2,\n false);\n auto module = i1->GetModule();\n CHECK_EQ(module, i2->GetModule());\n combiner.call_graph_ = CallGraph::Build(module);\n absl::flat_hash_map visited_pairs;\n return combiner.InstructionsComputeSameValue(i1, i2, &visited_pairs);\n}\nbool ArCrsCombiner::InstructionsComputeSameValue(\n HloInstruction* i1, HloInstruction* i2,\n absl::flat_hash_map* visited_pairs) {\n if (i1 == i2) {\n return true;\n }\n auto uid1 = i1->unique_id();\n auto uid2 = i2->unique_id();\n auto min_uid = std::min(uid1, uid2);\n auto max_uid = std::max(uid1, uid2);\n auto it = visited_pairs->find(min_uid);\n if (it != visited_pairs->end() && max_uid == it->second) {\n return true;\n }\n auto opcode1 = i1->opcode();\n auto operands1 = i1->operands();\n if (opcode1 != i2->opcode() || operands1.size() != i2->operands().size()) {\n return false;\n }\n auto eq_computations = [](const HloComputation* a, const HloComputation* b) {\n return *a == *b;\n };\n auto eq_operands = [](const HloInstruction*, const HloInstruction*) {\n return true;\n };\n if (i1->IsCrossModuleAllReduce()) {\n return i1->Identical(*i2, eq_operands, eq_computations,\n false);\n }\n visited_pairs->emplace(min_uid, max_uid);\n for (int i = 0; i < operands1.size(); ++i) {\n 
auto operand1 = operands1[i];\n auto operand2 = i2->operands()[i];\n if (!InstructionsComputeSameValue(operand1, operand2, visited_pairs)) {\n return false;\n }\n }\n if (opcode1 == HloOpcode::kParameter) {\n return false;\n }\n if (opcode1 == HloOpcode::kGetTupleElement) {\n return i1->tuple_index() == i2->tuple_index() ||\n TupleElementsComputeSameValue(operands1[0], i1->tuple_index(),\n i2->tuple_index(), visited_pairs);\n }\n auto eq_instructions = [](const HloInstruction* i1,\n const HloInstruction* i2) -> bool { return true; };\n return i1->Identical(*i2, eq_instructions, eq_computations,\n false);\n}\nvoid ArCrsCombiner::GroupAllReducesById(HloModule* module) {\n absl::flat_hash_set discarded_ar_ids;\n for (HloComputation* computation : module->MakeNonfusionComputations()) {\n for (HloInstruction* instruction : computation->instructions()) {\n auto maybe_pair = MatchesArCrsPattern(instruction);\n if (maybe_pair) {\n auto pair = *maybe_pair;\n int64_t ar_id = *(instruction->channel_id());\n if (discarded_ar_ids.find(ar_id) != discarded_ar_ids.end()) {\n continue;\n }\n auto it = crs_reserved_map_.find(pair.crs);\n if (it != crs_reserved_map_.end()) {\n auto prev_ar_id = it->second;\n CHECK(all_reduce_map_.find(ar_id) == all_reduce_map_.end());\n CHECK_NE(prev_ar_id, ar_id);\n auto prev_pair = all_reduce_map_[prev_ar_id].back();\n int64_t prev_distance = prev_pair.distance;\n if (prev_distance < pair.distance) {\n VLOG(2) << \"Replacing ArCrsPair: \" << prev_pair.ToString()\n << \" with ArCrsPair: \" << pair.ToString();\n all_reduce_map_.erase(prev_ar_id);\n discarded_ar_ids.insert(prev_ar_id);\n all_reduce_map_[ar_id].push_back(pair);\n crs_reserved_map_[pair.crs] = ar_id;\n } else {\n discarded_ar_ids.insert(ar_id);\n }\n } else {\n if (all_reduce_map_.find(ar_id) != all_reduce_map_.end()) {\n int64_t prev_distance = all_reduce_map_[ar_id].back().distance;\n CHECK_EQ(prev_distance, pair.distance)\n << \"All ARs with the same AR ID must have the same distance 
\"\n \"from the corresponding CRSs. Found: \"\n << prev_distance << \" and \" << pair.distance;\n }\n all_reduce_map_[ar_id].push_back(pair);\n crs_reserved_map_[pair.crs] = ar_id;\n }\n }\n }\n }\n}\nabsl::Status ArCrsCombiner::KeepProvablyEqualInstructionGroupsMPMD() {\n for (auto it = all_reduce_map_.begin(); it != all_reduce_map_.end();) {\n auto copy_it = it++; \n auto channel_id = copy_it->first;\n VLOG(2)\n << \"KeepProvablyEqualInstructionGroups. Checking AllReduce channel id: \"\n << channel_id << \"\\n\";\n auto pairs_vec = copy_it->second;\n TF_RET_CHECK(pairs_vec.size() == num_spatial_partitions_);\n auto instr_0 = pairs_vec[0].ar;\n for (int i = 1; i < pairs_vec.size(); ++i) {\n auto instr_i = pairs_vec[i].ar;\n auto next_0 = instr_0->users()[0];\n auto next_i = instr_i->users()[0];\n absl::flat_hash_map visited_pairs;\n while (true) {\n if (!InstructionsComputeSameValue(next_0, next_i, &visited_pairs)) {\n all_reduce_map_.erase(copy_it);\n VLOG(2) << \"KeepProvablyEqualInstructionGroups. Erased AllReduce \"\n \"channel id: \"\n << channel_id << \"\\n\";\n break;\n }\n if (next_0->IsCrossReplicaAllReduce()) {\n break;\n }\n next_0 = next_0->users()[0];\n next_i = next_i->users()[0];\n }\n }\n }\n return absl::OkStatus();\n}\nabsl::Status ArCrsCombiner::KeepProvablyEqualInstructionGroupsSPMD(\n HloModule* module) {\n TF_ASSIGN_OR_RETURN(\n auto replication_analysis,\n HloReplicationAnalysis::Run(module, true));\n for (auto it = all_reduce_map_.begin(); it != all_reduce_map_.end();) {\n auto copy_it = it++; \n auto channel_id = copy_it->first;\n VLOG(2)\n << \"KeepProvablyEqualInstructionGroups. 
Checking AllReduce channel id: \"\n << channel_id << \"\\n\";\n auto pairs_vec = copy_it->second;\n TF_RET_CHECK(pairs_vec.size() == 1);\n auto instr = pairs_vec[0].ar;\n auto next = instr->users()[0];\n while (true) {\n TF_RET_CHECK(next->shape().IsArray());\n if (!replication_analysis->HloInstructionIsReplicatedAt(next, {})) {\n all_reduce_map_.erase(copy_it);\n VLOG(2) << \"KeepProvablyEqualInstructionGroups. Erased AllReduce \"\n \"channel id: \"\n << channel_id << \"\\n\";\n break;\n }\n if (next->IsCrossReplicaAllReduce()) {\n break;\n }\n next = next->users()[0];\n }\n }\n return absl::OkStatus();\n}\nabsl::StatusOr ArCrsCombiner::RewriteGraph() {\n if (all_reduce_map_.empty()) {\n return false;\n }\n for (const auto& it : all_reduce_map_) {\n auto pairs_vec = it.second;\n for (auto pair : pairs_vec) {\n auto all_reduce = pair.ar;\n auto parent_computation = all_reduce->parent();\n auto channel_id = all_reduce->channel_id();\n auto prev = all_reduce->mutable_operand(0);\n auto next = all_reduce->users()[0];\n TF_CHECK_OK(all_reduce->ReplaceUseWith(next, prev));\n TF_CHECK_OK(parent_computation->RemoveInstruction(all_reduce));\n while (!next->IsCrossReplicaAllReduce()) {\n switch (next->opcode()) {\n case HloOpcode::kBitcast:\n case HloOpcode::kTranspose:\n case HloOpcode::kReshape:\n case HloOpcode::kConvert:\n case HloOpcode::kMultiply:\n break;\n case HloOpcode::kAdd:\n case HloOpcode::kSubtract: {\n auto other_operand = (next->operands()[0] == prev)\n ? 
next->operands()[1]\n : next->operands()[0];\n if (other_operand->IsCrossModuleAllReduce() &&\n other_operand->user_count() == 1) {\n TF_CHECK_OK(other_operand->ReplaceAllUsesWith(\n other_operand->mutable_operand(0)));\n } else {\n auto shape = other_operand->shape();\n Literal lit(shape);\n lit.PopulateWithValue(num_spatial_partitions_);\n auto divisor = parent_computation->AddInstruction(\n HloInstruction::CreateConstant(lit.Clone()));\n auto division = parent_computation->AddInstruction(\n HloInstruction::CreateBinary(shape, HloOpcode::kDivide,\n other_operand, divisor));\n TF_CHECK_OK(other_operand->ReplaceUseWith(next, division));\n }\n break;\n }\n default:\n LOG(FATAL) << \"Unexpected instruction: \" << next->ToShortString();\n }\n prev = next;\n next = next->users()[0];\n }\n next->set_channel_id(channel_id);\n }\n }\n return true;\n}\nabsl::StatusOr ArCrsCombiner::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n call_graph_ = CallGraph::Build(module);\n GroupAllReducesById(module);\n if (spmd_partition_) {\n TF_RETURN_IF_ERROR(KeepProvablyEqualInstructionGroupsSPMD(module));\n } else {\n TF_RETURN_IF_ERROR(KeepProvablyEqualInstructionGroupsMPMD());\n }\n TF_ASSIGN_OR_RETURN(auto changed, RewriteGraph());\n if (module->config().replica_count() > 1 && spmd_partition_) {\n TF_ASSIGN_OR_RETURN(auto replaced, ReplaceReplicatedAllReduce(\n module, num_spatial_partitions_));\n changed |= replaced;\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/ar_crs_combiner.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/status/statusor.h\"\n#include \"absl/types/span.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nnamespace op = xla::testing::opcode_matchers;\nclass ArCrsCombinerTest 
: public HloTestBase {};\nTEST_F(ArCrsCombinerTest, SameValueTestBasecase) {\n const char* module_str = R\"(\nHloModule foobar\nENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) {\n %p = f32[2,2] parameter(0)\n %constant.f32.1 = f32[2,2] constant({{1, 2}, {3, 4}})\n %constant.f32.2 = f32[2,2] constant({{1, 2}, {3, 4}})\n ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32.1, %constant.f32.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_tuple = module->entry_computation()->root_instruction();\n auto i1 = root_tuple->operands()[0];\n auto i2 = root_tuple->operands()[1];\n EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(\n i1, module->entry_computation()->parameter_instruction(0)));\n EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));\n}\nTEST_F(ArCrsCombinerTest, SameValueTestBasecase2) {\n const char* module_str = R\"(\nHloModule foobar\nENTRY %entrycomp (x: f32[]) -> (f32[], f32[]) {\n %x = f32[] parameter(0)\n ROOT %tuple = (f32[], f32[]) tuple(%x, %x)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_tuple = module->entry_computation()->root_instruction();\n auto i1 = root_tuple->operands()[0];\n auto i2 = root_tuple->operands()[1];\n EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));\n}\nTEST_F(ArCrsCombinerTest, SameValueTestBasecase3) {\n const char* module_str = R\"(\nHloModule foobar\nENTRY %entrycomp (x: f32[], y: f32[]) -> (f32[], f32[]) {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %tuple = (f32[], f32[]) tuple(%x, %y)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_tuple = module->entry_computation()->root_instruction();\n auto i1 = root_tuple->operands()[0];\n auto i2 = root_tuple->operands()[1];\n EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, 
i2));\n}\nTEST_F(ArCrsCombinerTest, SameValueTestNumOperands) {\n const char* module_str = R\"(\nHloModule foobar\nENTRY %entrycomp (p: f32[2,2]) -> ((f32[2,2]), (f32[2,2], f32[2,2])) {\n %p = f32[2,2] parameter(0)\n %constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})\n %tuple1 = (f32[2,2]) tuple(%constant.f32)\n %tuple2 = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)\n ROOT %tuple = ((f32[2,2]), (f32[2,2], f32[2,2])) tuple(%tuple1, %tuple2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_tuple = module->entry_computation()->root_instruction();\n auto i1 = root_tuple->operands()[0];\n auto i2 = root_tuple->operands()[1];\n EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));\n}\nTEST_F(ArCrsCombinerTest, SameValueTestSliceIndicesMatch) {\n const char* module_str = R\"(\nHloModule foobar\nENTRY %entrycomp (p: f32[2]) -> (f32[1], f32[1]) {\n %p = f32[2] parameter(0)\n %slice.1 = f32[1] slice(f32[2] %p), slice={[0:1]}\n %slice.2 = f32[1] slice(f32[2] %p), slice={[0:1]}\n ROOT %tuple = (f32[1], f32[1]) tuple(%slice.1, %slice.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_tuple = module->entry_computation()->root_instruction();\n auto i1 = root_tuple->operands()[0];\n auto i2 = root_tuple->operands()[1];\n EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));\n}\nTEST_F(ArCrsCombinerTest, SameValueTestSliceIndicesDontMatch) {\n const char* module_str = R\"(\nHloModule foobar\nENTRY %entrycomp (p: f32[2]) -> (f32[1], f32[1]) {\n %p = f32[2] parameter(0)\n %slice.1 = f32[1] slice(f32[2] %p), slice={[0:1]}\n %slice.2 = f32[1] slice(f32[2] %p), slice={[1:2]}\n ROOT %tuple = (f32[1], f32[1]) tuple(%slice.1, %slice.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_tuple = module->entry_computation()->root_instruction();\n auto 
i1 = root_tuple->operands()[0];\n auto i2 = root_tuple->operands()[1];\n EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));\n}\nTEST_F(ArCrsCombinerTest, SameValueTestTupleElementSameIndex) {\n const char* module_str = R\"(\nHloModule foobar\nENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) {\n %p = f32[2,2] parameter(0)\n %constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})\n %tuple.1 = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)\n %get-tuple-element.1 = f32[2,2] get-tuple-element(%tuple.1), index=0\n %get-tuple-element.2 = f32[2,2] get-tuple-element(%tuple.1), index=0\n ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%get-tuple-element.1, %get-tuple-element.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_tuple = module->entry_computation()->root_instruction();\n auto i1 = root_tuple->operands()[0];\n auto i2 = root_tuple->operands()[1];\n EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));\n}\nTEST_F(ArCrsCombinerTest, SameValueTestTupleElementDifferentIndex1) {\n const char* module_str = R\"(\nHloModule foobar\nENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) {\n %p = f32[2,2] parameter(0)\n %constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})\n %tuple.1 = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)\n %get-tuple-element.1 = f32[2,2] get-tuple-element(%tuple.1), index=0\n %get-tuple-element.2 = f32[2,2] get-tuple-element(%tuple.1), index=1\n ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%get-tuple-element.1, %get-tuple-element.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_tuple = module->entry_computation()->root_instruction();\n auto i1 = root_tuple->operands()[0];\n auto i2 = root_tuple->operands()[1];\n EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));\n}\nTEST_F(ArCrsCombinerTest, SameValueTestTupleElementDifferentIndex2) {\n const char* 
module_str = R\"(\nHloModule foobar\nENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) {\n %p = f32[2,2] parameter(0)\n %constant.f32.1 = f32[2,2] constant({{1, 2}, {3, 4}})\n %constant.f32.2 = f32[2,2] constant({{2, 3}, {4, 5}})\n %tuple.1 = (f32[2,2], f32[2,2]) tuple(%constant.f32.1, %constant.f32.2)\n %get-tuple-element.1 = f32[2,2] get-tuple-element(%tuple.1), index=0\n %get-tuple-element.2 = f32[2,2] get-tuple-element(%tuple.1), index=1\n ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%get-tuple-element.1, %get-tuple-element.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_tuple = module->entry_computation()->root_instruction();\n auto i1 = root_tuple->operands()[0];\n auto i2 = root_tuple->operands()[1];\n EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));\n}\nTEST_F(ArCrsCombinerTest, SameValueTestWhile1) {\n const char* module_str = R\"(\nHloModule foobar\n%condition (x: (f32[2,2], f32[2,2])) -> pred[] {\n %x = (f32[2,2], f32[2,2]) parameter(0)\n %constant.0 = s32[] constant(0)\n %constant.1 = s32[] constant(1)\n ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %constant.0), direction=GT\n}\n%body (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {\n %x = (f32[2,2], f32[2,2]) parameter(0)\n %constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})\n %get-tuple-element.1 = f32[2,2] get-tuple-element(%x), index=0\n %get-tuple-element.2 = f32[2,2] get-tuple-element(%x), index=1\n %add.1 = f32[2,2] add(%get-tuple-element.1, %constant.f32)\n %add.2 = f32[2,2] add(%get-tuple-element.2, %constant.f32)\n ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2)\n}\nENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) {\n %constant.f32 = f32[2,2] constant({{3, 4}, {5, 6}})\n %init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)\n ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition, body=%body\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr 
module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_while = module->entry_computation()->root_instruction();\n auto body_tuple = root_while->while_body()->root_instruction();\n auto i1 = body_tuple->operands()[0];\n auto i2 = body_tuple->operands()[1];\n EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));\n}\nTEST_F(ArCrsCombinerTest, SameValueTestWhile2) {\n const char* module_str = R\"(\nHloModule foobar\n%condition (x: (f32[2,2], f32[2,2])) -> pred[] {\n %x = (f32[2,2], f32[2,2]) parameter(0)\n %constant.0 = s32[] constant(0)\n %constant.1 = s32[] constant(1)\n ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %constant.0), direction=GT\n}\n%body (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {\n %x = (f32[2,2], f32[2,2]) parameter(0)\n %constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})\n %get-tuple-element.1 = f32[2,2] get-tuple-element(%x), index=0\n %get-tuple-element.2 = f32[2,2] get-tuple-element(%x), index=1\n %add.1 = f32[2,2] add(%get-tuple-element.1, %constant.f32)\n %add.2 = f32[2,2] add(%get-tuple-element.2, %constant.f32)\n ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2)\n}\nENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) {\n %constant.f32.1 = f32[2,2] constant({{3, 4}, {5, 6}})\n %constant.f32.2 = f32[2,2] constant({{3, 4}, {7, 8}})\n %init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32.1, %constant.f32.2)\n ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition, body=%body\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_while = module->entry_computation()->root_instruction();\n auto body_tuple = root_while->while_body()->root_instruction();\n auto i1 = body_tuple->operands()[0];\n auto i2 = body_tuple->operands()[1];\n EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));\n}\nTEST_F(ArCrsCombinerTest, SameValueTestWhile3) {\n const char* module_str = R\"(\nHloModule foobar\n%condition (x: 
(f32[2,2], f32[2,2])) -> pred[] {\n %x = (f32[2,2], f32[2,2]) parameter(0)\n %constant.0 = s32[] constant(0)\n %constant.1 = s32[] constant(1)\n ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %constant.0), direction=GT\n}\n%body (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {\n %x = (f32[2,2], f32[2,2]) parameter(0)\n %constant.f32.1 = f32[2,2] constant({{1, 2}, {3, 4}})\n %constant.f32.2 = f32[2,2] constant({{3, 4}, {1, 2}})\n %get-tuple-element.1 = f32[2,2] get-tuple-element(%x), index=0\n %get-tuple-element.2 = f32[2,2] get-tuple-element(%x), index=1\n %add.1 = f32[2,2] add(%get-tuple-element.1, %constant.f32.1)\n %add.2 = f32[2,2] add(%get-tuple-element.2, %constant.f32.2)\n ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2)\n}\nENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) {\n %constant.f32 = f32[2,2] constant({{3, 4}, {5, 6}})\n %init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)\n ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition, body=%body\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_while = module->entry_computation()->root_instruction();\n auto body_tuple = root_while->while_body()->root_instruction();\n auto i1 = body_tuple->operands()[0]->operands()[0]; \n auto i2 = body_tuple->operands()[1]->operands()[0]; \n EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));\n}\nTEST_F(ArCrsCombinerTest, SameValueTestNestedWhile) {\n const char* module_str = R\"(\nHloModule foobar\n%condition (x: (f32[2,2], f32[2,2])) -> pred[] {\n %x = (f32[2,2], f32[2,2]) parameter(0)\n ROOT %t = pred[] constant(true)\n}\n%body_inner (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {\n %x = (f32[2,2], f32[2,2]) parameter(0)\n %constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})\n %gte.1 = f32[2,2] get-tuple-element(%x), index=0\n %gte.2 = f32[2,2] get-tuple-element(%x), index=1\n %add.1 = f32[2,2] add(%gte.1, %constant.f32)\n 
%add.2 = f32[2,2] add(%gte.2, %constant.f32)\n ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2)\n}\n%body_outer (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {\n %x = (f32[2,2], f32[2,2]) parameter(0)\n %gte.1 = f32[2,2] get-tuple-element(%x), index=0\n %gte.2 = f32[2,2] get-tuple-element(%x), index=1\n %init = (f32[2,2], f32[2,2]) tuple(%gte.1, %gte.2)\n ROOT %while.1 = (f32[2,2], f32[2,2]) while(%init), condition=%condition,\n body=%body_inner\n}\nENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) {\n %constant.f32 = f32[2,2] constant({{3, 4}, {5, 6}})\n %init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)\n ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition,\n body=%body_outer\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_while = module->entry_computation()->root_instruction();\n auto inner_while = root_while->while_body()->root_instruction();\n auto i1 = inner_while->while_body()->root_instruction()->operands()[0];\n auto i2 = inner_while->while_body()->root_instruction()->operands()[1];\n EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));\n}\nvoid CompareReplicaGroups(absl::Span groups_before,\n absl::Span groups_after) {\n ASSERT_EQ(groups_before.size(), groups_after.size());\n for (int i = 0; i < groups_before.size(); ++i) {\n auto group_before = groups_before[i];\n std::vector ids_before(group_before.replica_ids().begin(),\n group_before.replica_ids().end());\n auto group_after = groups_after[i];\n std::vector ids_after(group_after.replica_ids().begin(),\n group_after.replica_ids().end());\n EXPECT_EQ(ids_before, ids_after);\n }\n}\nTEST_F(ArCrsCombinerTest, RewriteArConvertCrs) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {\n %a = bf16[] parameter(0)\n %b = bf16[] parameter(1)\n ROOT %add = bf16[] add(%a, %b)\n}\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] 
parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: bf16[]) -> (f32[], f32[]) {\n %p = bf16[] parameter(0)\n %constant.bf16 = bf16[] constant(1)\n %all-reduce.ar.1 = bf16[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.bf16,\n sharding={maximal device=0}\n %convert.1 = f32[]\n convert(%all-reduce.ar.1),\n sharding={maximal device=0}\n %all-reduce.1 = f32[]\n all-reduce(%convert.1),\n replica_groups={{0,1}},\n to_apply=%sum.f32,\n sharding={maximal device=0}\n %all-reduce.ar.2 = bf16[]\n all-reduce(%constant.bf16),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.bf16,\n sharding={maximal device=1}\n %convert.2 = f32[]\n convert(%all-reduce.ar.2),\n sharding={maximal device=1}\n %all-reduce.2 = f32[]\n all-reduce(%convert.2),\n replica_groups={{0,1}},\n to_apply=%sum.f32,\n sharding={maximal device=1}\n ROOT %tuple = (f32[], f32[])\n tuple(%all-reduce.1, %all-reduce.2),\n sharding={{maximal device=0}, {maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Convert(op::Parameter())),\n op::AllReduce(op::Convert(op::Constant()))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteArConvertCrsSPMD) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {\n %a = bf16[] parameter(0)\n %b = bf16[] parameter(1)\n ROOT %add = 
bf16[] add(%a, %b)\n}\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: bf16[]) -> (f32[]) {\n %p = bf16[] parameter(0)\n %all-reduce.ar.1 = bf16[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.bf16\n %convert.1 = f32[] convert(%all-reduce.ar.1)\n %all-reduce.1 = f32[]\n all-reduce(%convert.1),\n replica_groups={{0,1}},\n to_apply=%sum.f32\n ROOT %tuple = (f32[]) tuple(%all-reduce.1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2, true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Convert(op::Parameter()))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteArBitcastCrs) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.1 (a: f32[2,1], b: f32[2,1]) -> f32[2,1] {\n %a = f32[2,1] parameter(0)\n %b = f32[2,1] parameter(1)\n ROOT %add = f32[2,1] add(%a, %b)\n}\n%sum.2 (x: f32[2], y: f32[2]) -> f32[2] {\n %x = f32[2] parameter(0)\n %y = f32[2] parameter(1)\n ROOT %add = f32[2] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[2,1]) -> (f32[2], f32[2]) {\n %p = f32[2,1] parameter(0)\n %all-reduce.ar.1 = f32[2,1]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.1,\n sharding={maximal device=0}\n %bitcast.1 = f32[2]{0} bitcast(f32[2,1]{1,0} %all-reduce.ar.1)\n %all-reduce.1 = f32[2]\n all-reduce(%bitcast.1),\n replica_groups={{0,1}},\n to_apply=%sum.2,\n 
sharding={maximal device=0}\n %all-reduce.ar.2 = f32[2,1]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.1,\n sharding={maximal device=1}\n %bitcast.2 = f32[2]{0} bitcast(f32[2,1]{1,0} %all-reduce.ar.2)\n %all-reduce.2 = f32[2]\n all-reduce(%bitcast.2),\n replica_groups={{0,1}},\n to_apply=%sum.2,\n sharding={maximal device=1}\n ROOT %tuple = (f32[2], f32[2])\n tuple(%all-reduce.1, %all-reduce.2),\n sharding={{maximal device=0}, {maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Bitcast(op::Parameter())),\n op::AllReduce(op::Bitcast(op::Parameter()))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteArMultiplyCrs) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {\n %p = f32[] parameter(0)\n %constant.f32 = f32[] constant(123)\n %all-reduce.ar.1 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.f32,\n sharding={maximal device=0}\n %multiply.1 = f32[]\n multiply(%all-reduce.ar.1, %constant.f32),\n sharding={maximal device=0}\n %all-reduce.1 = f32[]\n all-reduce(%multiply.1),\n replica_groups={{0,1}},\n to_apply=%sum.f32,\n sharding={maximal device=0}\n %all-reduce.ar.2 = f32[]\n all-reduce(%p),\n 
replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.f32,\n sharding={maximal device=1}\n %multiply.2 = f32[]\n multiply(%all-reduce.ar.2, %constant.f32),\n sharding={maximal device=1}\n %all-reduce.2 = f32[]\n all-reduce(%multiply.2),\n replica_groups={{0,1}},\n to_apply=%sum.f32,\n sharding={maximal device=1}\n ROOT %tuple = (f32[], f32[])\n tuple(%all-reduce.1, %all-reduce.2),\n sharding={{maximal device=0}, {maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Multiply(op::Parameter(), op::Constant())),\n op::AllReduce(op::Multiply(op::Parameter(), op::Constant()))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteArMultiplyCrsSPMD) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[]) {\n %p = f32[] parameter(0)\n %constant.f32 = f32[] constant(123)\n %all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}},\n channel_id=1, to_apply=%sum.f32\n %multiply.1 = f32[] multiply(%all-reduce.ar.1, %constant.f32)\n %all-reduce.1 = f32[] all-reduce(%multiply.1), replica_groups={{0,1}},\n to_apply=%sum.f32, sharding={maximal device=0}\n ROOT %tuple = (f32[]) tuple(%all-reduce.1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n 
ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Multiply(op::Parameter(), op::Constant()))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteArConvertAddCrs) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {\n %a = bf16[] parameter(0)\n %b = bf16[] parameter(1)\n ROOT %add = bf16[] add(%a, %b)\n}\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {\n %p = f32[] parameter(0)\n %constant.bf16 = bf16[] constant(1)\n %constant.f32 = f32[] constant(2)\n %all-reduce.ar.1 = bf16[]\n all-reduce(%constant.bf16),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.bf16,\n sharding={maximal device=0}\n %convert.1 = f32[]\n convert(%all-reduce.ar.1),\n sharding={maximal device=0}\n %add.1 = f32[]\n add(%constant.f32, %convert.1),\n sharding={maximal device=0}\n %all-reduce.1 = f32[]\n all-reduce(%add.1),\n replica_groups={{0,1}},\n to_apply=%sum.f32,\n sharding={maximal device=0}\n %all-reduce.ar.2 = bf16[]\n all-reduce(%constant.bf16),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.bf16,\n sharding={maximal device=1}\n %convert.2 = f32[]\n convert(%all-reduce.ar.2),\n sharding={maximal device=1}\n %add.2 = f32[]\n add(%constant.f32, %convert.2),\n sharding={maximal device=1}\n %all-reduce.2 = f32[]\n all-reduce(%add.2),\n 
replica_groups={{0,1}},\n to_apply=%sum.f32,\n sharding={maximal device=1}\n ROOT %tuple = (f32[], f32[])\n tuple(%all-reduce.1, %all-reduce.2),\n sharding={{maximal device=0}, {maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::Tuple(\n op::AllReduce(op::Add(op::Divide(op::Constant(), op::Constant()),\n op::Convert())),\n op::AllReduce(op::Add(op::Divide(op::Constant(), op::Constant()),\n op::Convert()))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteArConvertAddCrsSPMD) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {\n %a = bf16[] parameter(0)\n %b = bf16[] parameter(1)\n ROOT %add = bf16[] add(%a, %b)\n}\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[]) {\n %p = f32[] parameter(0)\n %constant.bf16 = bf16[] constant(1)\n %constant.f32 = f32[] constant(2)\n %all-reduce.ar.1 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}},\n channel_id=1, to_apply=%sum.bf16\n %convert.1 = f32[] convert(%all-reduce.ar.1), sharding={maximal device=0}\n %add.1 = f32[] add(%constant.f32, %convert.1)\n %all-reduce.1 = f32[] all-reduce(%add.1), replica_groups={{0,1}},\n to_apply=%sum.f32\n ROOT %tuple = (f32[]) tuple(%all-reduce.1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n 
ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Add(\n op::Divide(op::Constant(), op::Constant()), op::Convert()))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, OtherSummandNotTheSameDontRewrite) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {\n %a = bf16[] parameter(0)\n %b = bf16[] parameter(1)\n ROOT %add = bf16[] add(%a, %b)\n}\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {\n %p = f32[] parameter(0)\n %constant.bf16 = bf16[] constant(1)\n %constant.f32.1 = f32[] constant(2)\n %constant.f32.2 = f32[] constant(3)\n %all-reduce.ar.1 = bf16[]\n all-reduce(%constant.bf16),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.bf16,\n sharding={maximal device=0}\n %convert.1 = f32[]\n convert(%all-reduce.ar.1),\n sharding={maximal device=0}\n %add.1 = f32[]\n add(%constant.f32.1, %convert.1),\n sharding={maximal device=0}\n %all-reduce.1 = f32[]\n all-reduce(%add.1),\n replica_groups={{0,1}},\n to_apply=%sum.f32,\n sharding={maximal device=0}\n %all-reduce.ar.2 = bf16[]\n all-reduce(%constant.bf16),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.bf16,\n sharding={maximal device=1}\n %convert.2 = f32[]\n convert(%all-reduce.ar.2),\n sharding={maximal device=1}\n %add.2 = f32[]\n add(%constant.f32.2, %convert.2),\n 
sharding={maximal device=1}\n %all-reduce.2 = f32[]\n all-reduce(%add.2),\n replica_groups={{0,1}},\n to_apply=%sum.f32,\n sharding={maximal device=1}\n ROOT %tuple = (f32[], f32[])\n tuple(%all-reduce.1, %all-reduce.2),\n sharding={{maximal device=0}, {maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n ArCrsCombiner combiner(2,\n false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_FALSE(changed);\n}\nTEST_F(ArCrsCombinerTest, OtherSummandNotTheSameDontRewriteSPMD) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {\n %a = bf16[] parameter(0)\n %b = bf16[] parameter(1)\n ROOT %add = bf16[] add(%a, %b)\n}\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[]) {\n %p = f32[] parameter(0)\n %constant.bf16 = bf16[] constant(1)\n %constant.f32.1 = f32[] constant(2)\n %all-reduce.ar.1 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}},\n channel_id=1, to_apply=%sum.bf16\n %convert.1 = f32[] convert(%all-reduce.ar.1)\n %add.1 = f32[] add(%p, %convert.1)\n %all-reduce.1 = f32[] all-reduce(%add.1), replica_groups={{0,1}}, to_apply=%sum.f32\n ROOT %tuple = (f32[]) tuple(%all-reduce.1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n ArCrsCombiner combiner(2,\n true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_FALSE(changed);\n}\nTEST_F(ArCrsCombinerTest, ArThenCrsDontCrash) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.1 (a: f32[], b: f32[]) -> f32[] {\n %a = f32[] parameter(0)\n %b = f32[] parameter(1)\n ROOT %add = f32[] add(%a, %b)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {\n %p = f32[] parameter(0)\n %constant.f32 = f32[] constant(123)\n %all-reduce.ar.1 = f32[]\n all-reduce(%p),\n 
replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.1,\n sharding={maximal device=0}\n %all-reduce.1 = f32[]\n all-reduce(%all-reduce.ar.1),\n replica_groups={{0,1}},\n to_apply=%sum.1,\n sharding={maximal device=0}\n %multiply.1 = f32[]\n multiply(%all-reduce.1, %constant.f32),\n sharding={maximal device=0}\n %all-reduce.ar.2 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.1,\n sharding={maximal device=1}\n %all-reduce.2 = f32[]\n all-reduce(%all-reduce.ar.2),\n replica_groups={{0,1}},\n to_apply=%sum.1,\n sharding={maximal device=1}\n %multiply.2 = f32[]\n multiply(%all-reduce.2, %constant.f32),\n sharding={maximal device=1}\n ROOT %tuple = (f32[], f32[])\n tuple(%all-reduce.1, %all-reduce.2),\n sharding={{maximal device=0}, {maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Parameter()),\n op::AllReduce(op::Parameter())));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteMultipleAdds) {\n const char* module_str = R\"(\nHloModule foobar\n%sum (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {\n %p = f32[] parameter(0)\n %constant.1 = f32[] constant(1)\n %constant.2 = f32[] constant(2)\n %all-reduce.ar.1 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n 
to_apply=%sum,\n sharding={maximal device=0}\n %add.11 = f32[]\n add(%constant.1, %all-reduce.ar.1),\n sharding={maximal device=0}\n %add.12 = f32[]\n add(%constant.2, %add.11),\n sharding={maximal device=0}\n %all-reduce.1 = f32[]\n all-reduce(%add.12),\n replica_groups={{0,1}},\n to_apply=%sum,\n sharding={maximal device=0}\n %all-reduce.ar.2 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum,\n sharding={maximal device=0}\n %add.21 = f32[]\n add(%constant.1, %all-reduce.ar.2),\n sharding={maximal device=0}\n %add.22 = f32[]\n add(%constant.2, %add.21),\n sharding={maximal device=0}\n %all-reduce.2 = f32[]\n all-reduce(%add.22),\n replica_groups={{0,1}},\n to_apply=%sum,\n sharding={maximal device=0}\n ROOT %tuple = (f32[], f32[])\n tuple(%all-reduce.1, %all-reduce.2),\n sharding={{maximal device=0}, {maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Add(\n op::Divide(op::Constant(), op::Constant()),\n op::Add(op::Divide(op::Constant(), op::Constant()),\n op::Parameter()))),\n op::AllReduce(op::Add(\n op::Divide(op::Constant(), op::Constant()),\n op::Add(op::Divide(op::Constant(), op::Constant()),\n op::Parameter())))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteMultipleAddsSPMD) {\n const char* module_str = R\"(\nHloModule foobar\n%sum (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y 
= f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[]) {\n %p = f32[] parameter(0)\n %constant.1 = f32[] constant(1)\n %constant.2 = f32[] constant(2)\n %all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}},\n channel_id=1, to_apply=%sum\n %add.11 = f32[] add(%constant.1, %all-reduce.ar.1)\n %add.12 = f32[] add(%constant.2, %add.11)\n %all-reduce.1 = f32[] all-reduce(%add.12), replica_groups={{0,1}}, to_apply=%sum\n ROOT %tuple = (f32[]) tuple(%all-reduce.1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(\n op::Add(op::Divide(op::Constant(), op::Constant()),\n op::Add(op::Divide(op::Constant(), op::Constant()),\n op::Parameter())))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteArSubtractCrs) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {\n %p = f32[] parameter(0)\n %constant.f32 = f32[] constant(123)\n %all-reduce.ar.1 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.f32,\n sharding={maximal device=0}\n %sub.1 = f32[]\n subtract(%constant.f32, %all-reduce.ar.1),\n sharding={maximal device=0}\n %all-reduce.1 = f32[]\n all-reduce(%sub.1),\n replica_groups={{0,1}},\n to_apply=%sum.f32,\n 
sharding={maximal device=0}\n %all-reduce.ar.2 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.f32,\n sharding={maximal device=1}\n %sub.2 = f32[]\n subtract(%constant.f32, %all-reduce.ar.2),\n sharding={maximal device=1}\n %all-reduce.2 = f32[]\n all-reduce(%sub.2),\n replica_groups={{0,1}},\n to_apply=%sum.f32,\n sharding={maximal device=1}\n ROOT %tuple = (f32[], f32[])\n tuple(%all-reduce.1, %all-reduce.2),\n sharding={{maximal device=0}, {maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::Tuple(\n op::AllReduce(op::Subtract(op::Divide(op::Constant(), op::Constant()),\n op::Parameter())),\n op::AllReduce(op::Subtract(op::Divide(op::Constant(), op::Constant()),\n op::Parameter()))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteArSubtractCrsSPMD) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[]) {\n %p = f32[] parameter(0)\n %constant.f32 = f32[] constant(123)\n %all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}},\n channel_id=1, to_apply=%sum.f32\n %sub.1 = f32[] subtract(%constant.f32, %all-reduce.ar.1)\n %all-reduce.1 = f32[] all-reduce(%sub.1), replica_groups={{0,1}},\n to_apply=%sum.f32\n ROOT %tuple = (f32[]) 
tuple(%all-reduce.1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Subtract(\n op::Divide(op::Constant(), op::Constant()), op::Parameter()))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteMultipleARsLeft) {\n const char* module_str = R\"(\nHloModule foobar\n%sum (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {\n %p = f32[] parameter(0)\n %const1 = f32[] constant(1)\n %const2 = f32[] constant(2)\n %ar11 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum,\n sharding={maximal device=0}\n %add11 = f32[]\n add(%ar11, %const1),\n sharding={maximal device=0}\n %ar12 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=2,\n to_apply=%sum,\n sharding={maximal device=0}\n %add12 = f32[]\n add(%add11, %ar12),\n sharding={maximal device=0}\n %crs1 = f32[]\n all-reduce(%add12),\n replica_groups={{0,1}},\n to_apply=%sum,\n sharding={maximal device=0}\n %ar21 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum,\n sharding={maximal device=1}\n %add21 = f32[]\n add(%ar21, %const1),\n sharding={maximal device=1}\n %ar22 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=2,\n to_apply=%sum,\n sharding={maximal device=1}\n %add22 = f32[]\n add(%add21, 
%ar22),\n sharding={maximal device=1}\n %crs2 = f32[]\n all-reduce(%add22),\n replica_groups={{0,1}},\n to_apply=%sum,\n sharding={maximal device=1}\n ROOT %tuple = (f32[], f32[])\n tuple(%crs1, %crs2),\n sharding={{maximal device=0}, {maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Add(\n op::Add(op::Parameter(),\n op::Divide(op::Constant(), op::Constant())),\n op::Parameter())),\n op::AllReduce(op::Add(\n op::Add(op::Parameter(),\n op::Divide(op::Constant(), op::Constant())),\n op::Parameter()))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteMultipleARsLeftSPMD) {\n const char* module_str = R\"(\nHloModule foobar\n%sum (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[]) {\n %p = f32[] parameter(0)\n %const1 = f32[] constant(1)\n %const2 = f32[] constant(2)\n %ar11 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1,\n to_apply=%sum\n %add11 = f32[] add(%ar11, %const1)\n %ar12 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=2,\n to_apply=%sum\n %add12 = f32[] add(%add11, %ar12)\n %crs1 = f32[] all-reduce(%add12), replica_groups={{0,1}},\n to_apply=%sum\n ROOT %tuple = (f32[]) tuple(%crs1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto 
crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Add(\n op::Add(op::Parameter(), op::Divide(op::Constant(), op::Constant())),\n op::Parameter()))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteMultipleARsRight) {\n const char* module_str = R\"(\nHloModule foobar\n%sum (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {\n %p = f32[] parameter(0)\n %const1 = f32[] constant(1)\n %const2 = f32[] constant(2)\n %ar11 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum,\n sharding={maximal device=0}\n %ar12 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=2,\n to_apply=%sum,\n sharding={maximal device=0}\n %add11 = f32[]\n add(%ar12, %const1),\n sharding={maximal device=0}\n %add12 = f32[]\n add(%ar11, %add11),\n sharding={maximal device=0}\n %crs1 = f32[]\n all-reduce(%add12),\n replica_groups={{0,1}},\n to_apply=%sum,\n sharding={maximal device=0}\n %ar21 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum,\n sharding={maximal device=1}\n %ar22 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=2,\n to_apply=%sum,\n sharding={maximal device=1}\n %add21 = f32[]\n add(%ar22, %const1),\n sharding={maximal device=1}\n %add22 = f32[]\n add(%ar21, %add21),\n sharding={maximal device=1}\n %crs2 = f32[]\n all-reduce(%add22),\n replica_groups={{0,1}},\n 
to_apply=%sum,\n sharding={maximal device=1}\n ROOT %tuple = (f32[], f32[])\n tuple(%crs1, %crs2),\n sharding={{maximal device=0}, {maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Add(\n op::Parameter(),\n op::Add(op::Parameter(),\n op::Divide(op::Constant(), op::Constant())))),\n op::AllReduce(op::Add(\n op::Parameter(),\n op::Add(op::Parameter(),\n op::Divide(op::Constant(), op::Constant()))))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteMultipleARsRightSPMD) {\n const char* module_str = R\"(\nHloModule foobar\n%sum (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[]) {\n %p = f32[] parameter(0)\n %const1 = f32[] constant(1)\n %const2 = f32[] constant(2)\n %ar11 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum\n %ar12 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=2, to_apply=%sum\n %add11 = f32[] add(%ar12, %const1)\n %add12 = f32[] add(%ar11, %add11)\n %crs1 = f32[] all-reduce(%add12), replica_groups={{0,1}}, to_apply=%sum\n ROOT %tuple = (f32[]) tuple(%crs1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before 
= crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Add(\n op::Parameter(),\n op::Add(op::Parameter(),\n op::Divide(op::Constant(), op::Constant()))))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, OneReplicaDontRewrite) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {\n %a = bf16[] parameter(0)\n %b = bf16[] parameter(1)\n ROOT %add = bf16[] add(%a, %b)\n}\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: bf16[]) -> (f32[], f32[]) {\n %p = bf16[] parameter(0)\n %constant.bf16 = bf16[] constant(1)\n %all-reduce.ar.1 = bf16[]\n all-reduce(%p),\n replica_groups={{0}},\n channel_id=1,\n to_apply=%sum.bf16,\n sharding={maximal device=0}\n %convert.1 = f32[]\n convert(%all-reduce.ar.1),\n sharding={maximal device=0}\n %all-reduce.1 = f32[]\n all-reduce(%convert.1),\n replica_groups={{0}},\n to_apply=%sum.f32,\n sharding={maximal device=0}\n %all-reduce.ar.2 = bf16[]\n all-reduce(%constant.bf16),\n replica_groups={{0}},\n channel_id=1,\n to_apply=%sum.bf16,\n sharding={maximal device=1}\n %convert.2 = f32[]\n convert(%all-reduce.ar.2),\n sharding={maximal device=1}\n %all-reduce.2 = f32[]\n all-reduce(%convert.2),\n replica_groups={{0}},\n to_apply=%sum.f32,\n sharding={maximal device=1}\n ROOT %tuple = (f32[], f32[])\n tuple(%all-reduce.1, %all-reduce.2),\n sharding={{maximal device=0}, {maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 1));\n ArCrsCombiner combiner(2,\n 
false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_FALSE(changed);\n}\nTEST_F(ArCrsCombinerTest, OneReplicaDontRewriteSPMD) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {\n %a = bf16[] parameter(0)\n %b = bf16[] parameter(1)\n ROOT %add = bf16[] add(%a, %b)\n}\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: bf16[]) -> (f32[]) {\n %p = bf16[] parameter(0)\n %constant.bf16 = bf16[] constant(1)\n %all-reduce.ar.1 = bf16[] all-reduce(%p), replica_groups={{0}},\n channel_id=1, to_apply=%sum.bf16\n %convert.1 = f32[] convert(%all-reduce.ar.1)\n %all-reduce.1 = f32[] all-reduce(%convert.1),\n replica_groups={{0}}, to_apply=%sum.f32\n ROOT %tuple = (f32[]) tuple(%all-reduce.1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 1));\n ArCrsCombiner combiner(2,\n true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_FALSE(changed);\n}\nTEST_F(ArCrsCombinerTest, SameValueTestConditional) {\n const char* module_str = R\"(\nHloModule foobar\nbranch_true {\n pt = (f32[2,4], f32[2,4]) parameter(0)\n gte.0 = f32[2,4] get-tuple-element(pt), index=0\n gte.1 = f32[2,4] get-tuple-element(pt), index=1\n ROOT tuple.t = (f32[2,4], f32[2,4]) tuple(gte.1, gte.0)\n}\nbranch_false {\n pf = (f32[2,4], f32[2,4]) parameter(0)\n gte.0 = f32[2,4] get-tuple-element(pf), index=0\n gte.1 = f32[2,4] get-tuple-element(pf), index=1\n add = f32[2,4] add(gte.1, gte.1)\n ROOT tuple.f = (f32[2,4], f32[2,4]) tuple(gte.0, add)\n}\nENTRY Parameters1.v4 {\n constant = pred[] constant(true)\n p = f32[2,4] parameter(0)\n tuple = (f32[2,4], f32[2,4]) tuple(p, p)\n ROOT conditional = (f32[2,4], f32[2,4]) conditional(constant, tuple, tuple), true_computation=branch_true, false_computation=branch_false\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n 
ParseAndReturnVerifiedModule(module_str));\n auto cond = module->entry_computation()->root_instruction();\n auto branch_true = cond->branch_computation(0)->root_instruction();\n auto t0 = branch_true->mutable_operand(0);\n auto t1 = branch_true->mutable_operand(1);\n EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(t0, t1));\n auto branch_false = cond->branch_computation(1)->root_instruction();\n auto f0 = branch_false->mutable_operand(0);\n auto f1 = branch_false->mutable_operand(1);\n EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(f0, f1));\n}\nTEST_F(ArCrsCombinerTest, AllReduceWithReplicas) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: bf16[]) -> (f32[], f32[]) {\n %p = bf16[] parameter(0)\n %all-reduce.0 = f32[] all-reduce(%p), channel_id=1, replica_groups={{0,1}},\n to_apply=%sum.f32, sharding={maximal device=0}\n %all-reduce.1 = f32[] all-reduce(%p), channel_id=1, replica_groups={{0,1}},\n to_apply=%sum.f32, sharding={maximal device=1}\n %all-reduce.2 = f32[] all-reduce(%all-reduce.0), replica_groups={{0,1}},\n to_apply=%sum.f32, sharding={maximal device=0}\n %all-reduce.3 = f32[] all-reduce(%all-reduce.1), replica_groups={{0,1}},\n to_apply=%sum.f32, sharding={maximal device=1}\n ROOT %tuple = (f32[], f32[]) tuple(%all-reduce.2, %all-reduce.3),\n sharding={{maximal device=0}, {maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n ArCrsCombiner combiner(2,\n false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_FALSE(changed);\n}\nTEST_F(ArCrsCombinerTest, AllReduceWithReplicasSPMD) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: 
bf16[]) -> (f32[]) {\n %p = bf16[] parameter(0)\n %all-reduce.0 = f32[] all-reduce(%p), channel_id=1, replica_groups={{0},{1}},\n to_apply=%sum.f32\n %all-reduce.2 = f32[] all-reduce(%all-reduce.0), replica_groups={{0},{1}},\n to_apply=%sum.f32\n ROOT %tuple = (f32[]) tuple(%all-reduce.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n ArCrsCombiner combiner(2,\n true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_FALSE(changed);\n}\nTEST_F(ArCrsCombinerTest, ReplaceReplicatedAllReduceSPMD) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[2,4]) -> f32[2,4] {\n %p = f32[2,4] parameter(0), sharding={replicated}\n ROOT %all-reduce = f32[2,4] all-reduce(%p), to_apply=%sum.f32,\n replica_groups={{0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 32));\n ArCrsCombiner combiner(2,\n true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n auto root = module->entry_computation()->root_instruction();\n EXPECT_THAT(root, op::Divide(op::AllReduce(op::Parameter()),\n op::Broadcast(op::Constant())));\n auto ar = root->operand(0);\n auto divisor = root->operand(1)->operand(0);\n EXPECT_TRUE(ar->channel_id());\n EXPECT_TRUE(divisor->literal().IsAllFloat(2));\n}\nTEST_F(ArCrsCombinerTest, AllReduceWithGlobalIdReplicaGroups) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: bf16[]) -> (f32[]) {\n %p = bf16[] parameter(0)\n %all-reduce.0 = f32[] all-reduce(%p), channel_id=1,\n replica_groups={{0,1,2,3},{4,5,6,7}}, 
use_global_device_ids=true,\n to_apply=%sum.f32\n %all-reduce.2 = f32[] all-reduce(%all-reduce.0), replica_groups={{0,1}},\n to_apply=%sum.f32\n ROOT %tuple = (f32[]) tuple(%all-reduce.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2,\n 4));\n ArCrsCombiner combiner(4,\n true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/ar_crs_combiner.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/ar_crs_combiner_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1148,"cells":{"ID":{"kind":"string","value":"f1c72f99-0fb9-453a-a24b-21b943ceb2cf"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"dynamic_dimension_simplifier"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/dynamic_dimension_simplifier.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/dynamic_dimension_simplifier_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/dynamic_dimension_simplifier.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/status_macros.h\"\nnamespace xla {\nnamespace {\nabsl::StatusOr ConcatForwarding(HloInstruction* concat) {\n if (concat->opcode() != HloOpcode::kConcatenate) {\n return false;\n }\n bool changed = false;\n auto parent = concat->parent();\n std::vector new_operands;\n for (HloInstruction* operand : concat->operands()) {\n if (operand->opcode() != HloOpcode::kConcatenate ||\n operand->concatenate_dimension() != 
concat->concatenate_dimension()) {\n new_operands.push_back(operand);\n } else {\n changed = true;\n for (HloInstruction* operand_operand : operand->operands()) {\n new_operands.push_back(operand_operand);\n }\n }\n }\n if (changed) {\n auto new_concat = parent->AddInstruction(HloInstruction::CreateConcatenate(\n concat->shape(), new_operands, concat->concatenate_dimension()));\n TF_RETURN_IF_ERROR(parent->ReplaceInstruction(concat, new_concat));\n }\n return changed;\n}\nabsl::StatusOr SliceConcatForwarding(HloInstruction* slice) {\n if (slice->opcode() != HloOpcode::kSlice) {\n return false;\n }\n auto concat = slice->mutable_operand(0);\n if (concat->opcode() != HloOpcode::kConcatenate) {\n return false;\n }\n if (slice->shape().rank() != 1) {\n return false;\n }\n int64_t concat_dim = concat->concatenate_dimension();\n std::vector new_operands;\n int64_t size_so_far = 0;\n int64_t slice_size = slice->shape().dimensions(concat_dim);\n if (slice_size != slice->slice_limits(0) - slice->slice_starts(0)) {\n return false;\n }\n if (slice->slice_strides(0) != 1) {\n return false;\n }\n for (HloInstruction* operand : concat->operands()) {\n if (size_so_far == slice->slice_starts(0) &&\n operand->shape().dimensions(0) == slice_size) {\n TF_RETURN_IF_ERROR(slice->ReplaceAllUsesWith(operand));\n return true;\n }\n size_so_far += operand->shape().dimensions(concat_dim);\n }\n return false;\n}\nabsl::StatusOr ReshapeBroadcastForwarding(HloInstruction* reshape) {\n if (reshape->opcode() != HloOpcode::kReshape) {\n return false;\n }\n auto broadcast = reshape->mutable_operand(0);\n if (broadcast->opcode() != HloOpcode::kBroadcast) {\n return false;\n }\n if (reshape->shape().rank() != 0) {\n return false;\n }\n if (broadcast->shape().rank() != 1) {\n return false;\n }\n if (broadcast->mutable_operand(0)->shape().rank() != 0) {\n return false;\n }\n TF_RETURN_IF_ERROR(\n reshape->ReplaceAllUsesWith(broadcast->mutable_operand(0)));\n return true;\n}\nabsl::StatusOr 
ReshapeReshapeForwarding(HloInstruction* reshape) {\n if (reshape->opcode() != HloOpcode::kReshape) {\n return false;\n }\n auto reshape_2 = reshape->mutable_operand(0);\n if (reshape_2->opcode() != HloOpcode::kReshape) {\n return false;\n }\n if (!Shape::Equal()(reshape->shape(), reshape_2->operand(0)->shape())) {\n return false;\n }\n TF_RETURN_IF_ERROR(\n reshape->ReplaceAllUsesWith(reshape_2->mutable_operand(0)));\n return true;\n}\nabsl::StatusOr IdentityConvertRemoving(HloInstruction* convert) {\n if (convert->opcode() != HloOpcode::kConvert) {\n return false;\n }\n auto operand = convert->mutable_operand(0);\n if (Shape::Equal()(convert->shape(), operand->shape())) {\n TF_RETURN_IF_ERROR(convert->ReplaceAllUsesWith(operand));\n return true;\n }\n return false;\n}\nabsl::StatusOr IdentityReshapeRemoving(HloInstruction* reshape) {\n if (reshape->opcode() != HloOpcode::kReshape) {\n return false;\n }\n auto operand = reshape->mutable_operand(0);\n if (Shape::Equal()(reshape->shape(), operand->shape())) {\n TF_RETURN_IF_ERROR(reshape->ReplaceAllUsesWith(operand));\n return true;\n }\n return false;\n}\n} \nabsl::StatusOr DynamicDimensionSimplifier::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n XLA_VLOG_LINES(\n 2, \"DynamicDimensionSimplifier::Run(), before:\\n\" + module->ToString());\n bool changed = false;\n for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {\n for (auto* inst : comp->MakeInstructionPostOrder()) {\n TF_ASSIGN_OR_RETURN(bool local_changed, ConcatForwarding(inst));\n changed |= local_changed;\n }\n }\n for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {\n for (auto* inst : comp->MakeInstructionPostOrder()) {\n TF_ASSIGN_OR_RETURN(bool local_changed, SliceConcatForwarding(inst));\n changed |= local_changed;\n }\n }\n for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {\n for (auto* inst : comp->MakeInstructionPostOrder()) {\n TF_ASSIGN_OR_RETURN(bool 
local_changed, ReshapeBroadcastForwarding(inst));\n changed |= local_changed;\n }\n }\n for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {\n for (auto* inst : comp->MakeInstructionPostOrder()) {\n TF_ASSIGN_OR_RETURN(bool local_changed, ReshapeReshapeForwarding(inst));\n changed |= local_changed;\n }\n }\n for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {\n for (auto* inst : comp->MakeInstructionPostOrder()) {\n TF_ASSIGN_OR_RETURN(bool local_changed, IdentityConvertRemoving(inst));\n changed |= local_changed;\n }\n }\n for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {\n for (auto* inst : comp->MakeInstructionPostOrder()) {\n TF_ASSIGN_OR_RETURN(bool local_changed, IdentityReshapeRemoving(inst));\n changed |= local_changed;\n }\n }\n XLA_VLOG_LINES(\n 2, \"DynamicDimensionSimplifier::Run(), after:\\n\" + module->ToString());\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/dynamic_dimension_simplifier.h\"\n#include \n#include \n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_join.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/pass/hlo_pass_pipeline.h\"\n#include \"xla/layout_util.h\"\n#include \"xla/literal.h\"\n#include \"xla/service/hlo_creation_utils.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/service/pattern_matcher.h\"\n#include \"xla/service/pattern_matcher_gmock.h\"\n#include \"xla/service/shape_inference.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/test.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\n#include \"xla/types.h\"\n#include \"xla/window_util.h\"\n#include \"xla/xla_data.pb.h\"\nnamespace xla {\nnamespace {\nnamespace m = match;\nclass 
DynamicDimensionSimplifierTest : public HloTestBase {};\nTEST_F(DynamicDimensionSimplifierTest, ForwardConcat) {\n const char* kModuleStr = R\"(\n HloModule m\n test {\n p0 = s32[1] parameter(0)\n p1 = s32[1] parameter(1)\n p2 = s32[1] parameter(2)\n concat1 = s32[2] concatenate(p0, p1), dimensions={0}\n ROOT concat2 = s32[3] concatenate(concat1, p2), dimensions={0}\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));\n DynamicDimensionSimplifier simplifier;\n ASSERT_TRUE(simplifier.Run(m.get()).value());\n EXPECT_THAT(m->entry_computation()->root_instruction(),\n GmockMatch(m::Concatenate(m::Parameter(0), m::Parameter(1),\n m::Parameter(2))));\n}\nTEST_F(DynamicDimensionSimplifierTest, DoNotForwardConcatMultipleDims) {\n const char* kModuleStr = R\"(\n HloModule m\n test {\n p0 = s32[1, 1] parameter(0)\n p1 = s32[1, 1] parameter(1)\n p2 = s32[2, 1] parameter(2)\n concat1 = s32[2, 1] concatenate(p0, p1), dimensions={0}\n ROOT concat2 = s32[2, 2] concatenate(concat1, p2), dimensions={1}\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));\n DynamicDimensionSimplifier simplifier;\n ASSERT_FALSE(simplifier.Run(m.get()).value());\n}\nTEST_F(DynamicDimensionSimplifierTest, ForwardConcatSlice) {\n const char* kModuleStr = R\"(\n HloModule m\n test {\n p0 = s32[1] parameter(0)\n p1 = s32[1] parameter(1)\n p2 = s32[1] parameter(2)\n concat = s32[3] concatenate(p0, p1, p2), dimensions={0}\n ROOT slice = s32[1] slice(concat), slice={[1:2]}\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));\n DynamicDimensionSimplifier simplifier;\n ASSERT_TRUE(simplifier.Run(m.get()).value());\n EXPECT_THAT(m->entry_computation()->root_instruction(),\n GmockMatch(m::Parameter(1)));\n}\nTEST_F(DynamicDimensionSimplifierTest, DoNotForwardConcatSliceSizeMismatch) {\n const char* kModuleStr = R\"(\n HloModule m\n test {\n p0 = s32[1] parameter(0)\n p1 = s32[1] parameter(1)\n p2 = s32[1] 
parameter(2)\n concat = s32[3] concatenate(p0, p1, p2), dimensions={0}\n ROOT slice = s32[2] slice(concat), slice={[1:3]}\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));\n DynamicDimensionSimplifier simplifier;\n ASSERT_FALSE(simplifier.Run(m.get()).value());\n}\nTEST_F(DynamicDimensionSimplifierTest, DoNotForwardConcatSliceStrided) {\n const char* kModuleStr = R\"(\n HloModule m\n test {\n p0 = s32[1] parameter(0)\n p1 = s32[1] parameter(1)\n p2 = s32[1] parameter(2)\n concat = s32[3] concatenate(p0, p1, p2), dimensions={0}\n ROOT slice = s32[1] slice(concat), slice={[1:2:2]}\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));\n DynamicDimensionSimplifier simplifier;\n ASSERT_FALSE(simplifier.Run(m.get()).value());\n}\nTEST_F(DynamicDimensionSimplifierTest, BroadcastReshapeForwarding) {\n const char* kModuleStr = R\"(\n HloModule m\n test {\n p0 = s32[] parameter(0)\n broadcast = s32[1] broadcast(p0), dimensions={}\n ROOT reshape = s32[] reshape(broadcast)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));\n DynamicDimensionSimplifier simplifier;\n ASSERT_TRUE(simplifier.Run(m.get()).value());\n EXPECT_THAT(m->entry_computation()->root_instruction(),\n GmockMatch(m::Parameter(0)));\n}\nTEST_F(DynamicDimensionSimplifierTest, ReshapeReshapeForwarding) {\n const char* kModuleStr = R\"(\n HloModule m\n test {\n p0 = s32[] parameter(0)\n reshape = s32[1] reshape(p0)\n ROOT reshape2 = s32[] reshape(reshape)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));\n DynamicDimensionSimplifier simplifier;\n ASSERT_TRUE(simplifier.Run(m.get()).value());\n EXPECT_THAT(m->entry_computation()->root_instruction(),\n GmockMatch(m::Parameter(0)));\n}\nTEST_F(DynamicDimensionSimplifierTest,\n DoNotReshapeReshapeForwardingShapeMismatch) {\n const char* kModuleStr = R\"(\n HloModule m\n test {\n p0 = s32[1, 1] parameter(0)\n reshape = 
s32[1] reshape(p0)\n ROOT reshape2 = s32[] reshape(reshape)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));\n DynamicDimensionSimplifier simplifier;\n ASSERT_FALSE(simplifier.Run(m.get()).value());\n}\nTEST_F(DynamicDimensionSimplifierTest, IdConvertRemoving) {\n const char* kModuleStr = R\"(\n HloModule m\n test {\n p0 = s32[1] parameter(0)\n ROOT reshape2 = s32[1] convert(p0)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));\n DynamicDimensionSimplifier simplifier;\n ASSERT_TRUE(simplifier.Run(m.get()).value());\n EXPECT_THAT(m->entry_computation()->root_instruction(),\n GmockMatch(m::Parameter(0)));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_dimension_simplifier.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_dimension_simplifier_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1149,"cells":{"ID":{"kind":"string","value":"b2d66996-ec37-441c-9dba-74eedda21742"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"all_reduce_simplifier"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/all_reduce_simplifier.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/all_reduce_simplifier_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/all_reduce_simplifier.h\"\n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/string_view.h\"\n#include 
\"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/service/collective_ops_utils.h\"\n#include \"xla/service/hlo_module_config.h\"\n#include \"xla/service/hlo_replication_analysis.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nabsl::StatusOr AllReduceSimplifier::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n TF_ASSIGN_OR_RETURN(\n auto replication,\n HloReplicationAnalysis::Run(module, false));\n std::vector> all_reduces_to_replace;\n auto get_participant_counts_for_replica_group =\n [](const HloInstruction* all_reduce) -> absl::StatusOr {\n const HloModuleConfig& config = all_reduce->GetModule()->config();\n TF_ASSIGN_OR_RETURN(\n CollectiveOpGroupMode group_mode,\n GetCollectiveOpGroupMode(all_reduce->channel_id().has_value(),\n Cast(all_reduce)\n ->use_global_device_ids()));\n int64_t num_devices = config.num_partitions();\n int64_t num_replicas = config.replica_count();\n TF_ASSIGN_OR_RETURN(std::vector participant_counts,\n GetPariticipantCountsForReplicaGroups(\n num_replicas, num_devices,\n all_reduce->replica_groups(), group_mode));\n if (participant_counts.empty()) {\n return -1;\n }\n if (!absl::c_all_of(participant_counts, [&](int64_t participant_count) {\n return participant_count == participant_counts[0];\n })) {\n return -1;\n }\n return participant_counts[0];\n };\n bool changed = false;\n for (auto computation : module->computations(execution_threads)) {\n for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {\n if ((inst->opcode() == HloOpcode::kAllGather ||\n inst->opcode() == HloOpcode::kReduceScatter) &&\n ShapeUtil::Compatible(inst->shape(), inst->operand(0)->shape())) {\n changed = true;\n 
TF_RETURN_IF_ERROR(\n computation->ReplaceInstruction(inst, inst->mutable_operand(0)));\n }\n }\n }\n for (auto computation : module->computations(execution_threads)) {\n for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {\n if (!inst->shape().IsArray()) {\n continue;\n }\n if (!inst->IsCrossReplicaAllReduce() && !inst->IsCrossModuleAllReduce()) {\n continue;\n }\n TF_ASSIGN_OR_RETURN(int64_t group_size,\n get_participant_counts_for_replica_group(inst));\n if (group_size == -1 ||\n (!inst->IsCrossReplicaAllReduce() && group_size != 1) ||\n (!inst->IsCrossReplicaAllReduce() &&\n !module->config().use_spmd_partitioning())) {\n continue;\n }\n if (replication->HloInstructionIsReplicatedAt(inst->operand(0), {}) ||\n group_size == 1) {\n all_reduces_to_replace.push_back({inst, group_size});\n }\n }\n }\n for (auto all_reduce_and_group_size : all_reduces_to_replace) {\n auto all_reduce = all_reduce_and_group_size.first;\n const int64_t replica_group_size = all_reduce_and_group_size.second;\n if (replica_group_size == 1) {\n TF_RETURN_IF_ERROR(all_reduce->parent()->ReplaceInstruction(\n all_reduce, all_reduce->mutable_operand(0)));\n changed = true;\n continue;\n }\n if (all_reduce->to_apply()->instruction_count() != 3 ||\n all_reduce->to_apply()->num_parameters() != 2) {\n continue;\n }\n HloInstruction* replacement;\n switch (all_reduce->to_apply()->root_instruction()->opcode()) {\n case HloOpcode::kAdd: {\n auto multiplier =\n all_reduce->parent()->AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR0(replica_group_size)));\n if (all_reduce->shape().element_type() != S32) {\n multiplier = all_reduce->parent()->AddInstruction(\n HloInstruction::CreateConvert(\n ShapeUtil::ChangeElementType(\n multiplier->shape(), all_reduce->shape().element_type()),\n multiplier));\n }\n if (all_reduce->shape().rank() > 0) {\n multiplier = all_reduce->parent()->AddInstruction(\n HloInstruction::CreateBroadcast(all_reduce->shape(), multiplier,\n 
{}));\n }\n replacement =\n all_reduce->parent()->AddInstruction(HloInstruction::CreateBinary(\n all_reduce->shape(), HloOpcode::kMultiply,\n all_reduce->mutable_operand(0), multiplier));\n break;\n }\n case HloOpcode::kMinimum:\n case HloOpcode::kMaximum:\n case HloOpcode::kOr:\n case HloOpcode::kAnd:\n replacement = all_reduce->mutable_operand(0);\n break;\n default:\n continue;\n }\n VLOG(2) << \"Replacing \" << all_reduce->ToString() << \" with \"\n << replacement->ToString();\n TF_RETURN_IF_ERROR(all_reduce->ReplaceAllUsesWith(replacement));\n changed = true;\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/all_reduce_simplifier.h\"\n#include \n#include \n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/service/hlo_module_config.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/service/pattern_matcher.h\"\n#include \"xla/service/pattern_matcher_gmock.h\"\n#include \"xla/test.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nnamespace m = match;\nusing AllReduceSimplifierTest = HloTestBase;\nTEST_F(AllReduceSimplifierTest, ReplicatedParameters) {\n const char* kModuleStr = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nmax {\n a.1 = f32[] parameter(0)\n b.1 = f32[] parameter(1)\n ROOT max = f32[] maximum(a.1, b.1)\n}\nmin {\n a.2 = f32[] parameter(0)\n b.2 = f32[] parameter(1)\n ROOT min = f32[] minimum(a.2, b.2)\n}\nsum.1 {\n a.3 = f32[] parameter(0)\n b.3 = f32[] parameter(1)\n ROOT add.1 = f32[] add(a.3, b.3)\n}\ntest {\n p0 = f32[8,16] parameter(0), parameter_replication={true}\n p1 = f32[8,16] parameter(1), parameter_replication={false}\n p2 = f32[] parameter(2), parameter_replication={true}\n all-reduce = f32[8,16] all-reduce(p0), replica_groups={}, to_apply=sum\n all-reduce.1 = f32[8,16] all-reduce(p0), 
replica_groups={}, to_apply=max\n all-reduce.2 = f32[8,16] all-reduce(p1), replica_groups={}, to_apply=min\n all-reduce.3 = f32[] all-reduce(p2), replica_groups={}, to_apply=sum.1\n ROOT tuple = (f32[8,16], f32[8,16], f32[8,16], f32[]) tuple(all-reduce, all-reduce.1, all-reduce.2, all-reduce.3)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(\n kModuleStr, 8));\n AllReduceSimplifier simplifier(8);\n ASSERT_TRUE(simplifier.Run(module.get()).value());\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n GmockMatch(m::Tuple(\n m::MultiplyAnyOrder(m::Parameter(0),\n m::Broadcast(m::Convert(m::ConstantScalar(8)))),\n m::Parameter(0), m::AllReduce(m::Parameter(1)),\n m::MultiplyAnyOrder(m::Parameter(2),\n m::Convert(m::ConstantScalar(8))))));\n}\nTEST_F(AllReduceSimplifierTest, AllReduceAfterAllReduce) {\n const char* kModuleStr = R\"(\nHloModule m\nmax {\n a.1 = f32[] parameter(0)\n b.1 = f32[] parameter(1)\n ROOT max = f32[] maximum(a.1, b.1)\n}\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\ntest {\n p0 = f32[8,16] parameter(0), parameter_replication={false}\n all-reduce = f32[8,16] all-reduce(p0), replica_groups={}, to_apply=max\n ROOT all-reduce.1 = f32[8,16] all-reduce(all-reduce), replica_groups={}, to_apply=sum\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(\n kModuleStr, 8));\n AllReduceSimplifier simplifier(8);\n ASSERT_TRUE(simplifier.Run(module.get()).value());\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::MultiplyAnyOrder(\n m::AllReduce(m::Parameter(0)),\n m::Broadcast(m::Convert(m::ConstantScalar(8))))));\n}\nTEST_F(AllReduceSimplifierTest, SubgroupAllReduce) {\n const char* kModuleStr = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nmax {\n a.1 = f32[] parameter(0)\n b.1 = f32[] parameter(1)\n ROOT max = f32[] maximum(a.1, b.1)\n}\nmin {\n a.2 = 
f32[] parameter(0)\n b.2 = f32[] parameter(1)\n ROOT min = f32[] minimum(a.2, b.2)\n}\ntest {\n p0 = f32[8,16] parameter(0), parameter_replication={true}\n p1 = f32[8,16] parameter(1), parameter_replication={false}\n all-reduce = f32[8,16] all-reduce(p0), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum\n all-reduce.1 = f32[8,16] all-reduce(p0), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=max\n all-reduce.2 = f32[8,16] all-reduce(p1), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=min\n ROOT tuple = (f32[8,16], f32[8,16], f32[8,16]) tuple(all-reduce, all-reduce.1, all-reduce.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(\n kModuleStr, 8));\n AllReduceSimplifier simplifier(8);\n ASSERT_TRUE(simplifier.Run(module.get()).value());\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n GmockMatch(m::Tuple(\n m::MultiplyAnyOrder(m::Parameter(0),\n m::Broadcast(m::Convert(m::ConstantScalar(4)))),\n m::Parameter(0), m::AllReduce(m::Parameter(1)))));\n}\nTEST_F(AllReduceSimplifierTest, TrivialSubgroupAllReduce) {\n const char* kModuleStr = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\ntest {\n p0 = f32[8,16] parameter(0), parameter_replication={false}\n ROOT all-reduce = f32[8,16] all-reduce(p0),\n replica_groups={{0},{1},{2},{3},{4},{5},{6},{7}},\n to_apply=sum\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(\n kModuleStr, 8));\n AllReduceSimplifier simplifier(8);\n EXPECT_TRUE(simplifier.Run(module.get()).value());\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::Parameter(0)));\n}\nTEST_F(AllReduceSimplifierTest, TrivialSubgroupNonCrossReplicaAllReduce) {\n const char* kModuleStr = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\ntest {\n p0 = f32[8,16] parameter(0), parameter_replication={false}\n ROOT all-reduce = f32[8,16] 
all-reduce(p0),\n channel_id=1,\n use_global_device_ids=true,\n replica_groups={{0},{1},{2},{3},{4},{5},{6},{7}},\n to_apply=sum\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n auto module, ParseAndReturnVerifiedModule(kModuleStr, 1,\n 8));\n module->mutable_config().set_use_spmd_partitioning(true);\n AllReduceSimplifier simplifier(1);\n EXPECT_TRUE(simplifier.Run(module.get()).value());\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::Parameter(0)));\n}\nTEST_F(AllReduceSimplifierTest, NonCrossReplicaAllReduceAfterAllReduce) {\n const char* kModuleStr = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\ntest {\n p0 = f32[8,16] parameter(0), parameter_replication={false}\n all-reduce = f32[8,16] all-reduce(p0),\n channel_id=1,\n use_global_device_ids=true,\n replica_groups={{0,2},{1,3},{4,6},{5,7}},\n to_apply=sum\n ROOT all-reduce.1 = f32[8,16] all-reduce(all-reduce),\n channel_id=2,\n use_global_device_ids=true,\n replica_groups={{0,4},{1,5},{2,6},{3,7}},\n to_apply=sum\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n auto module, ParseAndReturnVerifiedModule(kModuleStr, 1,\n 8));\n module->mutable_config().set_use_spmd_partitioning(true);\n AllReduceSimplifier simplifier(1);\n EXPECT_FALSE(simplifier.Run(module.get()).value());\n}\nTEST_F(AllReduceSimplifierTest, MPMDNonCrossReplicaAllReduce) {\n const char* kModuleStr = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\ntest {\n p0 = f32[8,16] parameter(0), parameter_replication={false}\n ROOT all-reduce = f32[8,16] all-reduce(p0),\n channel_id=1,\n replica_groups={{0},{1}},\n to_apply=sum\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n auto module, ParseAndReturnVerifiedModule(kModuleStr, 2,\n 1));\n module->mutable_config().set_use_spmd_partitioning(false);\n AllReduceSimplifier simplifier(2);\n EXPECT_FALSE(simplifier.Run(module.get()).value());\n}\n} \n} "},"Code 
Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_simplifier.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_simplifier_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1150,"cells":{"ID":{"kind":"string","value":"08422a02-e75f-4e3b-88eb-cda17385701d"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"while_loop_fusible_sinking"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/while_loop_fusible_sinking.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/while_loop_fusible_sinking_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/while_loop_fusible_sinking.h\"\n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/container/inlined_vector.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/while_util.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nbool IsPurelyExpanding(const HloInstruction* instr) {\n return instr->opcode() == HloOpcode::kBroadcast ||\n (instr->opcode() == HloOpcode::kConstant &&\n instr->shape().rank() == 0) ||\n instr->opcode() == HloOpcode::kIota;\n}\nbool IsFusionCandidate(const HloInstruction* instr) {\n return instr->opcode() != HloOpcode::kRng &&\n (instr->IsElementwise() || instr->opcode() == HloOpcode::kReshape ||\n 
instr->opcode() == HloOpcode::kTranspose);\n}\n} \nbool WhileLoopFusibleSinking::IsSinkableFusion(HloInstruction* while_operand) {\n absl::InlinedVector worklist;\n absl::flat_hash_set visited;\n worklist.push_back(while_operand);\n while (!worklist.empty()) {\n HloInstruction* to_process = worklist.back();\n worklist.pop_back();\n if (!to_process->IsFusible()) {\n return false;\n }\n if (!visited.insert(to_process->unique_id()).second) {\n if (visited.size() > 100) {\n return false;\n }\n continue;\n }\n if (IsPurelyExpanding(to_process)) {\n continue;\n }\n if (IsFusionCandidate(to_process)) {\n for (auto* op : to_process->operands()) {\n worklist.push_back(op);\n }\n continue;\n }\n return false;\n }\n return true;\n}\nHloInstruction* WhileLoopFusibleSinking::CreateSinkableFusion(\n HloInstruction* while_operand) {\n HloInstruction* fusion =\n while_operand->AddInstruction(while_operand->CreateFusion(\n while_operand->shape(), HloInstruction::FusionKind::kLoop,\n while_operand));\n bool did_fuse = IsFusionCandidate(while_operand);\n while (did_fuse) {\n did_fuse = false;\n for (int64_t i = fusion->operand_count() - 1; i >= 0; --i) {\n HloInstruction* op = fusion->mutable_operand(i);\n if (IsPurelyExpanding(op)) {\n continue;\n }\n fusion->FuseInstruction(op);\n did_fuse = true;\n break;\n }\n }\n did_fuse = true;\n while (did_fuse) {\n did_fuse = false;\n for (int64_t i = fusion->operand_count() - 1; i >= 0; --i) {\n HloInstruction* op = fusion->mutable_operand(i);\n if (IsPurelyExpanding(op)) {\n fusion->FuseInstruction(op);\n did_fuse = true;\n break;\n }\n }\n }\n return fusion;\n}\nabsl::StatusOr WhileLoopFusibleSinking::TrySinkingFusiblesIntoWhileLoop(\n HloInstruction* while_instr) {\n HloComputation* while_cond = while_instr->while_condition();\n HloComputation* while_body = while_instr->while_body();\n if (call_counts_[while_body] > 1 || call_counts_[while_cond] > 1) {\n return false;\n }\n HloInstruction* init_value = while_instr->mutable_operand(0);\n 
if (init_value->opcode() != HloOpcode::kTuple) {\n return false;\n }\n bool changed = false;\n std::vector invariant_body_gtes =\n WhileUtil::GetInvariantGTEsForWhileBody(*while_body);\n std::vector tuple_indices;\n std::vector new_operands;\n for (HloInstruction* invariant_body_gte : invariant_body_gtes) {\n int64_t index = invariant_body_gte->tuple_index();\n if (while_instr->operand_count() == 0 || init_value->operand_count() == 0) {\n CHECK_EQ(while_instr->user_count(), 0);\n VLOG(3) << \"Each element in the operand tuple of the while instruction '\"\n << while_instr->name()\n << \"' was an invariant value, whose usage has been replaced \"\n \" directly by the value.\";\n break;\n }\n HloInstruction* invariant_value = init_value->mutable_operand(index);\n if (absl::c_any_of(invariant_body_gte->users(),\n [](const HloInstruction* use) {\n switch (use->opcode()) {\n case HloOpcode::kDynamicSlice:\n case HloOpcode::kGather:\n case HloOpcode::kSlice:\n return true;\n default:\n return false;\n }\n })) {\n continue;\n }\n if (init_value->IsRoot() || init_value->user_count() > 1) {\n init_value = init_value->AddInstruction(init_value->Clone());\n TF_RETURN_IF_ERROR(while_instr->ReplaceOperandWith(0, init_value));\n }\n if (!IsSinkableFusion(invariant_value)) {\n continue;\n }\n HloInstruction* fusion = CreateSinkableFusion(invariant_value);\n changed = true;\n if (fusion->operand_count() > 0 &&\n (while_instr->IsRoot() ||\n absl::c_any_of(while_instr->users(), [&](HloInstruction* use) {\n return use->opcode() != HloOpcode::kGetTupleElement;\n }))) {\n auto uses = while_instr->users();\n std::vector gtes(init_value->operand_count());\n for (int64_t i = 0; i < gtes.size(); ++i) {\n gtes[i] = while_instr->AddInstruction(\n HloInstruction::CreateGetTupleElement(while_instr, i));\n }\n HloInstruction* tuple =\n while_instr->AddInstruction(HloInstruction::CreateTuple(gtes));\n if (while_instr->IsRoot()) {\n while_instr->parent()->set_root_instruction(tuple);\n }\n if 
(!uses.empty()) {\n TF_RETURN_IF_ERROR(while_instr->ReplaceUsesWith(uses, tuple));\n }\n }\n absl::InlinedVector invariant_output_uses;\n for (auto use : while_instr->users()) {\n if (use->opcode() == HloOpcode::kGetTupleElement &&\n use->tuple_index() == index) {\n invariant_output_uses.push_back(use);\n }\n }\n for (auto use : invariant_output_uses) {\n TF_RETURN_IF_ERROR(\n while_instr->parent()->ReplaceInstruction(use, invariant_value));\n }\n HloInstruction* root = while_body->root_instruction();\n HloInstruction* parameter = while_body->parameter_instruction(0);\n tuple_indices.resize(fusion->operand_count());\n int64_t next_index = init_value->operand_count();\n new_operands.resize(fusion->operand_count());\n for (int64_t i = 0; i < fusion->operand_count(); ++i) {\n init_value->AppendOperand(fusion->mutable_operand(i));\n parameter->mutable_shape()->mutable_tuple_shapes()->push_back(\n fusion->mutable_operand(i)->shape());\n new_operands[i] = root->AddInstruction(\n HloInstruction::CreateGetTupleElement(parameter, next_index++));\n root->AppendOperand(new_operands[i]);\n }\n *(init_value->mutable_shape()) = parameter->shape();\n *(while_instr->mutable_shape()) = parameter->shape();\n *(while_cond->parameter_instruction(0)->mutable_shape()) =\n parameter->shape();\n *(root->mutable_shape()) = parameter->shape();\n auto cloned_fusion = while_body->AddInstruction(\n fusion->CloneWithNewOperands(fusion->shape(), new_operands));\n TF_RETURN_IF_ERROR(fusion->parent()->RemoveInstruction(fusion));\n TF_RETURN_IF_ERROR(\n while_body->ReplaceInstruction(invariant_body_gte, cloned_fusion));\n TF_RETURN_IF_ERROR(cloned_fusion->Defuse());\n }\n return changed;\n}\nabsl::StatusOr WhileLoopFusibleSinking::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n call_counts_.clear();\n bool changed = false;\n std::vector while_instrs;\n for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {\n absl::c_copy_if(comp->instructions(), 
std::back_inserter(while_instrs),\n HloPredicateIsOp);\n }\n for (HloInstruction* while_instr : while_instrs) {\n call_counts_[while_instr->while_body()]++;\n call_counts_[while_instr->while_condition()]++;\n }\n for (HloInstruction* while_instr : while_instrs) {\n TF_ASSIGN_OR_RETURN(bool result,\n TrySinkingFusiblesIntoWhileLoop(while_instr));\n changed |= result;\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/while_loop_fusible_sinking.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/test.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nnamespace op = xla::testing::opcode_matchers;\nusing ::testing::_;\nusing WhileLoopFusibleSinkingTest = HloTestBase;\nTEST_F(WhileLoopFusibleSinkingTest, SinkOneFusible) {\n const char* const hlo_string = R\"(\nHloModule ModuleWithWhile\nbody {\n p_body = (f32[2],f32[2]) parameter(0)\n p_body.0 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=0\n p_body.1 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=1\n add.0 = f32[2] add(p_body.0, p_body.1)\n ROOT root = (f32[2],f32[2]) tuple(add.0, p_body.1)\n}\ncondition {\n p_cond = (f32[2],f32[2]) parameter(0)\n ROOT result = pred[] constant(true)\n}\nENTRY entry {\n const_0 = f32[2] parameter(0)\n const_1 = f32[2] iota(), iota_dimension=0\n while_init = (f32[2],f32[2]) tuple(const_0, const_1)\n ROOT while = (f32[2],f32[2]) while(while_init), condition=condition, body=body\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n WhileLoopFusibleSinking{}.Run(module.get()));\n ASSERT_TRUE(changed);\n auto* while_body = module->GetComputationWithName(\"body\");\n EXPECT_THAT(while_body->root_instruction(),\n op::Tuple(op::Add(_, op::Iota()), _));\n}\nTEST_F(WhileLoopFusibleSinkingTest, SinkMask) {\n const char* const hlo_string = R\"(\nHloModule 
ModuleWithWhile\nbody {\n p_body = (f32[5,7],f32[5,7]) parameter(0)\n p_body.0 = get-tuple-element(p_body), index=0\n p_body.1 = get-tuple-element(p_body), index=1\n add.0 = add(p_body.0, p_body.1)\n ROOT root = tuple(add.0, p_body.1)\n}\ncondition {\n p_cond = (f32[5,7],f32[5,7]) parameter(0)\n ROOT result = pred[] constant(true)\n}\nENTRY entry {\n const_0 = f32[5,7] parameter(0)\n p = f32[5] parameter(1)\n a = f32[5,7] iota(), iota_dimension=0\n b = f32[5,7] iota(), iota_dimension=1\n c = add(a, b)\n d = f32[5,7] broadcast(p), dimensions={0}\n mask = multiply(c,d)\n while_init = tuple(const_0, mask)\n ROOT while = while(while_init), condition=condition, body=body\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n WhileLoopFusibleSinking{}.Run(module.get()));\n ASSERT_TRUE(changed);\n auto* while_body = module->GetComputationWithName(\"body\");\n EXPECT_THAT(while_body->root_instruction(),\n op::Tuple(op::Add(_, op::Multiply(op::Add(op::Iota(), op::Iota()),\n op::Broadcast())),\n _, _));\n}\nTEST_F(WhileLoopFusibleSinkingTest, NoSinkSlicedMask) {\n const char* const hlo_string = R\"(\nHloModule ModuleWithWhile\nbody {\n p_body = (f32[5,7],f32[5,7]) parameter(0)\n p_body.0 = get-tuple-element(p_body), index=0\n p_body.1 = get-tuple-element(p_body), index=1\n z = s32[] constant(0)\n j = s32[] constant(3)\n ds = f32[1,7] dynamic-slice(p_body.1, j, z), dynamic_slice_sizes={1,7}\n r = f32[7] reshape(ds)\n b = f32[5,7] broadcast(r), dimensions={1}\n a = add(b, p_body.0)\n add.0 = add(a, p_body.1)\n ROOT root = tuple(add.0, p_body.1)\n}\ncondition {\n p_cond = (f32[5,7],f32[5,7]) parameter(0)\n ROOT result = pred[] constant(true)\n}\nENTRY entry {\n const_0 = f32[5,7] parameter(0)\n p = f32[5] parameter(1)\n a = f32[5,7] iota(), iota_dimension=0\n b = f32[5,7] iota(), iota_dimension=1\n c = add(a, b)\n d = f32[5,7] broadcast(p), dimensions={0}\n mask = multiply(c,d)\n while_init = 
tuple(const_0, mask)\n ROOT while = while(while_init), condition=condition, body=body\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n WhileLoopFusibleSinking{}.Run(module.get()));\n EXPECT_FALSE(changed);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_fusible_sinking.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_fusible_sinking_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1151,"cells":{"ID":{"kind":"string","value":"555aed98-8f55-421e-badb-302e32069668"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"reduce_scatter_decomposer"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/reduce_scatter_decomposer.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/reduce_scatter_decomposer_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/reduce_scatter_decomposer.h\"\n#include \n#include \n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_query.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/service/collective_decomposer_utils.h\"\n#include \"xla/service/collective_ops_utils.h\"\n#include \"xla/service/hlo_module_config.h\"\n#include \"xla/shape_util.h\"\nnamespace xla {\nabsl::StatusOr ReduceScatterDecomposer::Run(\n HloModule *module,\n const absl::flat_hash_set 
&execution_threads) {\n bool changed = false;\n int64_t next_channel_id = hlo_query::NextChannelId(*module);\n for (HloComputation *computation :\n module->MakeNonfusionComputations(execution_threads)) {\n for (HloInstruction *instruction :\n computation->MakeInstructionPostOrder()) {\n auto *rs = DynCast(instruction);\n if (!rs || !rs->shape().IsArray()) {\n continue;\n }\n std::optional channel_id;\n if (rs->channel_id()) {\n channel_id = next_channel_id++;\n }\n if (should_decompose_ && !should_decompose_(rs)) {\n continue;\n }\n VLOG(2) << \"Decompose: \" << rs->ToString();\n HloComputation *apply_clone = module->AddComputationAndUnifyNamesAndIds(\n rs->to_apply()->Clone(), false);\n HloInstruction *ar =\n computation->AddInstruction(HloInstruction::CreateAllReduce(\n rs->operand(0)->shape(), rs->operands(), apply_clone,\n rs->device_list(), rs->constrain_layout(), channel_id,\n rs->use_global_device_ids()));\n apply_clone->SetCollectiveCallInstruction(ar);\n TF_ASSIGN_OR_RETURN(\n CollectiveOpGroupMode group_mode,\n GetCollectiveOpGroupMode(rs->channel_id().has_value(),\n rs->use_global_device_ids()));\n TF_ASSIGN_OR_RETURN(\n std::vector start_indices,\n CreateStartIndicesForCollectiveDecomposition(\n group_mode, rs->replica_groups(), rs->shape(),\n rs->scatter_dimension(), computation, update_layout_));\n HloInstruction *ds =\n computation->AddInstruction(HloInstruction::CreateDynamicSlice(\n rs->shape(), ar, start_indices, rs->shape().dimensions()));\n TF_RETURN_IF_ERROR(rs->ReplaceAllUsesWith(ds));\n TF_RETURN_IF_ERROR(computation->RemoveInstruction(rs));\n changed = true;\n }\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/reduce_scatter_decomposer.h\"\n#include \n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/service/collective_ops_utils.h\"\n#include 
\"xla/tests/hlo_test_base.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nnamespace op = xla::testing::opcode_matchers;\nclass ReduceScatterDecomposerTest : public HloTestBase {\n public:\n enum class PassAction {\n kNoChange,\n kTrivialGroups,\n kTableLookup,\n };\n void RunPass(\n absl::string_view hlo_module, PassAction action,\n CollectiveOpGroupMode mode = CollectiveOpGroupMode::kCrossReplica,\n int64_t shard_size = 0, int64_t shard_dimension = 0,\n int64_t replica_count = 2,\n std::function should_decompose =\n [](const HloInstruction *) { return true; }) {\n const int64_t partition_count = 2;\n TF_ASSERT_OK_AND_ASSIGN(\n auto module, ParseAndReturnVerifiedModule(hlo_module, replica_count,\n partition_count));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ReduceScatterDecomposer(nullptr,\n should_decompose)\n .Run(module.get()));\n if (action == PassAction::kNoChange) {\n ASSERT_FALSE(changed);\n return;\n }\n ASSERT_TRUE(changed);\n Literal multiplier = LiteralUtil::CreateR0(shard_size);\n ::testing::Matcher id_matcher = [&]() {\n switch (mode) {\n case CollectiveOpGroupMode::kCrossPartition:\n return op::PartitionId();\n case CollectiveOpGroupMode::kCrossReplica:\n return op::ReplicaId();\n case CollectiveOpGroupMode::kCrossReplicaAndPartition:\n return op::ReplicaId();\n case CollectiveOpGroupMode::kFlattenedID: {\n return op::Add(\n op::Multiply(op::ReplicaId(),\n op::Constant(LiteralUtil::CreateR0(\n partition_count))),\n op::PartitionId());\n }\n }\n }();\n auto root = module->entry_computation()->root_instruction();\n const Shape &shape = root->shape();\n ::testing::Matcher slice_index = id_matcher;\n if (action == PassAction::kTableLookup) {\n slice_index = op::Reshape(op::DynamicSlice(op::Constant(), id_matcher));\n }\n if (mode == CollectiveOpGroupMode::kCrossReplicaAndPartition) {\n slice_index = op::Add(\n op::Multiply(\n slice_index,\n op::Constant(LiteralUtil::CreateR0(partition_count))),\n op::PartitionId());\n }\n auto 
zero_matcher = op::Constant(LiteralUtil::Zero(U32));\n std::vector<::testing::Matcher> ds_operands(\n shape.rank() + 1, zero_matcher);\n ds_operands[0] = op::AllReduce(op::Parameter(0));\n ds_operands[shard_dimension + 1] =\n op::Multiply(slice_index, op::Constant(std::move(multiplier)));\n EXPECT_THAT(root, op::DynamicSlice(ds_operands));\n }\n};\nTEST_F(ReduceScatterDecomposerTest, TrivialReplicaID) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n ROOT rs = f32[4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={0}, to_apply=sum\n}\n)\";\n RunPass(hlo_string, PassAction::kTrivialGroups,\n CollectiveOpGroupMode::kCrossReplica,\n 4);\n}\nTEST_F(ReduceScatterDecomposerTest, TableLookupReplicaId) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[8] parameter(0)\n ROOT rs = f32[4] reduce-scatter(p0), replica_groups={{1, 0}}, dimensions={0}, to_apply=sum\n}\n)\";\n RunPass(hlo_string, PassAction::kTableLookup,\n CollectiveOpGroupMode::kCrossReplica,\n 4);\n}\nTEST_F(ReduceScatterDecomposerTest, TrivialCrossReplicaAndPartition) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[4, 8] parameter(0)\n ROOT rs = f32[4, 2] reduce-scatter(p0), replica_groups={{0, 1}}, channel_id=1, dimensions={1}, to_apply=sum\n}\n)\";\n RunPass(hlo_string, PassAction::kTrivialGroups,\n CollectiveOpGroupMode::kCrossReplicaAndPartition,\n 2, 1);\n}\nTEST_F(ReduceScatterDecomposerTest,\n TrivialCrossReplicaAndPartition_SingleReplica) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[4, 8] parameter(0)\n 
ROOT rs = f32[4, 4] reduce-scatter(p0), replica_groups={{0}}, channel_id=1, dimensions={1}, to_apply=sum\n}\n)\";\n RunPass(hlo_string, PassAction::kTrivialGroups,\n CollectiveOpGroupMode::kCrossPartition,\n 4, 1, 1);\n}\nTEST_F(ReduceScatterDecomposerTest, TableLookupFlattenedId) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[4, 8] parameter(0)\n ROOT rs = f32[4, 2] reduce-scatter(p0), replica_groups={{1,0, 3, 2}}, channel_id=1, dimensions={1}, to_apply=sum, use_global_device_ids=true\n}\n)\";\n RunPass(hlo_string, PassAction::kTableLookup,\n CollectiveOpGroupMode::kFlattenedID,\n 2, 1);\n}\nTEST_F(ReduceScatterDecomposerTest, NoChange) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[4, 8] parameter(0)\n ROOT rs = (f32[4, 2], f32[4,2]) reduce-scatter(p0, p0), replica_groups={{1,0, 3, 2}}, channel_id=1, dimensions={1}, to_apply=sum, use_global_device_ids=true\n}\n)\";\n RunPass(hlo_string, PassAction::kNoChange);\n}\nTEST_F(ReduceScatterDecomposerTest, NoChangeWithShouldDecompose) {\n absl::string_view hlo_string = R\"(\nHloModule m\nsum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n}\nENTRY main {\n p0 = f32[4, 8] parameter(0)\n ROOT rs = f32[4, 4] reduce-scatter(p0), replica_groups={{0,1}, {2,3}}, channel_id=1, dimensions={1}, to_apply=sum, use_global_device_ids=true\n}\n)\";\n RunPass(hlo_string, PassAction::kNoChange,\n CollectiveOpGroupMode::kCrossReplica,\n 0, 0,\n 2, [](const HloInstruction *) { return false; });\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_scatter_decomposer.cc"},"Test Code 
Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_scatter_decomposer_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1152,"cells":{"ID":{"kind":"string","value":"7d1d8932-f15f-4fd7-a669-a3307f9aec2f"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"while_loop_concat_code_motion"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/while_loop_concat_code_motion.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/while_loop_concat_code_motion_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/while_loop_concat_code_motion.h\"\n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/types/span.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/pass/hlo_pass_pipeline.h\"\n#include \"xla/service/hlo_dce.h\"\n#include \"xla/service/tuple_simplifier.h\"\n#include \"xla/service/while_loop_simplifier.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/status_macros.h\"\n#include \"xla/util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/status.h\"\nnamespace xla {\nnamespace {\nstruct ConcatGroup {\n ConcatGroup(std::vector elements, int64_t concat_dim,\n bool inserted_concat_dim)\n : elements(std::move(elements)),\n element_sizes(this->elements.size(), 1),\n 
element_offsets(this->elements.size(), 0),\n concat_dim(concat_dim),\n inserted_concat_dim(inserted_concat_dim) {\n if (inserted_concat_dim) {\n absl::c_iota(element_offsets, 0);\n } else {\n for (int64_t i = 0; i < element_sizes.size(); ++i) {\n element_sizes[i] = this->elements[i]->shape().dimensions(concat_dim);\n if (i > 0) {\n element_offsets[i] = element_offsets[i - 1] + element_sizes[i - 1];\n }\n }\n }\n }\n Shape GetConcatShape() const {\n if (inserted_concat_dim) {\n std::vector dims;\n const Shape& element_shape = elements.back()->shape();\n dims.reserve(element_shape.rank() + 1);\n for (int64_t i = 0; i < element_shape.rank(); ++i) {\n if (i == concat_dim) {\n dims.push_back(elements.size());\n }\n dims.push_back(element_shape.dimensions(i));\n }\n if (dims.size() == concat_dim) {\n dims.push_back(elements.size());\n }\n return ShapeUtil::MakeShape(element_shape.element_type(), dims);\n } else {\n int64_t dim_size = 0;\n for (int64_t size : element_sizes) {\n dim_size += size;\n }\n Shape shape = elements.back()->shape();\n shape.set_dimensions(concat_dim, dim_size);\n return shape;\n }\n }\n HloInstruction* CreateSlice(HloInstruction* full_data, int64_t element_index,\n HloComputation* comp) const {\n Shape shape = full_data->shape();\n shape.set_dimensions(concat_dim, element_sizes[element_index]);\n std::vector starts(shape.rank(), 0);\n std::vector limits(shape.dimensions().begin(),\n shape.dimensions().end());\n starts[concat_dim] = element_offsets[element_index];\n limits[concat_dim] += starts[concat_dim];\n auto slice = comp->AddInstruction(\n HloInstruction::CreateSlice(shape, full_data, starts, limits,\n std::vector(shape.rank(), 1)));\n if (!inserted_concat_dim) {\n return slice;\n }\n std::vector element_shape;\n element_shape.reserve(shape.rank() - 1);\n for (int64_t i = 0; i < shape.rank(); ++i) {\n if (i != concat_dim) {\n element_shape.push_back(shape.dimensions(i));\n }\n }\n return comp->AddInstruction(HloInstruction::CreateReshape(\n 
ShapeUtil::MakeShape(shape.element_type(), element_shape), slice));\n }\n HloInstruction* CreateConcat(std::vector input_elements,\n HloComputation* comp) const {\n if (inserted_concat_dim) {\n for (int64_t i = 0; i < input_elements.size(); ++i) {\n std::vector element_shape;\n element_shape.reserve(input_elements[i]->shape().rank() + 1);\n for (int64_t j = 0; j < input_elements[i]->shape().rank(); ++j) {\n if (j == concat_dim) {\n element_shape.push_back(1);\n }\n element_shape.push_back(input_elements[i]->shape().dimensions(j));\n }\n if (element_shape.size() == concat_dim) {\n element_shape.push_back(1);\n }\n input_elements[i] = comp->AddInstruction(HloInstruction::CreateReshape(\n ShapeUtil::MakeShape(input_elements[i]->shape().element_type(),\n element_shape),\n input_elements[i]));\n }\n }\n return comp->AddInstruction(HloInstruction::CreateConcatenate(\n GetConcatShape(), input_elements, concat_dim));\n }\n std::vector elements;\n std::vector element_sizes;\n std::vector element_offsets;\n int64_t concat_dim;\n bool inserted_concat_dim;\n};\nclass ConcatGroups {\n public:\n std::optional> GetGroupIndex(\n const HloInstruction* hlo) const {\n auto it = element_to_group_.find(hlo);\n if (it == element_to_group_.end()) {\n return std::nullopt;\n }\n return it->second;\n }\n const ConcatGroup& GetGroup(int64_t index) const { return groups_[index]; }\n std::pair MaybeCreateNewGroup(ConcatGroup group) {\n int64_t group_id = -1;\n absl::flat_hash_set elements_dedup;\n for (int64_t i = 0; i < group.elements.size(); ++i) {\n if (!elements_dedup.insert(group.elements[i]).second) {\n VLOG(2) << \"Duplicates in group. Element: \"\n << group.elements[i]->ToString();\n }\n if (concat_disallowed_.contains(group.elements[i])) {\n VLOG(2) << \"Failed creating group. 
Grouping disallowed on \"\n << group.elements[i]->ToString();\n return std::pair(false, -1);\n }\n auto existing = GetGroupIndex(group.elements[i]);\n if (existing.has_value() &&\n (i != existing->second ||\n groups_[existing->first].concat_dim != group.concat_dim)) {\n VLOG(2)\n << \"Failed creating group. Different than existing group. Element: \"\n << group.elements[i]->ToString();\n return std::pair(false, -1);\n }\n if (i == 0 && existing.has_value()) {\n group_id = existing->first;\n }\n if (i > 0) {\n if (existing.has_value() && existing->first != group_id) {\n VLOG(2) << \"Failed creating group. Different than existing group. \"\n \"Element: \"\n << group.elements[i]->ToString();\n return std::pair(false, -1);\n }\n if (!existing.has_value() && group_id >= 0) {\n VLOG(2) << \"Failed creating group. Different than existing group. \"\n \"Element: \"\n << group.elements[i]->ToString();\n return std::pair(false, -1);\n }\n }\n }\n if (group_id >= 0) {\n VLOG(2) << \"Group already exists at \" << group_id << \" for \"\n << group.elements[0]->ToString();\n return std::pair(false, group_id);\n }\n int64_t index = groups_.size();\n for (int64_t i = 0; i < group.elements.size(); ++i) {\n element_to_group_[group.elements[i]] =\n std::pair(index, i);\n }\n VLOG(2) << \"Created new group at \" << index << \" for \"\n << group.elements[0]->ToString()\n << \", concat_dim: \" << group.concat_dim\n << \", inserted: \" << group.inserted_concat_dim;\n groups_.push_back(std::move(group));\n return std::pair(true, index);\n }\n const std::vector& Groups() const { return groups_; }\n int64_t NextGroupIndex() const { return groups_.size(); }\n void RemoveTailingGroups(int64_t start_index) {\n while (groups_.size() > start_index) {\n for (auto element : groups_.back().elements) {\n element_to_group_.erase(element);\n }\n groups_.pop_back();\n }\n }\n void DisallowGroupingOn(const HloInstruction* hlo) {\n VLOG(2) << \"Disallow grouping on \" << hlo->ToString();\n 
concat_disallowed_.insert(hlo);\n }\n private:\n absl::flat_hash_map>\n element_to_group_;\n std::vector groups_;\n absl::flat_hash_set concat_disallowed_;\n};\nstd::optional> GetOperandConcatDim(\n const HloInstruction* hlo, int64_t operand_index, int64_t hlo_concat_dim,\n bool hlo_inserted_concat_dim,\n const ConcatGroup* combined_operand_group = nullptr) {\n if (hlo->IsElementwise() || hlo->opcode() == HloOpcode::kAllReduce) {\n return std::pair(hlo_concat_dim, hlo_inserted_concat_dim);\n }\n int64_t operand_concat_dim = -1;\n bool operand_inserted_concat_dim = false;\n const Shape& operand_shape =\n combined_operand_group == nullptr\n ? hlo->operand(operand_index)->shape()\n : combined_operand_group->elements.back()->shape();\n if (hlo->opcode() == HloOpcode::kBroadcast) {\n operand_concat_dim = 0;\n operand_inserted_concat_dim = true;\n int64_t min_dist_to_concat_dim = hlo->shape().rank();\n for (int64_t i = 0; i < operand_shape.rank(); ++i) {\n if (hlo->dimensions(i) == hlo_concat_dim) {\n operand_concat_dim = i;\n operand_inserted_concat_dim = hlo_inserted_concat_dim;\n break;\n }\n if (hlo->dimensions(i) < hlo_concat_dim &&\n min_dist_to_concat_dim > hlo_concat_dim - hlo->dimensions(i)) {\n operand_concat_dim = i + 1;\n min_dist_to_concat_dim = hlo_concat_dim - hlo->dimensions(i);\n }\n if (hlo->dimensions(i) > hlo_concat_dim &&\n min_dist_to_concat_dim > hlo->dimensions(i) - hlo_concat_dim) {\n operand_concat_dim = i;\n min_dist_to_concat_dim = hlo->dimensions(i) - hlo_concat_dim;\n }\n }\n } else if (hlo->opcode() == HloOpcode::kReduce) {\n if (operand_index != 0) {\n return std::nullopt;\n }\n operand_concat_dim = hlo_concat_dim;\n operand_inserted_concat_dim = hlo_inserted_concat_dim;\n std::set sorted_reduce_dims;\n for (int64_t dim : hlo->dimensions()) {\n sorted_reduce_dims.insert(dim);\n }\n for (int64_t dim : sorted_reduce_dims) {\n if ((hlo_inserted_concat_dim && dim < operand_concat_dim) ||\n (!hlo_inserted_concat_dim && dim <= 
operand_concat_dim)) {\n operand_concat_dim++;\n }\n }\n } else if (hlo->opcode() == HloOpcode::kReshape) {\n int64_t i = 0;\n int64_t j = 0;\n operand_inserted_concat_dim = false;\n while (i < operand_shape.rank() || j <= hlo_concat_dim) {\n if (i < operand_shape.rank() && j < hlo->shape().rank() &&\n operand_shape.dimensions(i) == hlo->shape().dimensions(j)) {\n if (j == hlo_concat_dim) {\n operand_inserted_concat_dim =\n hlo_inserted_concat_dim && operand_shape.dimensions(i) != 1;\n operand_concat_dim = i;\n break;\n }\n i++;\n j++;\n continue;\n }\n if (i < operand_shape.rank() && operand_shape.dimensions(i) == 1) {\n if (j == hlo_concat_dim && hlo_inserted_concat_dim) {\n operand_concat_dim = i;\n break;\n }\n i++;\n continue;\n }\n if (j == hlo_concat_dim) {\n operand_concat_dim = i;\n operand_inserted_concat_dim = true;\n break;\n }\n if (j < hlo->shape().rank() && hlo->shape().dimensions(j) == 1) {\n j++;\n continue;\n }\n return std::nullopt;\n }\n } else {\n return std::nullopt;\n }\n CHECK_GE(operand_concat_dim, 0);\n return std::pair(operand_concat_dim,\n operand_inserted_concat_dim);\n}\nvoid ModifyHloPropertiesForConcatShape(const ConcatGroup& group,\n HloInstruction* hlo) {\n *hlo->mutable_shape() = group.GetConcatShape();\n if (hlo->opcode() == HloOpcode::kBroadcast) {\n auto operand_dim = GetOperandConcatDim(\n group.elements.back(), 0, group.concat_dim, group.inserted_concat_dim);\n CHECK(operand_dim.has_value());\n int64_t operand_concat_dim = operand_dim->first;\n bool operand_inserted_concat_dim = operand_dim->second;\n if (operand_inserted_concat_dim) {\n CHECK_EQ(hlo->operand(0)->shape().rank(), hlo->dimensions().size() + 1)\n << hlo->ToString();\n } else {\n CHECK_EQ(hlo->operand(0)->shape().rank(), hlo->dimensions().size());\n }\n std::vector dims;\n const int64_t rank = hlo->operand(0)->shape().rank();\n dims.reserve(rank);\n for (int64_t i = 0; i < rank; ++i) {\n if (i == operand_concat_dim && operand_inserted_concat_dim) {\n 
dims.push_back(group.concat_dim);\n } else {\n if (i > operand_concat_dim && operand_inserted_concat_dim) {\n dims.push_back(hlo->dimensions(i - 1));\n } else {\n dims.push_back(hlo->dimensions(i));\n }\n if (group.inserted_concat_dim && dims.back() >= group.concat_dim) {\n dims.back()++;\n }\n }\n }\n *hlo->mutable_dimensions() = std::move(dims);\n } else if (hlo->opcode() == HloOpcode::kReduce) {\n auto operand_dim = GetOperandConcatDim(\n group.elements.back(), 0, group.concat_dim, group.inserted_concat_dim);\n int64_t operand_concat_dim = operand_dim->first;\n bool operand_inserted_concat_dim = operand_dim->second;\n CHECK(operand_dim.has_value());\n if (operand_inserted_concat_dim) {\n auto dims = hlo->mutable_dimensions();\n for (int64_t i = 0; i < dims->size(); ++i) {\n if ((*dims)[i] >= operand_concat_dim) {\n (*dims)[i]++;\n }\n }\n }\n }\n}\nbool GroupHlosForConcat(\n HloComputation* body, HloInstruction* concat,\n absl::flat_hash_map topological_order,\n ConcatGroups* groups) {\n const int64_t group_size = concat->operand_count();\n absl::flat_hash_set used_groups;\n auto root_tuple = body->root_instruction();\n CHECK_EQ(root_tuple->opcode(), HloOpcode::kTuple);\n absl::flat_hash_map root_tuple_element_use_count;\n for (auto operand : root_tuple->operands()) {\n root_tuple_element_use_count.emplace(operand, 0).first->second++;\n }\n std::multimap pq;\n const int64_t first_group_id_to_create = groups->NextGroupIndex();\n auto fail_and_cleanup = [&] {\n VLOG(1) << \"Failed to get the subcomputation to optimize for \"\n << concat->ToString() << \", clear groups starting at \"\n << first_group_id_to_create;\n groups->RemoveTailingGroups(first_group_id_to_create);\n return false;\n };\n struct GroupUse {\n int64_t group_id;\n bool newly_created;\n bool already_used_by_subcomp;\n };\n auto maybe_create_group = [&](ConcatGroup group) {\n auto res = groups->MaybeCreateNewGroup(std::move(group));\n GroupUse use{res.second, false, false};\n if (res.second < 0) {\n 
return use;\n }\n use.newly_created = res.first;\n use.already_used_by_subcomp = !used_groups.insert(res.second).second;\n return use;\n };\n std::vector concat_operands(concat->operands().begin(),\n concat->operands().end());\n int64_t concat_operand_order = -topological_order[concat_operands[0]];\n pq.emplace(concat_operand_order,\n ConcatGroup(std::move(concat_operands),\n concat->concatenate_dimension(), false));\n while (!pq.empty()) {\n auto group = std::move(pq.begin()->second);\n pq.erase(pq.begin());\n const auto& hlos = group.elements;\n VLOG(2) << \"GroupHlosForConcat dequeued \" << hlos[0]->ToString();\n bool group_is_param_gtes = false;\n if (absl::c_all_of(hlos, [&](const HloInstruction* element) {\n return element == hlos[0];\n })) {\n if (groups->GetGroupIndex(hlos[0]).has_value()) {\n VLOG(1) << \"We do not support the case if a shared operand also part \"\n \"of a group: \"\n << hlos[0]->ToString();\n return fail_and_cleanup();\n }\n groups->DisallowGroupingOn(hlos[0]);\n continue;\n }\n if (absl::c_all_of(hlos, [&](const HloInstruction* element) {\n return element->opcode() == HloOpcode::kGetTupleElement &&\n element->operand(0) == body->parameter_instruction(0);\n })) {\n group_is_param_gtes = true;\n } else if (((hlos[0]->IsElementwise() ||\n hlos[0]->opcode() == HloOpcode::kAllReduce) &&\n !hlos[0]->HasSideEffect()) ||\n hlos[0]->opcode() == HloOpcode::kBroadcast ||\n hlos[0]->opcode() == HloOpcode::kReduce ||\n hlos[0]->opcode() == HloOpcode::kReshape ||\n hlos[0]->IsCustomCall(\"Sharding\")) {\n if (hlos[0]->opcode() == HloOpcode::kAllReduce &&\n (!hlos[0]->shape().IsArray() || hlos[0]->IsCrossModuleAllReduce())) {\n VLOG(2) << \"Unsupported allreduce: \" << hlos[0]->ToString();\n return fail_and_cleanup();\n }\n if (absl::c_any_of(hlos, [&](const HloInstruction* element) {\n auto eq_operand = [](const HloInstruction* a,\n const HloInstruction* b) {\n return ShapeUtil::Compatible(a->shape(), b->shape());\n };\n auto eq_computations = 
[](const HloComputation* lhs,\n const HloComputation* rhs) {\n return lhs->Equal(*rhs, false);\n };\n if (!hlos[0]->Identical(*element, eq_operand, eq_computations,\n false)) {\n return true;\n }\n if (element->opcode() == HloOpcode::kReduce &&\n (element->operand_count() != 2 ||\n element->operand(1) != hlos[0]->operand(1))) {\n return true;\n }\n return false;\n })) {\n VLOG(2) << \"Different types of elements. First element: \"\n << hlos[0]->ToString();\n return fail_and_cleanup();\n }\n int64_t input_count = hlos[0]->operand_count();\n if (hlos[0]->opcode() == HloOpcode::kReduce) {\n CHECK_EQ(input_count, 2);\n input_count = 1;\n }\n for (int64_t i = 0; i < input_count; ++i) {\n std::vector elements(group_size);\n for (int64_t j = 0; j < group_size; ++j) {\n elements[j] = hlos[j]->mutable_operand(i);\n }\n auto maybe_new_concat_dim = GetOperandConcatDim(\n hlos[0], i, group.concat_dim, group.inserted_concat_dim);\n if (!maybe_new_concat_dim.has_value()) {\n VLOG(2) << \"Cannot find operand concat dimension for operand \" << i\n << \" of \" << hlos[0]->ToString();\n return fail_and_cleanup();\n }\n int64_t new_group_concat_dim = maybe_new_concat_dim->first;\n bool inserted_concat_dim = maybe_new_concat_dim->second;\n int64_t element_order = -topological_order[elements[0]];\n pq.emplace(element_order,\n ConcatGroup(std::move(elements), new_group_concat_dim,\n inserted_concat_dim));\n }\n } else if (hlos[0]->opcode() == HloOpcode::kSlice) {\n int64_t offset = 0;\n auto operand = hlos[0]->operand(0);\n if (group.inserted_concat_dim) {\n VLOG(2) << \"Slices cannot be grouped on new dimension.\";\n return fail_and_cleanup();\n }\n if (groups->GetGroupIndex(operand).has_value()) {\n return fail_and_cleanup();\n }\n groups->DisallowGroupingOn(operand);\n for (int64_t i = 0; i < group_size; ++i) {\n if (hlos[i]->operand(0) != operand) {\n VLOG(2) << \"Slices of different operands.\";\n return fail_and_cleanup();\n }\n for (int64_t j = 0; j < hlos[i]->shape().rank(); 
++j) {\n if (hlos[i]->slice_strides(j) != 1) {\n VLOG(2) << \"Slices with strides.\";\n return fail_and_cleanup();\n }\n if (j == group.concat_dim) {\n if (hlos[i]->slice_starts(j) != offset) {\n VLOG(2) << \"Slices with unsupported offsets.\";\n return fail_and_cleanup();\n }\n offset += hlos[i]->shape().dimensions(j);\n } else {\n if (hlos[i]->slice_starts(j) != 0 ||\n hlos[i]->slice_limits(j) != operand->shape().dimensions(j)) {\n VLOG(2) << \"Slice with unsupported offsets at dimension \" << j\n << \", \" << hlos[i]->ToString();\n return fail_and_cleanup();\n }\n }\n }\n }\n if (offset != operand->shape().dimensions(group.concat_dim)) {\n VLOG(2) << \"Slices with unsupported sizes.\";\n return fail_and_cleanup();\n }\n } else {\n VLOG(2) << \"Unsupported opcode: \" << hlos[0]->ToString();\n return fail_and_cleanup();\n }\n auto guse = maybe_create_group(std::move(group));\n if (guse.group_id < 0) {\n VLOG(2) << \"Failed to create group.\";\n return fail_and_cleanup();\n }\n const auto& registered_group = groups->GetGroup(guse.group_id);\n if (!guse.already_used_by_subcomp && group_is_param_gtes) {\n std::vector new_outputs(group_size);\n for (int64_t i = 0; i < group_size; ++i) {\n new_outputs[i] = root_tuple->mutable_operand(\n registered_group.elements[i]->tuple_index());\n }\n int64_t new_output_order = -topological_order[new_outputs[0]];\n pq.emplace(\n new_output_order,\n ConcatGroup(std::move(new_outputs), registered_group.concat_dim,\n registered_group.inserted_concat_dim));\n }\n }\n return groups->Groups().size() > first_group_id_to_create;\n}\nstd::vector TupleElementsUsedInCond(HloInstruction* loop) {\n std::vector result(loop->shape().tuple_shapes_size(), false);\n for (auto user : loop->while_condition()->parameter_instruction(0)->users()) {\n if (user->opcode() != HloOpcode::kGetTupleElement) {\n absl::c_fill(result, true);\n return result;\n }\n result[user->tuple_index()] = true;\n }\n return result;\n}\nabsl::Status 
AddCopiesToRoot(HloComputation* body,\n absl::Span param_gtes,\n ConcatGroups* groups) {\n auto root = body->root_instruction();\n CHECK_EQ(root->opcode(), HloOpcode::kTuple);\n std::vector copies(root->operand_count(), nullptr);\n for (int64_t i = 0; i < copies.size(); ++i) {\n auto element = root->mutable_operand(i);\n if (!element->shape().IsArray()) {\n continue;\n }\n copies[i] = body->AddInstruction(HloInstruction::CreateUnary(\n element->shape(), HloOpcode::kCopy, element));\n TF_RETURN_IF_ERROR(root->ReplaceOperandWith(i, copies[i]));\n }\n for (int64_t i = 0; i < copies.size(); ++i) {\n auto copy = copies[i];\n if (groups->GetGroupIndex(copy).has_value()) {\n continue;\n }\n auto param_group_index = groups->GetGroupIndex(param_gtes[i]);\n if (!param_group_index.has_value()) {\n continue;\n }\n const auto& param_group = groups->GetGroup(param_group_index->first);\n std::vector copy_group(param_group.elements.size());\n for (int64_t j = 0; j < copy_group.size(); ++j) {\n copy_group[j] = copies[param_group.elements[j]->tuple_index()];\n }\n CHECK(groups\n ->MaybeCreateNewGroup(\n ConcatGroup(std::move(copy_group), param_group.concat_dim,\n param_group.inserted_concat_dim))\n .first);\n }\n return absl::OkStatus();\n}\nabsl::Status RemoveCopiesFromRoot(HloComputation* body) {\n auto root = body->root_instruction();\n CHECK_EQ(root->opcode(), HloOpcode::kTuple);\n for (int64_t i = 0; i < root->operand_count(); ++i) {\n auto copy = root->mutable_operand(i);\n if (copy->opcode() == HloOpcode::kCopy) {\n TF_RETURN_IF_ERROR(root->ReplaceOperandWith(i, copy->mutable_operand(0)));\n }\n }\n return absl::OkStatus();\n}\nabsl::Status RewriteLoopWithConcatGroups(\n HloInstruction* loop, absl::Span param_gtes,\n ConcatGroups& groups) {\n VLOG(1) << \"RewriteLoopWithConcatGroups with \" << groups.Groups().size()\n << \" groups.\";\n absl::flat_hash_set processed_groups;\n auto body = loop->while_body();\n auto param = body->parameter_instruction(0);\n auto cond_param = 
loop->while_condition()->parameter_instruction(0);\n std::vector init_elements(loop->shape().tuple_shapes_size());\n for (int64_t i = 0; i < param_gtes.size(); ++i) {\n init_elements[i] =\n loop->parent()->AddInstruction(HloInstruction::CreateGetTupleElement(\n loop->shape().tuple_shapes(i), loop->mutable_operand(0), i));\n }\n for (int64_t i = 0; i < param_gtes.size(); ++i) {\n const auto& group_and_index = groups.GetGroupIndex(param_gtes[i]);\n if (!group_and_index.has_value() || group_and_index->second != 0) {\n continue;\n }\n const auto& group = groups.GetGroup(group_and_index->first);\n *param_gtes[i]->mutable_shape() = group.GetConcatShape();\n *param->mutable_shape()->mutable_tuple_shapes(i) = param_gtes[i]->shape();\n *body->root_instruction()->mutable_shape()->mutable_tuple_shapes(i) =\n param_gtes[i]->shape();\n *cond_param->mutable_shape()->mutable_tuple_shapes(i) =\n param_gtes[i]->shape();\n *loop->mutable_shape()->mutable_tuple_shapes(i) = param_gtes[i]->shape();\n processed_groups.insert(group_and_index->first);\n std::vector input_concat_elements;\n input_concat_elements.reserve(group.elements.size());\n for (auto param_gte : group.elements) {\n input_concat_elements.push_back(init_elements[param_gte->tuple_index()]);\n }\n init_elements[i] =\n group.CreateConcat(std::move(input_concat_elements), loop->parent());\n }\n TF_RETURN_IF_ERROR(loop->ReplaceOperandWithDifferentShape(\n 0, loop->parent()->AddInstruction(\n HloInstruction::CreateTuple(init_elements))));\n auto original_loop_users = loop->users();\n const bool loop_is_root = loop == loop->parent()->root_instruction();\n std::vector output_elements(\n loop->shape().tuple_shapes_size());\n for (int64_t i = 0; i < param_gtes.size(); ++i) {\n output_elements[i] =\n loop->parent()->AddInstruction(HloInstruction::CreateGetTupleElement(\n init_elements[i]->shape(), loop, i));\n }\n for (int64_t i = 0; i < param_gtes.size(); ++i) {\n const auto& group_and_index = 
groups.GetGroupIndex(param_gtes[i]);\n if (!group_and_index.has_value() || group_and_index->second != 0) {\n continue;\n }\n const auto& group = groups.GetGroup(group_and_index->first);\n auto concat_output = output_elements[group.elements[0]->tuple_index()];\n for (int64_t j = 0; j < group.elements.size(); ++j) {\n const auto param_gte = group.elements[j];\n output_elements[param_gte->tuple_index()] =\n group.CreateSlice(concat_output, j, loop->parent());\n }\n }\n auto new_output_tuple = loop->parent()->AddInstruction(\n HloInstruction::CreateTuple(output_elements));\n for (auto user : original_loop_users) {\n TF_RETURN_IF_ERROR(\n loop->ReplaceUseWithDifferentShape(user, new_output_tuple));\n }\n if (loop_is_root) {\n loop->parent()->set_root_instruction(new_output_tuple,\n true);\n }\n std::vector slices_to_remove;\n absl::flat_hash_set new_reshapes;\n for (auto hlo : body->MakeInstructionPostOrder()) {\n const auto& group_and_index = groups.GetGroupIndex(hlo);\n if (!group_and_index.has_value() || group_and_index->second != 0) {\n continue;\n }\n if (!processed_groups.insert(group_and_index->first).second) {\n continue;\n }\n const auto& group = groups.GetGroup(group_and_index->first);\n if (hlo->opcode() == HloOpcode::kSlice) {\n slices_to_remove.push_back(hlo);\n } else {\n int64_t operand_count_to_adjust = hlo->operand_count();\n if (hlo->opcode() == HloOpcode::kReduce) {\n CHECK_EQ(operand_count_to_adjust, 2);\n operand_count_to_adjust = 1;\n }\n for (int64_t i = 0; i < operand_count_to_adjust; ++i) {\n auto operand_group_index = groups.GetGroupIndex(hlo->operand(i));\n const ConcatGroup* operand_group =\n operand_group_index.has_value()\n ? 
&groups.GetGroup(operand_group_index->first)\n : nullptr;\n auto maybe_operand_concat_dim = GetOperandConcatDim(\n hlo, i, group.concat_dim, group.inserted_concat_dim, operand_group);\n CHECK(maybe_operand_concat_dim.has_value())\n << \"Operand \" << i << \" of \" << hlo->ToString();\n int64_t operand_concat_dim = maybe_operand_concat_dim->first;\n bool operand_inserted_concat_dim = maybe_operand_concat_dim->second;\n if (operand_group != nullptr) {\n CHECK_EQ(operand_concat_dim, operand_group->concat_dim);\n if (operand_inserted_concat_dim !=\n operand_group->inserted_concat_dim) {\n std::vector new_dims;\n int64_t d = 0;\n for (; d < operand_concat_dim; ++d) {\n new_dims.push_back(hlo->operand(i)->shape().dimensions(d));\n }\n if (operand_inserted_concat_dim) {\n new_dims.push_back(group.elements.size());\n new_dims.push_back(\n hlo->operand(i)->shape().dimensions(operand_concat_dim) /\n group.elements.size());\n d = operand_concat_dim + 1;\n } else {\n new_dims.push_back(\n group.elements.size() *\n hlo->operand(i)->shape().dimensions(operand_concat_dim + 1));\n d = operand_concat_dim + 2;\n }\n for (; d < hlo->operand(i)->shape().rank(); ++d) {\n new_dims.push_back(hlo->operand(i)->shape().dimensions(d));\n }\n auto reshape = body->AddInstruction(HloInstruction::CreateReshape(\n ShapeUtil::MakeShape(hlo->operand(i)->shape().element_type(),\n new_dims),\n hlo->mutable_operand(i)));\n new_reshapes.insert(reshape);\n TF_RETURN_IF_ERROR(\n hlo->ReplaceOperandWithDifferentShape(i, reshape));\n }\n continue;\n }\n CHECK(\n absl::c_all_of(group.elements, [&](const HloInstruction* element) {\n return element->operand(i) == hlo->operand(i);\n }));\n VLOG(2) << \"Broadcasting shared operand \"\n << hlo->operand(i)->ToString();\n Shape data_shape = hlo->operand(i)->shape();\n std::vector broadcast_dims;\n std::vector broadcast_shape;\n const int64_t data_shape_rank = data_shape.rank();\n broadcast_dims.reserve(data_shape_rank);\n broadcast_shape.reserve(data_shape_rank + 
1);\n for (int64_t j = 0; j < data_shape_rank; ++j) {\n if (j < operand_concat_dim) {\n broadcast_dims.push_back(j);\n } else {\n broadcast_dims.push_back(j + 1);\n }\n if (j == operand_concat_dim) {\n broadcast_shape.push_back(group.elements.size());\n }\n broadcast_shape.push_back(data_shape.dimensions(j));\n }\n if (broadcast_shape.size() == data_shape.rank()) {\n broadcast_shape.push_back(group.elements.size());\n }\n auto broadcast = body->AddInstruction(HloInstruction::CreateBroadcast(\n ShapeUtil::MakeShape(data_shape.element_type(), broadcast_shape),\n hlo->mutable_operand(i), broadcast_dims));\n if (!operand_inserted_concat_dim) {\n data_shape.set_dimensions(\n operand_concat_dim,\n data_shape.dimensions(operand_inserted_concat_dim) *\n group.elements.size());\n broadcast = body->AddInstruction(\n HloInstruction::CreateReshape(data_shape, broadcast));\n }\n TF_RETURN_IF_ERROR(hlo->ReplaceOperandWithDifferentShape(i, broadcast));\n }\n }\n VLOG(2) << \"Modifying HLO to full shape \" << hlo->ToString();\n ModifyHloPropertiesForConcatShape(group, hlo);\n VLOG(2) << \"Modified HLO to full shape \" << hlo->ToString();\n }\n for (auto hlo : body->MakeInstructionPostOrder()) {\n if (new_reshapes.contains(hlo)) {\n continue;\n }\n const auto& group_and_index = groups.GetGroupIndex(hlo);\n if ((!group_and_index.has_value() || hlo->opcode() == HloOpcode::kReduce) &&\n hlo != body->root_instruction()) {\n auto operands = hlo->operands();\n if (group_and_index.has_value()) {\n CHECK_EQ(operands.size(), 2);\n CHECK_EQ(hlo->opcode(), HloOpcode::kReduce);\n operands.erase(operands.begin());\n }\n for (int64_t i = 0; i < operands.size(); ++i) {\n auto operand = operands[i];\n auto operand_group_index = groups.GetGroupIndex(operand);\n if (!operand_group_index.has_value()) {\n continue;\n }\n const auto& operand_group = groups.GetGroup(operand_group_index->first);\n auto slice = operand_group.CreateSlice(\n operand_group.elements[0], operand_group_index->second, body);\n 
TF_RETURN_IF_ERROR(hlo->ReplaceOperandWithDifferentShape(i, slice));\n }\n }\n }\n for (auto slice : slices_to_remove) {\n TF_RETURN_IF_ERROR(slice->ReplaceAllUsesWith(slice->mutable_operand(0)));\n TF_RETURN_IF_ERROR(body->RemoveInstruction(slice));\n }\n return absl::OkStatus();\n}\nabsl::StatusOr RunOnLoop(HloInstruction* loop,\n int64_t min_operand_count_to_optimize) {\n auto body = loop->while_body();\n auto param = body->parameter_instruction(0);\n auto root = body->root_instruction();\n if (!param->shape().IsTuple() || root->opcode() != HloOpcode::kTuple) {\n return false;\n }\n std::vector gtes(param->shape().tuple_shapes_size(),\n nullptr);\n ConcatGroups groups;\n auto indices_used_in_cond = TupleElementsUsedInCond(loop);\n for (auto user : param->users()) {\n if (user->opcode() != HloOpcode::kGetTupleElement) {\n return false;\n }\n int64_t idx = user->tuple_index();\n if (gtes[idx] != nullptr) {\n return false;\n }\n gtes[idx] = user;\n if (indices_used_in_cond[idx]) {\n groups.DisallowGroupingOn(user);\n }\n }\n std::vector concats;\n auto body_instructions = body->MakeInstructionPostOrder();\n absl::flat_hash_map topological_order;\n for (int64_t i = 0; i < body_instructions.size(); ++i) {\n auto hlo = body_instructions[i];\n topological_order[hlo] = i;\n if (hlo->opcode() == HloOpcode::kConcatenate &&\n hlo->operand_count() >= min_operand_count_to_optimize) {\n concats.push_back(hlo);\n }\n }\n for (auto& concat : concats) {\n if (!GroupHlosForConcat(body, concat, topological_order, &groups)) {\n concat = nullptr;\n }\n }\n if (groups.Groups().empty()) {\n return false;\n }\n TF_RETURN_IF_ERROR(AddCopiesToRoot(body, gtes, &groups));\n TF_RETURN_IF_ERROR(RewriteLoopWithConcatGroups(loop, gtes, groups));\n for (auto concat : concats) {\n if (concat == nullptr) {\n continue;\n }\n auto new_slice = concat->mutable_operand(0);\n CHECK_EQ(new_slice->opcode(), HloOpcode::kSlice);\n TF_RETURN_IF_ERROR(\n 
concat->ReplaceAllUsesWith(new_slice->mutable_operand(0)));\n TF_RETURN_IF_ERROR(body->RemoveInstruction(concat));\n }\n TF_RETURN_IF_ERROR(RemoveCopiesFromRoot(body));\n for (auto gte : gtes) {\n auto group_index = groups.GetGroupIndex(gte);\n if (group_index.has_value() && group_index->second > 0) {\n TF_RETURN_IF_ERROR(root->ReplaceOperandWith(gte->tuple_index(), gte));\n }\n }\n return true;\n}\n} \nabsl::StatusOr WhileLoopConcatCodeMotion::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n bool changed = false;\n for (HloComputation* comp :\n module->MakeComputationPostOrder(execution_threads)) {\n for (HloInstruction* hlo : comp->MakeInstructionPostOrder()) {\n if (hlo->opcode() == HloOpcode::kWhile) {\n TF_ASSIGN_OR_RETURN(bool loop_changed,\n RunOnLoop(hlo, min_operand_count_to_optimize_));\n changed |= loop_changed;\n }\n }\n }\n if (changed) {\n HloPassPipeline pipeline(\"loop-concat-motion-cleanup\");\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass();\n TF_RETURN_IF_ERROR(pipeline.Run(module, execution_threads).status());\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/while_loop_concat_code_motion.h\"\n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/service/hlo_verifier.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\n#include \"xla/xla_data.pb.h\"\nnamespace xla {\nnamespace {\nnamespace op = ::xla::testing::opcode_matchers;\nclass WhileLoopConcatCodeMotionTest : public HloTestBase {};\nTEST_F(WhileLoopConcatCodeMotionTest, SimpleMotion) {\n constexpr absl::string_view kHloModule = 
R\"(\n HloModule test\n %cond {\n %param = (s32[], f32[1024,1024], f32[1024,1024]) parameter(0)\n %gte.0 = s32[] get-tuple-element(%param), index=0\n %constant = s32[] constant(5)\n ROOT result = pred[] compare(%gte.0, %constant), direction=LT\n }\n %body {\n %param = (s32[], f32[1024,1024], f32[1024,1024]) parameter(0)\n %gte.0 = s32[] get-tuple-element(%param), index=0\n %gte.1 = f32[1024,1024] get-tuple-element(%param), index=1\n %gte.2 = f32[1024,1024] get-tuple-element(%param), index=2\n %concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0}\n %ccall = f32[2048,1024] custom-call(%concat), custom_call_target=\"test\"\n %slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]}\n %slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]}\n %ccall2 = f32[1024,1024] custom-call(), custom_call_target=\"test2\"\n %add.0 = f32[1024,1024] add(%slice.0, %ccall2)\n %add.1 = f32[1024,1024] add(%slice.1, %ccall2)\n %t0 = token[] after-all()\n %outfeed = token[] outfeed(%slice.1, %t0)\n %constant = s32[] constant(1)\n %increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)\n ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024])\n tuple(%increment_iteration, %add.0, %add.1)\n }\n ENTRY test_main {\n %param.0 = f32[1024,1024] parameter(0)\n %param.1 = f32[1024,1024] parameter(1)\n %constant.0 = s32[] constant(0)\n %while_init = (s32[], f32[1024,1024], f32[1024,1024]) tuple(%constant.0, %param.0, %param.1)\n ROOT %while = (s32[], f32[1024,1024], f32[1024,1024]) while(%while_init), condition=%cond, body=%body\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloModule));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n WhileLoopConcatCodeMotion(2).Run(module.get()));\n ASSERT_TRUE(changed);\n VLOG(1) << module->ToString();\n auto loop = op::While(\n op::Tuple(op::Constant(),\n AllOf(op::Shape(\"f32[2048,1024]\"),\n op::Concatenate(op::Parameter(0), op::Parameter(1)))));\n ASSERT_THAT(\n 
module->entry_computation()->root_instruction(),\n op::Tuple(op::GetTupleElement(loop), op::Slice(op::GetTupleElement(loop)),\n op::Slice(op::GetTupleElement(loop))));\n auto while_op =\n module->entry_computation()->root_instruction()->operand(0)->operand(0);\n EXPECT_THAT(while_op->while_body()->root_instruction(),\n op::Tuple(op::Add(),\n op::Add(op::CustomCall(),\n op::Reshape(op::Broadcast(op::CustomCall())))));\n}\nTEST_F(WhileLoopConcatCodeMotionTest, NoMotionWithChangedElementOrder) {\n constexpr absl::string_view kHloModule = R\"(\n HloModule test\n %cond {\n %param = (s32[], f32[1024,1024], f32[1024,1024]) parameter(0)\n %gte.0 = s32[] get-tuple-element(%param), index=0\n %constant = s32[] constant(5)\n ROOT result = pred[] compare(%gte.0, %constant), direction=LT\n }\n %body {\n %param = (s32[], f32[1024,1024], f32[1024,1024]) parameter(0)\n %gte.0 = s32[] get-tuple-element(%param), index=0\n %gte.1 = f32[1024,1024] get-tuple-element(%param), index=1\n %gte.2 = f32[1024,1024] get-tuple-element(%param), index=2\n %concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0}\n %ccall = f32[2048,1024] custom-call(%concat), custom_call_target=\"test\"\n %slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]}\n %slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]}\n %constant = s32[] constant(1)\n %increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)\n ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024])\n tuple(%increment_iteration, %slice.1, %slice.0)\n }\n ENTRY test_main {\n %param.0 = f32[1024,1024] parameter(0)\n %param.1 = f32[1024,1024] parameter(1)\n %constant.0 = s32[] constant(0)\n %while_init = (s32[], f32[1024,1024], f32[1024,1024]) tuple(%constant.0, %param.0, %param.1)\n ROOT %while = (s32[], f32[1024,1024], f32[1024,1024]) while(%while_init), condition=%cond, body=%body\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloModule));\n 
TF_ASSERT_OK_AND_ASSIGN(bool changed,\n WhileLoopConcatCodeMotion(2).Run(module.get()));\n ASSERT_FALSE(changed);\n}\nTEST_F(WhileLoopConcatCodeMotionTest, CascadedConcats) {\n constexpr absl::string_view kHloModule = R\"(\n HloModule test\n %cond {\n %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)\n %gte.0 = s32[] get-tuple-element(%param), index=0\n %constant = s32[] constant(5)\n ROOT result = pred[] compare(%gte.0, %constant), direction=LT\n }\n %body {\n %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)\n %gte.0 = s32[] get-tuple-element(%param), index=0\n %gte.1 = f32[1024,1024] get-tuple-element(%param), index=1\n %gte.2 = f32[1024,1024] get-tuple-element(%param), index=2\n %concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0}\n %gte.3 = f32[1024,1024] get-tuple-element(%param), index=3\n %gte.4 = f32[1024,1024] get-tuple-element(%param), index=4\n %ccall = f32[2048,1024] custom-call(%concat), custom_call_target=\"test\"\n %slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]}\n %slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]}\n %add.0 = f32[1024,1024] add(%slice.0, %gte.3)\n %add.1 = f32[1024,1024] add(%slice.1, %gte.4)\n %add.2 = f32[1024,1024] add(%gte.3, %gte.3)\n %add.3 = f32[1024,1024] add(%gte.4, %gte.4)\n %constant = s32[] constant(1)\n %increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)\n ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])\n tuple(%increment_iteration, %add.0, %add.1, %add.2, %add.3)\n }\n ENTRY test_main {\n %param.0 = f32[1024,1024] parameter(0)\n %param.1 = f32[1024,1024] parameter(1)\n %param.2 = f32[1024,1024] parameter(2)\n %param.3 = f32[1024,1024] parameter(3)\n %constant.0 = s32[] constant(0)\n %while_init = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])\n tuple(%constant.0, %param.0, %param.1, %param.2, 
%param.3)\n ROOT %while = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])\n while(%while_init), condition=%cond, body=%body\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloModule));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n WhileLoopConcatCodeMotion(2).Run(module.get()));\n ASSERT_TRUE(changed);\n VLOG(1) << module->ToString();\n auto loop = op::While(\n op::Tuple(op::Constant(),\n AllOf(op::Shape(\"f32[2048,1024]\"),\n op::Concatenate(op::Parameter(0), op::Parameter(1))),\n AllOf(op::Shape(\"f32[2048,1024]\"),\n op::Concatenate(op::Parameter(2), op::Parameter(3)))));\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::Tuple(op::GetTupleElement(loop), op::Slice(op::GetTupleElement(loop)),\n op::Slice(op::GetTupleElement(loop)),\n op::Slice(op::GetTupleElement(loop)),\n op::Slice(op::GetTupleElement(loop))));\n}\nTEST_F(WhileLoopConcatCodeMotionTest, TwoConcatsSharedGroups) {\n constexpr absl::string_view kHloModule = R\"(\n HloModule test\n %cond {\n %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)\n %gte.0 = s32[] get-tuple-element(%param), index=0\n %constant = s32[] constant(5)\n ROOT result = pred[] compare(%gte.0, %constant), direction=LT\n }\n %body {\n %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)\n %gte.0 = s32[] get-tuple-element(%param), index=0\n %gte.1 = f32[1024,1024] get-tuple-element(%param), index=1\n %gte.2 = f32[1024,1024] get-tuple-element(%param), index=2\n %concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0}\n %ccall = f32[2048,1024] custom-call(%concat), custom_call_target=\"test\"\n %slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]}\n %slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]}\n %gte.3 = f32[1024,1024] get-tuple-element(%param), index=3\n %gte.4 = f32[1024,1024] get-tuple-element(%param), index=4\n 
%concat.1 = f32[2048,1024] concatenate(%gte.3, %gte.4), dimensions={0}\n %ccall.1 = f32[2048,1024] custom-call(%concat.1), custom_call_target=\"test\"\n %slice.2 = f32[1024,1024] slice(%ccall.1), slice={[0:1024], [0:1024]}\n %slice.3 = f32[1024,1024] slice(%ccall.1), slice={[1024:2048], [0:1024]}\n %add.0 = f32[1024,1024] add(%slice.0, %slice.2)\n %add.1 = f32[1024,1024] add(%slice.1, %slice.3)\n %sub.0 = f32[1024,1024] subtract(%slice.0, %slice.2)\n %sub.1 = f32[1024,1024] subtract(%slice.1, %slice.3)\n %constant = s32[] constant(1)\n %increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)\n ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])\n tuple(%increment_iteration, %add.0, %add.1, %sub.0, %sub.1)\n }\n ENTRY test_main {\n %param.0 = f32[1024,1024] parameter(0)\n %param.1 = f32[1024,1024] parameter(1)\n %param.2 = f32[1024,1024] parameter(2)\n %param.3 = f32[1024,1024] parameter(3)\n %constant.0 = s32[] constant(0)\n %while_init = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])\n tuple(%constant.0, %param.0, %param.1, %param.2, %param.3)\n ROOT %while = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])\n while(%while_init), condition=%cond, body=%body\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloModule));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n WhileLoopConcatCodeMotion(2).Run(module.get()));\n ASSERT_TRUE(changed);\n VLOG(1) << module->ToString();\n auto loop = op::While(\n op::Tuple(op::Constant(),\n AllOf(op::Shape(\"f32[2048,1024]\"),\n op::Concatenate(op::Parameter(0), op::Parameter(1))),\n AllOf(op::Shape(\"f32[2048,1024]\"),\n op::Concatenate(op::Parameter(2), op::Parameter(3)))));\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::Tuple(op::GetTupleElement(loop), op::Slice(op::GetTupleElement(loop)),\n op::Slice(op::GetTupleElement(loop)),\n op::Slice(op::GetTupleElement(loop)),\n 
op::Slice(op::GetTupleElement(loop))));\n}\nTEST_F(WhileLoopConcatCodeMotionTest, TwoConcatsDifferentOrders) {\n constexpr absl::string_view kHloModule = R\"(\n HloModule test\n %cond {\n %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)\n %gte.0 = s32[] get-tuple-element(%param), index=0\n %constant = s32[] constant(5)\n ROOT result = pred[] compare(%gte.0, %constant), direction=LT\n }\n %body {\n %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)\n %gte.0 = s32[] get-tuple-element(%param), index=0\n %gte.1 = f32[1024,1024] get-tuple-element(%param), index=1\n %gte.2 = f32[1024,1024] get-tuple-element(%param), index=2\n %concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0}\n %ccall = f32[2048,1024] custom-call(%concat), custom_call_target=\"test\"\n %slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]}\n %slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]}\n %gte.3 = f32[1024,1024] get-tuple-element(%param), index=3\n %gte.4 = f32[1024,1024] get-tuple-element(%param), index=4\n %concat.1 = f32[2048,1024] concatenate(%gte.3, %gte.4), dimensions={0}\n %ccall.1 = f32[2048,1024] custom-call(%concat.1), custom_call_target=\"test\"\n %slice.2 = f32[1024,1024] slice(%ccall.1), slice={[0:1024], [0:1024]}\n %slice.3 = f32[1024,1024] slice(%ccall.1), slice={[1024:2048], [0:1024]}\n %add.0 = f32[1024,1024] add(%slice.0, %slice.3)\n %add.1 = f32[1024,1024] add(%slice.1, %slice.2)\n %sub.0 = f32[1024,1024] subtract(%slice.0, %slice.2)\n %sub.1 = f32[1024,1024] subtract(%slice.1, %slice.3)\n %constant = s32[] constant(1)\n %increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)\n ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])\n tuple(%increment_iteration, %add.0, %add.1, %sub.0, %sub.1)\n }\n ENTRY test_main {\n %param.0 = f32[1024,1024] parameter(0)\n %param.1 = f32[1024,1024] parameter(1)\n 
%param.2 = f32[1024,1024] parameter(2)\n %param.3 = f32[1024,1024] parameter(3)\n %constant.0 = s32[] constant(0)\n %while_init = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])\n tuple(%constant.0, %param.0, %param.1, %param.2, %param.3)\n ROOT %while = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])\n while(%while_init), condition=%cond, body=%body\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloModule));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n WhileLoopConcatCodeMotion(2).Run(module.get()));\n EXPECT_TRUE(changed);\n VLOG(1) << module->ToString();\n auto loop = op::While(\n op::Tuple(op::Constant(), op::Parameter(0), op::Parameter(1),\n AllOf(op::Shape(\"f32[2048,1024]\"),\n op::Concatenate(op::Parameter(2), op::Parameter(3)))));\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::Tuple(op::GetTupleElement(loop), op::GetTupleElement(loop),\n op::GetTupleElement(loop), op::Slice(op::GetTupleElement(loop)),\n op::Slice(op::GetTupleElement(loop))));\n}\nTEST_F(WhileLoopConcatCodeMotionTest, NonElementwiseOps) {\n constexpr absl::string_view kHloModule = R\"(\n HloModule test\n %cond {\n %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1]) parameter(0)\n %gte.0 = s32[] get-tuple-element(%param), index=0\n %constant = s32[] constant(5)\n ROOT result = pred[] compare(%gte.0, %constant), direction=LT\n }\n %sum {\n %a = f32[] parameter(0)\n %b = f32[] parameter(1)\n ROOT %add = f32[] add(%a, %b)\n }\n %body {\n %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1]) parameter(0)\n %gte.0 = s32[] get-tuple-element(%param), index=0\n %gte.1 = f32[1024,1024] get-tuple-element(%param), index=1\n %gte.2 = f32[1024,1024] get-tuple-element(%param), index=2\n %reshape.0 = f32[1,1024,1024] reshape(%gte.1)\n %reshape.1 = f32[1,1024,1024] reshape(%gte.2)\n %concat = f32[2,1024,1024] 
concatenate(%reshape.0, %reshape.1), dimensions={0}\n %ccall = f32[2,1024,1024] custom-call(%concat), custom_call_target=\"test\"\n %slice.0 = f32[1,1024,1024] slice(%ccall), slice={[0:1], [0:1024], [0:1024]}\n %slice.1 = f32[1,1024,1024] slice(%ccall), slice={[1:2], [0:1024], [0:1024]}\n %reshape.2 = f32[1024,1024] reshape(%slice.0 )\n %reshape.3 = f32[1024,1024] reshape(%slice.1)\n %gte.3 = f32[1024] get-tuple-element(%param), index=3\n %gte.4 = f32[1024] get-tuple-element(%param), index=4\n %constant.0 = f32[] constant(0)\n %reduce.0 = f32[1024] reduce(%reshape.0, %constant.0), to_apply=%sum, dimensions={0,1}\n %reduce.1 = f32[1024] reduce(%reshape.1, %constant.0), to_apply=%sum, dimensions={0,1}\n %add.0 = f32[1024] add(%reduce.0, %gte.3)\n %add.1 = f32[1024] add(%reduce.1, %gte.4)\n %br0 = f32[1024,1024] broadcast(%add.0), dimensions={1}\n %br1 = f32[1024,1024] broadcast(%add.1), dimensions={1}\n %sub.0 = f32[1024,1024] subtract(%reshape.2, %br0)\n %sub.1 = f32[1024,1024] subtract(%reshape.3, %br1)\n %gte.5 = f32[1] get-tuple-element(%param), index=5\n %gte.6 = f32[1] get-tuple-element(%param), index=6\n %reshape.4 = f32[] reshape(%gte.5)\n %reshape.5 = f32[] reshape(%gte.6)\n %br2 = f32[1024] broadcast(%reshape.4), dimensions={}\n %br3 = f32[1024] broadcast(%reshape.5), dimensions={}\n %add.2 = f32[1024] add(%add.0, %br2)\n %add.3 = f32[1024] add(%add.1, %br3)\n %inc0 = f32[] add(%constant.0, %reshape.4)\n %inc1 = f32[] add(%constant.0, %reshape.5)\n %reshape.6 = f32[1] reshape(%inc0)\n %reshape.7 = f32[1] reshape(%inc1)\n %constant = s32[] constant(1)\n %increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)\n ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1])\n tuple(%increment_iteration, %sub.0, %sub.1, %add.2, %add.3, %reshape.6, %reshape.7)\n }\n ENTRY test_main {\n %param.0 = f32[1024,1024] parameter(0)\n %param.1 = f32[1024,1024] parameter(1)\n %param.2 = f32[1024] parameter(2)\n %param.3 = 
f32[1024] parameter(3)\n %param.4 = f32[1] parameter(4)\n %param.5 = f32[1] parameter(5)\n %constant.0 = s32[] constant(0)\n %while_init = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1])\n tuple(%constant.0, %param.0, %param.1, %param.2, %param.3, %param.4, %param.5)\n ROOT %while = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1])\n while(%while_init), condition=%cond, body=%body\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloModule));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n WhileLoopConcatCodeMotion(2).Run(module.get()));\n ASSERT_TRUE(changed);\n VLOG(1) << module->ToString();\n auto loop = op::While(\n op::Tuple(op::Constant(),\n AllOf(op::Shape(\"f32[2,1024,1024]\"),\n op::Concatenate(op::Reshape(op::Parameter(0)),\n op::Reshape(op::Parameter(1)))),\n AllOf(op::Shape(\"f32[2,1024]\"),\n op::Concatenate(op::Reshape(op::Parameter(2)),\n op::Reshape(op::Parameter(3)))),\n AllOf(op::Shape(\"f32[2]\"),\n op::Concatenate(op::Parameter(4), op::Parameter(5)))));\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Tuple(op::GetTupleElement(loop),\n op::Reshape(op::Slice(op::GetTupleElement(loop))),\n op::Reshape(op::Slice(op::GetTupleElement(loop))),\n op::Reshape(op::Slice(op::GetTupleElement(loop))),\n op::Reshape(op::Slice(op::GetTupleElement(loop))),\n op::Slice(op::GetTupleElement(loop)),\n op::Slice(op::GetTupleElement(loop))));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_concat_code_motion.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_concat_code_motion_test.cc"},"Commit 
Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1153,"cells":{"ID":{"kind":"string","value":"a90867e2-ebc0-4bfe-a0e5-bb10947c0d3f"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"sort_simplifier"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/sort_simplifier.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/sort_simplifier_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/sort_simplifier.h\"\n#include \n#include \n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/status/statusor.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\nnamespace xla {\nnamespace {\nabsl::StatusOr RemoveUnusedOperandFromSort(HloInstruction* sort) {\n if (!sort->shape().IsTuple()) {\n return false;\n }\n HloComputation* computation = sort->parent();\n if (computation->root_instruction() == sort) {\n return false;\n }\n absl::flat_hash_set used_indices;\n for (const HloInstruction* user : sort->users()) {\n if (user->opcode() != HloOpcode::kGetTupleElement) {\n return false;\n }\n used_indices.insert(user->tuple_index());\n }\n auto comparator = sort->to_apply();\n for (int64_t i = 0; i < sort->operand_count() * 2; ++i) {\n if (comparator->parameter_instruction(i)->user_count() > 0) {\n used_indices.insert(i / 2);\n }\n }\n if (used_indices.size() == sort->operand_count()) {\n return false;\n }\n std::vector operands;\n std::vector new_shapes;\n for (int64_t i = 0; i < sort->operand_count(); ++i) {\n if (used_indices.contains(i)) {\n operands.push_back(sort->mutable_operand(i));\n new_shapes.push_back(&sort->operand(i)->shape());\n }\n }\n Shape new_sort_shape = new_shapes.size() == 1\n ? 
*new_shapes[0]\n : ShapeUtil::MakeTupleShapeWithPtrs(new_shapes);\n HloInstruction* new_sort = computation->AddInstruction(\n sort->CloneWithNewOperands(new_sort_shape, operands));\n absl::flat_hash_map>\n replacements;\n int64_t parameter_number = 0;\n for (int64_t i = 0; i < sort->operand_count(); ++i) {\n auto* old_lhs_parameter = comparator->parameter_instruction(i * 2);\n auto* old_rhs_parameter = comparator->parameter_instruction(i * 2 + 1);\n if (used_indices.contains(i)) {\n Shape scalar_shape =\n ShapeUtil::MakeShape(sort->operand(i)->shape().element_type(), {});\n replacements[old_lhs_parameter] = HloInstruction::CreateParameter(\n parameter_number, scalar_shape,\n absl::StrCat(\"p.\", parameter_number / 2, \".lhs\"));\n ++parameter_number;\n replacements[old_rhs_parameter] = HloInstruction::CreateParameter(\n parameter_number, scalar_shape,\n absl::StrCat(\"p.\", parameter_number / 2, \".rhs\"));\n ++parameter_number;\n } else {\n replacements[old_lhs_parameter] = nullptr;\n replacements[old_rhs_parameter] = nullptr;\n }\n }\n HloModule* module = sort->GetModule();\n HloComputation* new_compare = module->AddEmbeddedComputation(\n comparator->CloneWithReplacements(&replacements));\n new_sort->set_to_apply(new_compare);\n absl::flat_hash_map result_map;\n if (new_sort->shape().IsTuple()) {\n int64_t new_index = 0;\n for (int64_t i = 0; i < sort->operand_count(); ++i) {\n if (used_indices.count(i)) {\n result_map[i] =\n computation->AddInstruction(HloInstruction::CreateGetTupleElement(\n *new_shapes[new_index], new_sort, new_index));\n ++new_index;\n }\n }\n } else {\n CHECK_EQ(used_indices.size(), 1);\n result_map[*used_indices.begin()] = new_sort;\n }\n std::vector users(sort->users().begin(),\n sort->users().end());\n for (HloInstruction* user : users) {\n TF_RETURN_IF_ERROR(\n user->ReplaceAllUsesWith(result_map.at(user->tuple_index())));\n TF_RETURN_IF_ERROR(computation->RemoveInstructionAndUnusedOperands(user));\n }\n return true;\n}\n} 
\nabsl::StatusOr SortSimplifier::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n VLOG(2) << \"HLO module before SortSimplifier:\";\n XLA_VLOG_LINES(2, module->ToString());\n bool changed = false;\n std::vector sort_instrs;\n for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {\n absl::c_copy_if(comp->instructions(), std::back_inserter(sort_instrs),\n HloPredicateIsOp);\n }\n for (HloInstruction* sort_instr : sort_instrs) {\n TF_ASSIGN_OR_RETURN(bool result, RemoveUnusedOperandFromSort(sort_instr));\n changed |= result;\n }\n if (changed) {\n VLOG(2) << \"HLO module after SortSimplifier:\";\n XLA_VLOG_LINES(2, module->ToString());\n } else {\n VLOG(2) << \"HLO module unchanged after SortSimplifier\";\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/sort_simplifier.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/service/pattern_matcher.h\"\n#include \"xla/service/pattern_matcher_gmock.h\"\n#include \"xla/test.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\nnamespace xla {\nnamespace {\nnamespace m = match;\nusing SortSimplifierTest = HloTestBase;\nTEST_F(SortSimplifierTest, RemoveUnusedSortOperandArrayResult) {\n const char* hlo_string = R\"(\n HloModule permutation_sort\n compare {\n p.0.lhs = f32[] parameter(0)\n p.0.rhs = f32[] parameter(1)\n p.1.lhs = s32[] parameter(2)\n p.1.rhs = s32[] parameter(3)\n ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT\n }\n ENTRY sort_computation {\n keys = f32[64,8732]{1,0} parameter(0)\n values = s32[64,8732]{1,0} parameter(1)\n sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values),\n dimensions={1}, to_apply=compare\n ROOT gte = f32[64,8732]{1,0} get-tuple-element(sort), index=0\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n SortSimplifier simplifier;\n 
uint64_t num_executions = 0;\n do {\n num_executions++;\n } while (simplifier.Run(module.get()).value());\n EXPECT_EQ(num_executions, 2);\n auto root = module->entry_computation()->root_instruction();\n EXPECT_THAT(root, GmockMatch(m::Sort(m::Parameter(0))));\n}\nTEST_F(SortSimplifierTest, RemoveUnusedSortOperandTuple) {\n const char* hlo_string = R\"(\n HloModule permutation_sort\n compare {\n p.0.lhs = f32[] parameter(0)\n p.0.rhs = f32[] parameter(1)\n p.1.lhs = s32[] parameter(2)\n p.1.rhs = s32[] parameter(3)\n p.2.lhs = u32[] parameter(4)\n p.2.rhs = u32[] parameter(5)\n ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT\n }\n ENTRY sort_computation {\n keys = f32[64,87] parameter(0)\n values.0 = s32[64,87] parameter(1)\n values.1 = u32[64,87] parameter(2)\n sort = (f32[64,87], s32[64,87], u32[64,87]) sort(\n keys, values.0, values.1),\n dimensions={1}, to_apply=compare\n gte.0 = f32[64,87] get-tuple-element(sort), index=0\n gte.1 = u32[64,87] get-tuple-element(sort), index=2\n ROOT tuple = (f32[64,87], u32[64,87]) tuple(gte.0, gte.1)\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n SortSimplifier simplifier;\n EXPECT_TRUE(simplifier.Run(module.get()).value());\n auto root = module->entry_computation()->root_instruction();\n EXPECT_THAT(\n root,\n GmockMatch(m::Tuple(\n m::GetTupleElement(m::Sort(m::Parameter(0), m::Parameter(2)), 0),\n m::GetTupleElement(m::Sort(m::Parameter(0), m::Parameter(2)), 1))));\n}\nTEST_F(SortSimplifierTest, DontRemoveUnusedSortKey) {\n const char* hlo_string = R\"(\n HloModule permutation_sort\n compare {\n p.0.lhs = f32[] parameter(0)\n p.0.rhs = f32[] parameter(1)\n p.1.lhs = s32[] parameter(2)\n p.1.rhs = s32[] parameter(3)\n ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT\n }\n ENTRY sort_computation {\n keys = f32[64,8732]{1,0} parameter(0)\n values = s32[64,8732]{1,0} parameter(1)\n sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values), dimensions={1}, 
to_apply=compare\n ROOT gte = s32[64,8732]{1,0} get-tuple-element(sort), index=1\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n SortSimplifier simplifier;\n EXPECT_FALSE(simplifier.Run(module.get()).value());\n}\nTEST_F(SortSimplifierTest, RemoveUnusedFirstOperand) {\n const char* hlo_string = R\"(\n HloModule permutation_sort\n compare {\n p.0.lhs = f32[] parameter(0)\n p.0.rhs = f32[] parameter(1)\n p.1.lhs = s32[] parameter(2)\n p.1.rhs = s32[] parameter(3)\n ROOT lt = pred[] compare(p.1.lhs, p.1.rhs), direction=LT\n }\n ENTRY sort_computation {\n keys = f32[64,8732]{1,0} parameter(0)\n values = s32[64,8732]{1,0} parameter(1)\n sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values),\n dimensions={1}, to_apply=compare\n ROOT gte = s32[64,8732]{1,0} get-tuple-element(sort), index=1\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n SortSimplifier simplifier;\n uint64_t num_executions = 0;\n do {\n num_executions++;\n } while (simplifier.Run(module.get()).value());\n EXPECT_EQ(num_executions, 2);\n auto root = module->entry_computation()->root_instruction();\n EXPECT_THAT(root, GmockMatch(m::Sort(m::Parameter(1))));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/sort_simplifier.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/sort_simplifier_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1154,"cells":{"ID":{"kind":"string","value":"8585c3c1-3110-4d7f-9df7-16798a77f5b7"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"stochastic_convert_decomposer"},"File Path in 
Repository":{"kind":"string","value":"third_party/xla/xla/service/stochastic_convert_decomposer.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/stochastic_convert_decomposer_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/stochastic_convert_decomposer.h\"\n#include \n#include \n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/primitive_util.h\"\n#include \"xla/service/hlo_creation_utils.h\"\n#include \"xla/service/shape_inference.h\"\n#include \"xla/util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nabsl::Status DecomposeStochasticConvert(HloComputation* comp,\n HloInstruction* instruction) {\n CHECK(instruction->opcode() == HloOpcode::kStochasticConvert)\n << \"requires a stochastic_convert instruction to decompose, but got: \"\n << instruction->opcode();\n CHECK(instruction->operand_count() == 2)\n << \"requires 2 operands for stochastic convert, but got: \"\n << instruction->operand_count();\n HloInstruction* operand = instruction->mutable_operand(0);\n HloInstruction* random = instruction->mutable_operand(1);\n PrimitiveType from_type = operand->shape().element_type();\n PrimitiveType random_type = random->shape().element_type();\n PrimitiveType to_type = instruction->shape().element_type();\n TF_RETURN_IF_ERROR(ShapeInference::InferStochasticConvertShape(\n operand->shape(), random->shape(), to_type)\n .status());\n VLOG(1) << \"Decomposing instruction: \" << instruction->ToString();\n if (primitive_util::IsSignedIntegralType(to_type)) {\n TF_ASSIGN_OR_RETURN(HloInstruction * operand_sign,\n MakeUnaryHlo(HloOpcode::kSign, operand));\n TF_ASSIGN_OR_RETURN(HloInstruction * should_neg,\n MakeCompareHlo(Comparison::Direction::kLt, operand_sign,\n MakeScalarLike(operand_sign, 0)));\n 
TF_ASSIGN_OR_RETURN(HloInstruction * operand_abs,\n MakeUnaryHlo(HloOpcode::kAbs, operand));\n TF_ASSIGN_OR_RETURN(HloInstruction * truncated_fp,\n MakeUnaryHlo(HloOpcode::kFloor, operand_abs));\n TF_ASSIGN_OR_RETURN(\n HloInstruction * fractional,\n MakeBinaryHlo(HloOpcode::kSubtract, operand_abs, truncated_fp));\n if (from_type == F16) {\n fractional = MakeConvertToHlo(fractional, F32);\n }\n TF_ASSIGN_OR_RETURN(\n HloInstruction * fixed_fractional,\n MakeBinaryHlo(\n HloOpcode::kMultiply, fractional,\n MakeScalarLike(fractional, IPow(2, primitive_util::BitWidth(\n random_type)))));\n TF_ASSIGN_OR_RETURN(\n HloInstruction * should_round_up,\n MakeCompareHlo(Comparison::Direction::kLt, random,\n MakeConvertToHlo(fixed_fractional, random_type)));\n HloInstruction* truncated_int = MakeConvertToHlo(truncated_fp, to_type);\n TF_ASSIGN_OR_RETURN(\n truncated_int,\n MakeSelectHlo(should_round_up,\n MakeBinaryHlo(HloOpcode::kAdd, truncated_int,\n MakeScalarLike(truncated_int, 1))\n .value(),\n truncated_int));\n TF_ASSIGN_OR_RETURN(\n HloInstruction * result,\n MakeSelectHlo(should_neg,\n MakeUnaryHlo(HloOpcode::kNegate, truncated_int).value(),\n truncated_int));\n auto to_bits = primitive_util::BitWidth(to_type);\n auto min = static_cast(\n (static_cast(1) + ~static_cast(1))\n << (to_bits - 1));\n TF_ASSIGN_OR_RETURN(HloInstruction * is_min,\n MakeCompareHlo(Comparison::Direction::kLe, operand,\n MakeScalarLike(operand, min)));\n TF_ASSIGN_OR_RETURN(\n result, MakeSelectHlo(is_min, MakeScalarLike(result, min), result));\n auto max =\n static_cast((static_cast(1) << (to_bits - 1)) - 1);\n TF_ASSIGN_OR_RETURN(HloInstruction * is_max,\n MakeCompareHlo(Comparison::Direction::kGe, operand,\n MakeScalarLike(operand, max)));\n TF_ASSIGN_OR_RETURN(\n result, MakeSelectHlo(is_max, MakeScalarLike(result, max), result));\n TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(result));\n TF_RETURN_IF_ERROR(comp->RemoveInstruction(instruction));\n return absl::OkStatus();\n }\n return 
Internal(\"Unsupported stochastic convert: from %s to %s\",\n PrimitiveType_Name(from_type),\n PrimitiveType_Name(to_type));\n}\nabsl::StatusOr StochasticConvertDecomposer::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n bool changed = false;\n for (HloComputation* computation :\n module->MakeNonfusionComputations(execution_threads)) {\n for (HloInstruction* instruction :\n computation->MakeInstructionPostOrder()) {\n if (instruction->opcode() != HloOpcode::kStochasticConvert) {\n continue;\n }\n TF_RETURN_IF_ERROR(DecomposeStochasticConvert(computation, instruction));\n changed = true;\n }\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/stochastic_convert_decomposer.h\"\n#include \n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/tests/hlo_test_base.h\"\nnamespace xla {\nnamespace {\nnamespace op = xla::testing::opcode_matchers;\nusing StochasticConvertDecomposerTest = HloTestBase;\nusing ::testing::HasSubstr;\nTEST_F(StochasticConvertDecomposerTest, DecomposeStochasticConvertF32ToS32) {\n const std::string module_str = R\"(\nHloModule module\nENTRY entry {\n %arg_param.1 = f32[65536]{0} parameter(0)\n %random_param.2 = u32[65536]{0} parameter(1)\n ROOT %stochastic-convert.3 = s32[65536]{0} stochastic-convert(f32[65536]{0} %arg_param.1, u32[65536]{0} %random_param.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnUnverifiedModule((module_str)));\n StochasticConvertDecomposer decomposer;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Select(op::Compare(), op::Broadcast(),\n op::Select(op::Compare(), op::Broadcast(),\n op::Select(op::Compare(), op::Negate(),\n 
op::Select()))));\n}\nTEST_F(StochasticConvertDecomposerTest, DecomposeStochasticConvertBF16ToS8) {\n const std::string module_str = R\"(\nHloModule module\nENTRY entry {\n %arg_param.1 = bf16[65536]{0} parameter(0)\n %random_param.2 = u16[65536]{0} parameter(1)\n ROOT %stochastic-convert.3 = s8[65536]{0} stochastic-convert(bf16[65536]{0} %arg_param.1, u16[65536]{0} %random_param.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnUnverifiedModule((module_str)));\n StochasticConvertDecomposer decomposer;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Select(op::Compare(), op::Broadcast(),\n op::Select(op::Compare(), op::Broadcast(),\n op::Select(op::Compare(), op::Negate(),\n op::Select()))));\n}\nTEST_F(StochasticConvertDecomposerTest, WrongRandomBitWidth) {\n const std::string module_str = R\"(\nHloModule module\nENTRY entry {\n %arg_param.1 = bf16[65536]{0} parameter(0)\n %random_param.2 = u32[65536]{0} parameter(1)\n ROOT %stochastic-convert.3 = s32[65536]{0} stochastic-convert(bf16[65536]{0} %arg_param.1, u32[65536]{0} %random_param.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnUnverifiedModule((module_str)));\n StochasticConvertDecomposer decomposer;\n auto result = decomposer.Run(module.get());\n EXPECT_NE(absl::OkStatus(), result.status());\n EXPECT_THAT(result.status().message(), HasSubstr(\"have same bits\"));\n}\nTEST_F(StochasticConvertDecomposerTest, WrongRandomType) {\n const std::string module_str = R\"(\nHloModule module\nENTRY entry {\n %arg_param.1 = f32[65536]{0} parameter(0)\n %random_param.2 = s32[65536]{0} parameter(1)\n ROOT %stochastic-convert.3 = s32[65536]{0} stochastic-convert(f32[65536]{0} %arg_param.1, s32[65536]{0} %random_param.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnUnverifiedModule((module_str)));\n 
StochasticConvertDecomposer decomposer;\n auto result = decomposer.Run(module.get());\n EXPECT_NE(absl::OkStatus(), result.status());\n EXPECT_THAT(result.status().message(),\n HasSubstr(\"must be unsigned integers\"));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/stochastic_convert_decomposer.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/stochastic_convert_decomposer_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1155,"cells":{"ID":{"kind":"string","value":"ebd2d89a-eae6-4d8a-8ae4-35118a5a7120"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"map_inliner"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/map_inliner.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/map_inliner_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/map_inliner.h\"\n#include \n#include \n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/status/status.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/span.h\"\n#include \"xla/hlo/ir/dfs_hlo_visitor_with_default.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_query.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/logging.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nclass MapInlinerVisitor : public DfsHloVisitorWithDefault {\n public:\n explicit MapInlinerVisitor(HloComputation* computation)\n : computation_(computation) {}\n absl::Status DefaultAction(HloInstruction* ) override {\n return 
absl::OkStatus();\n }\n absl::Status HandleMap(HloInstruction* map) override;\n absl::StatusOr Run(HloComputation* computation);\n private:\n HloComputation* computation_;\n bool changed_ = false;\n};\nabsl::StatusOr MapInlinerVisitor::Run(HloComputation* computation) {\n changed_ = false;\n computation_ = computation;\n TF_RETURN_IF_ERROR(computation->root_instruction()->Accept(this));\n return changed_;\n}\nabsl::Status MapInlinerVisitor::HandleMap(HloInstruction* map) {\n HloComputation* function = map->to_apply();\n HloInstruction& root = *function->root_instruction();\n if (hlo_query::AllOperandsAreParameters(root)) {\n if (root.opcode() == HloOpcode::kFusion) {\n return absl::OkStatus();\n }\n VLOG(10) << \"inlining map({X ... Y}, op) => : op(X ... Y) with function \"\n << root.ToShortString();\n if (root.opcode() == HloOpcode::kParameter) {\n TF_RETURN_IF_ERROR(\n map->ReplaceAllUsesWith(map->operands()[root.parameter_number()]));\n TF_RETURN_IF_ERROR(computation_->RemoveInstruction(map));\n } else if (root.opcode() == HloOpcode::kConstant) {\n HloInstruction* constant = computation_->AddInstruction(root.Clone());\n HloInstruction* placed_instruction = computation_->AddInstruction(\n HloInstruction::CreateBroadcast(map->shape(), constant, {}));\n TF_RETURN_IF_ERROR(\n computation_->ReplaceInstruction(map, placed_instruction));\n } else {\n std::vector params;\n for (int64_t o = 0; o < root.operands().size(); o++) {\n params.push_back(map->operands()[root.operand(o)->parameter_number()]);\n }\n HloInstruction* placed_instruction = computation_->AddInstruction(\n root.CloneWithNewOperands(map->shape(), params));\n TF_RETURN_IF_ERROR(\n computation_->ReplaceInstruction(map, placed_instruction));\n }\n changed_ = true;\n return absl::OkStatus();\n }\n return absl::OkStatus();\n}\nabsl::StatusOr MapInliner::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n MapInlinerVisitor visitor(nullptr);\n bool changed = false;\n for 
(HloComputation* computation : module->computations(execution_threads)) {\n TF_ASSIGN_OR_RETURN(bool computation_changed, visitor.Run(computation));\n changed |= computation_changed;\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/map_inliner.h\"\n#include \n#include \n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/literal.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/test.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tests/literal_test_util.h\"\n#include \"xla/xla_data.pb.h\"\nnamespace op = xla::testing::opcode_matchers;\nnamespace xla {\nnamespace {\nusing MapInlinerTest = HloTestBase;\nTEST_F(MapInlinerTest, MapMax) {\n Shape r0f32 = ShapeUtil::MakeShape(F32, {});\n auto max_builder = HloComputation::Builder(TestName());\n auto param1 = max_builder.AddInstruction(\n HloInstruction::CreateParameter(0, r0f32, \"x\"));\n auto param2 = max_builder.AddInstruction(\n HloInstruction::CreateParameter(1, r0f32, \"y\"));\n max_builder.AddInstruction(HloInstruction::CreateBinary(\n param1->shape(), HloOpcode::kMaximum, param1, param2));\n auto max_f32 = max_builder.Build();\n auto builder = HloComputation::Builder(\"MapMaxFunction\");\n auto lhs = builder.AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR1({1, 2, 3, 4})));\n auto rhs = builder.AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR1({4, 3, 2, 1})));\n builder.AddInstruction(\n HloInstruction::CreateMap(lhs->shape(), {lhs, rhs}, max_f32.get()));\n auto computation = builder.Build();\n auto hlo_module = CreateNewVerifiedModule();\n hlo_module->AddEmbeddedComputation(std::move(max_f32));\n hlo_module->AddEntryComputation(std::move(computation));\n MapInliner inliner;\n 
EXPECT_TRUE(inliner.Run(hlo_module.get()).value());\n EXPECT_THAT(hlo_module->entry_computation()->root_instruction(),\n op::Maximum(lhs, rhs));\n auto result = ExecuteAndTransfer(hlo_module->Clone(), {});\n auto expected = LiteralUtil::CreateR1({4, 3, 3, 4});\n EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));\n}\nTEST_F(MapInlinerTest, MapConstant) {\n Shape r0f32 = ShapeUtil::MakeShape(F32, {});\n auto const2_builder = HloComputation::Builder(TestName());\n auto param1 = const2_builder.AddInstruction(\n HloInstruction::CreateParameter(0, r0f32, \"x\"));\n (void)param1;\n const2_builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(2.0f)));\n auto const2_f32 = const2_builder.Build();\n auto builder = HloComputation::Builder(\"MapConstFunction\");\n auto lhs = builder.AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR2({{1, 2, 3, 4}, {5, 6, 7, 8}})));\n builder.AddInstruction(\n HloInstruction::CreateMap(lhs->shape(), {lhs}, const2_f32.get()));\n auto computation = builder.Build();\n auto hlo_module = CreateNewVerifiedModule();\n hlo_module->AddEmbeddedComputation(std::move(const2_f32));\n hlo_module->AddEntryComputation(std::move(computation));\n HloInstruction* root = hlo_module->entry_computation()->root_instruction();\n MapInliner inliner;\n EXPECT_TRUE(inliner.Run(hlo_module.get()).value());\n root = hlo_module->entry_computation()->root_instruction();\n EXPECT_THAT(root, op::Broadcast(op::Constant()));\n auto result = ExecuteAndTransfer(hlo_module->Clone(), {});\n auto expected = LiteralUtil::CreateR2({{2, 2, 2, 2}, {2, 2, 2, 2}});\n EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));\n}\nTEST_F(MapInlinerTest, MapSubtractOppositeOrder) {\n Shape r0f32 = ShapeUtil::MakeShape(F32, {});\n auto max_builder = HloComputation::Builder(TestName());\n auto param1 = max_builder.AddInstruction(\n HloInstruction::CreateParameter(1, r0f32, \"x\"));\n auto param2 = max_builder.AddInstruction(\n 
HloInstruction::CreateParameter(0, r0f32, \"y\"));\n max_builder.AddInstruction(HloInstruction::CreateBinary(\n param1->shape(), HloOpcode::kSubtract, param1, param2));\n auto max_f32 = max_builder.Build();\n auto builder = HloComputation::Builder(\"MapSubFunction\");\n auto lhs = builder.AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR1({1, 2, 3, 4})));\n auto rhs = builder.AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR1({4, 3, 2, 1})));\n builder.AddInstruction(\n HloInstruction::CreateMap(lhs->shape(), {lhs, rhs}, max_f32.get()));\n auto computation = builder.Build();\n auto hlo_module = CreateNewVerifiedModule();\n hlo_module->AddEmbeddedComputation(std::move(max_f32));\n hlo_module->AddEntryComputation(std::move(computation));\n MapInliner inliner;\n EXPECT_TRUE(inliner.Run(hlo_module.get()).value());\n EXPECT_THAT(hlo_module->entry_computation()->root_instruction(),\n op::Subtract(rhs, lhs));\n auto result = ExecuteAndTransfer(hlo_module->Clone(), {});\n auto expected = LiteralUtil::CreateR1({3, 1, -1, -3});\n EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));\n}\nTEST_F(MapInlinerTest, MapParameter) {\n Shape r0f32 = ShapeUtil::MakeShape(F32, {});\n auto param_builder = HloComputation::Builder(TestName());\n param_builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32, \"p0\"));\n param_builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32, \"p1\"));\n auto param_f32 = param_builder.Build();\n auto builder = HloComputation::Builder(\"MapParamFunction\");\n auto lhs = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(1)));\n auto rhs = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(4)));\n builder.AddInstruction(\n HloInstruction::CreateMap(lhs->shape(), {lhs, rhs}, param_f32.get()));\n auto computation = builder.Build();\n auto hlo_module = CreateNewVerifiedModule();\n hlo_module->AddEmbeddedComputation(std::move(param_f32));\n 
hlo_module->AddEntryComputation(std::move(computation));\n MapInliner inliner;\n EXPECT_TRUE(inliner.Run(hlo_module.get()).value());\n EXPECT_THAT(hlo_module->entry_computation()->root_instruction(), rhs);\n auto result = ExecuteAndTransfer(hlo_module->Clone(), {});\n auto expected = LiteralUtil::CreateR0(4);\n EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/map_inliner.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/map_inliner_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1156,"cells":{"ID":{"kind":"string","value":"f522495c-3a6f-4a50-bfd7-e7546441fa98"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"triangular_solve_expander"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/triangular_solve_expander.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/triangular_solve_expander_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/triangular_solve_expander.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/status/statusor.h\"\n#include \"absl/strings/str_format.h\"\n#include \"absl/types/span.h\"\n#include \"xla/hlo/builder/lib/constants.h\"\n#include \"xla/hlo/builder/lib/math.h\"\n#include \"xla/hlo/builder/lib/matrix.h\"\n#include \"xla/hlo/builder/lib/slicing.h\"\n#include \"xla/hlo/builder/xla_builder.h\"\n#include \"xla/hlo/builder/xla_computation.h\"\n#include \"xla/hlo/ir/hlo_clone_context.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include 
\"xla/service/hlo_module_config.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/logging.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nXlaOp DiagonalBlocks(XlaOp a, int64_t block_size) {\n XlaBuilder* builder = a.builder();\n return builder->ReportErrorOrReturn([&]() -> absl::StatusOr {\n TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(a));\n int ndims = shape.rank();\n int64_t n = ShapeUtil::GetDimension(shape, -1);\n int64_t num_blocks = n / block_size;\n absl::Span batch_dims = absl::MakeConstSpan(\n shape.dimensions().begin(), shape.dimensions().begin() + (ndims - 2));\n XlaOp diag_blocks;\n if (n == block_size) {\n std::vector permutation(ndims);\n std::iota(permutation.begin(), permutation.end(), 1);\n permutation.insert(permutation.end() - 2, 0);\n return Transpose(Broadcast(a, {1}), permutation);\n }\n if (n > block_size) {\n auto start_indices =\n Transpose(Broadcast(Mul(Iota(builder, S32, num_blocks),\n ConstantR0(builder, block_size)),\n {2}),\n {1, 0});\n std::vector slice_sizes(ndims);\n GatherDimensionNumbers dim_numbers;\n for (int i = 0; i < ndims - 2; ++i) {\n dim_numbers.add_offset_dims(i);\n slice_sizes[i] = ShapeUtil::GetDimension(shape, i);\n }\n slice_sizes[ndims - 2] = slice_sizes[ndims - 1] = block_size;\n dim_numbers.add_offset_dims(ndims - 1);\n dim_numbers.add_offset_dims(ndims);\n dim_numbers.add_start_index_map(ndims - 2);\n dim_numbers.add_start_index_map(ndims - 1);\n dim_numbers.set_index_vector_dim(1);\n diag_blocks = Gather(a, start_indices, dim_numbers, slice_sizes);\n }\n if (n % block_size != 0) {\n auto last_blocks =\n SliceInMinorDims(a, {n - n % block_size, n - n % block_size}, {n, n});\n PaddingConfig config = MakeNoPaddingConfig(ndims);\n int64_t padding = block_size - n % block_size;\n config.mutable_dimensions(ndims - 2)->set_edge_padding_high(padding);\n last_blocks =\n Pad(last_blocks, Zero(builder, shape.element_type()), 
config);\n auto eye =\n IdentityMatrix(builder, shape.element_type(), padding, padding);\n config = MakeNoPaddingConfig(2);\n config.mutable_dimensions(0)->set_edge_padding_low(n % block_size);\n eye = Pad(eye, Zero(builder, shape.element_type()), config);\n eye = Broadcast(eye, batch_dims);\n last_blocks = ConcatInDim(builder, {last_blocks, eye}, ndims - 1);\n TF_ASSIGN_OR_RETURN(Shape blocks_shape, builder->GetShape(last_blocks));\n auto shape_dims = blocks_shape.dimensions();\n auto last_blocks_dims = std::vector(ndims);\n std::copy(shape_dims.begin(), shape_dims.end(), last_blocks_dims.begin());\n last_blocks_dims.insert(last_blocks_dims.end() - 2, 1);\n last_blocks = Reshape(last_blocks, last_blocks_dims);\n if (n > block_size) {\n diag_blocks =\n ConcatInDim(builder, {diag_blocks, last_blocks}, ndims - 2);\n } else {\n diag_blocks = last_blocks;\n }\n }\n return diag_blocks;\n });\n}\nXlaOp SolveWithInvertedDiagonalBlocks(XlaOp a, XlaOp b, XlaOp inv_diag_blocks,\n bool left_side, bool lower,\n bool transpose_a, bool conjugate_a,\n PrecisionConfig::Precision precision) {\n XlaBuilder* builder = a.builder();\n return builder->ReportErrorOrReturn([&]() -> absl::StatusOr {\n TF_ASSIGN_OR_RETURN(Shape blocks_shape, builder->GetShape(inv_diag_blocks));\n TF_ASSIGN_OR_RETURN(Shape b_shape, builder->GetShape(b));\n int64_t block_size = ShapeUtil::GetDimension(blocks_shape, -1);\n TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));\n int64_t ndims = a_shape.rank();\n int64_t n = ShapeUtil::GetDimension(a_shape, -1);\n int64_t num_blocks = n / block_size + (n % block_size != 0);\n int64_t m_dim = (left_side) ? -1 : -2;\n int64_t m = ShapeUtil::GetDimension(b_shape, m_dim);\n std::vector update_ops;\n int bdims = b_shape.rank();\n int64_t block_dim = (left_side) ? bdims - 2 : bdims - 1;\n XlaOp x;\n for (int i = 0; i < num_blocks; i++) {\n bool backward = left_side ^ lower ^ transpose_a;\n auto j = backward ? 
num_blocks - 1 - i : i;\n int64_t block = (n % block_size != 0 && j + 1 == num_blocks)\n ? n % block_size\n : block_size;\n auto inv_block =\n MaybeConjugate(Collapse(SliceInMinorDims(inv_diag_blocks, {j, 0, 0},\n {j + 1, block, block}),\n {ndims - 2, ndims - 1}),\n conjugate_a);\n int64_t k = std::min((j + 1) * block_size, n);\n std::vector start = {j * block_size, 0};\n std::vector end = {k, m};\n if (!left_side) {\n std::swap(start[0], start[1]);\n std::swap(end[0], end[1]);\n }\n auto b_row = SliceInMinorDims(b, start, end);\n XlaOp remainder;\n if (i == 0) {\n remainder = b_row;\n } else {\n if (backward) {\n start = {j * block_size,\n std::max(int64_t{0}, (num_blocks - i) * block_size)};\n end = {k, n};\n } else {\n start = {j * block_size, 0};\n end = {k, std::min(i * block_size, n)};\n }\n if (!left_side ^ transpose_a) {\n std::swap(start[0], start[1]);\n std::swap(end[0], end[1]);\n }\n auto a_row =\n MaybeConjugate(SliceInMinorDims(a, start, end), conjugate_a);\n if (left_side) {\n remainder = b_row - BatchDot(a_row, transpose_a, x, false, precision);\n } else {\n remainder = b_row - BatchDot(x, false, a_row, transpose_a, precision);\n }\n }\n XlaOp x_update;\n if (left_side) {\n x_update =\n BatchDot(inv_block, transpose_a, remainder, false, precision);\n } else {\n x_update =\n BatchDot(remainder, false, inv_block, transpose_a, precision);\n }\n if (i == 0) {\n x = x_update;\n } else {\n if (backward) {\n x = ConcatInDim(builder, {x_update, x}, block_dim);\n } else {\n x = ConcatInDim(builder, {x, x_update}, block_dim);\n }\n }\n }\n return x;\n });\n}\n} \nXlaOp TriangularSolveExpander::InvertDiagonalBlocks(\n XlaOp diag_blocks, bool lower_triangular,\n PrecisionConfig::Precision precision) {\n XlaBuilder* builder = diag_blocks.builder();\n return builder->ReportErrorOrReturn([&]() -> absl::StatusOr {\n TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(diag_blocks));\n int64_t block_size = ShapeUtil::GetDimension(shape, -1);\n int64_t num_blocks = 
ShapeUtil::ElementsIn(shape) / IPow(block_size, 2);\n diag_blocks = Reshape(diag_blocks, {num_blocks, block_size, block_size});\n diag_blocks = Triangle(diag_blocks, lower_triangular);\n auto diags = GetMatrixDiagonal(diag_blocks);\n auto scaled_diag_blocks = Div(diag_blocks, diags, {0, 2});\n auto identity =\n IdentityMatrix(builder, shape.element_type(), block_size, block_size);\n auto neg_identity = -identity;\n auto pos_one = Reshape(One(builder, shape.element_type()), {1, 1});\n auto start_index =\n ConstantR0(builder, lower_triangular ? 0 : block_size - 1);\n auto output_block =\n DynamicUpdateSlice(neg_identity, pos_one,\n {start_index, start_index});\n XlaOp output = Broadcast(output_block,\n {num_blocks});\n std::vector tuple_shapes = {\n ShapeUtil::MakeShape(S32, {}),\n ShapeUtil::MakeShape(shape.element_type(),\n {num_blocks, block_size, block_size}),\n ShapeUtil::MakeShape(shape.element_type(),\n {num_blocks, block_size, block_size})};\n Shape tuple_shape = ShapeUtil::MakeTupleShape(tuple_shapes);\n auto init_i = One(builder, S32);\n auto init = Tuple(builder, {init_i, output, scaled_diag_blocks});\n std::unique_ptr condb =\n builder->CreateSubBuilder(\"InvertDiagCond\");\n {\n auto i = GetTupleElement(\n Parameter(condb.get(), 0, tuple_shape, \"InvertDiagCondTuple\"), 0);\n Lt(i, ConstantR0(condb.get(), block_size));\n }\n TF_ASSIGN_OR_RETURN(auto cond, condb->Build());\n std::unique_ptr bodyb =\n builder->CreateSubBuilder(\"InvertDiagBody\");\n {\n auto input_tuple =\n Parameter(bodyb.get(), 0, tuple_shape, \"InvertDiagBodyTuple\");\n auto i = GetTupleElement(input_tuple, 0);\n auto body_out = GetTupleElement(input_tuple, 1);\n auto body_input = GetTupleElement(input_tuple, 2);\n auto zero = ConstantR0(bodyb.get(), 0);\n auto j = lower_triangular ? 
i : ScalarLike(i, block_size - 1) - i;\n auto input_row =\n DynamicSlice(body_input, {zero, j, zero},\n {num_blocks, 1, block_size});\n DotDimensionNumbers dnums;\n dnums.add_lhs_batch_dimensions(0);\n dnums.add_rhs_batch_dimensions(0);\n dnums.add_lhs_contracting_dimensions(2);\n dnums.add_rhs_contracting_dimensions(1);\n PrecisionConfig precision_proto;\n precision_proto.add_operand_precision(precision);\n precision_proto.add_operand_precision(precision);\n auto update = -DotGeneral(input_row, body_out, dnums, &precision_proto);\n body_out = DynamicUpdateSlice(body_out, update, {zero, j, zero});\n auto next_i = i + ScalarLike(i, 1);\n Tuple(bodyb.get(), {next_i, body_out, body_input});\n }\n TF_ASSIGN_OR_RETURN(auto body, bodyb->Build());\n auto invert_while = While(cond, body, init);\n auto inv_diag_blocks = GetTupleElement(invert_while, 1);\n inv_diag_blocks = Div(inv_diag_blocks, diags,\n {0, 1});\n return Reshape(inv_diag_blocks, shape.dimensions());\n });\n}\nXlaOp TriangularSolveExpander::SolveByInvertingDiagonalBlocks(\n XlaOp a, XlaOp b, bool left_side, bool lower, bool transpose_a,\n bool conjugate_a, bool unit_diagonal,\n PrecisionConfig::Precision precision) {\n XlaBuilder* builder = a.builder();\n return builder->ReportErrorOrReturn([&]() -> absl::StatusOr {\n TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));\n const int64_t ndims = a_shape.rank();\n int64_t k = ShapeUtil::GetDimension(a_shape, -1);\n if (unit_diagonal) {\n a = lower ? 
Select(TriangleMask(a, -1), a, ZerosLike(a))\n : Select(TriangleMask(a, 0), ZerosLike(a), a);\n a = xla::Add(a, IdentityMatrix(builder, a_shape.element_type(), k, k),\n {ndims - 2, ndims - 1});\n } else {\n a = Triangle(a, lower);\n }\n int64_t block_size = std::min(block_size_, k);\n auto diag_blocks = DiagonalBlocks(a, block_size);\n auto inv_diag_blocks = InvertDiagonalBlocks(diag_blocks, lower, precision);\n return SolveWithInvertedDiagonalBlocks(a, b, inv_diag_blocks, left_side,\n lower, transpose_a, conjugate_a,\n precision);\n });\n}\nXlaOp TriangularSolveExpander::SolveDirectly(\n XlaOp a, XlaOp b, bool left_side, bool lower, bool transpose_a,\n bool conjugate_a, bool unit_diagonal,\n PrecisionConfig::Precision precision) {\n XlaBuilder* builder = a.builder();\n return builder->ReportErrorOrReturn([&]() -> absl::StatusOr {\n TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));\n TF_ASSIGN_OR_RETURN(Shape b_shape, builder->GetShape(b));\n int64_t m = ShapeUtil::GetDimension(b_shape, -2);\n int64_t n = ShapeUtil::GetDimension(b_shape, -1);\n const int64_t a_size = ShapeUtil::GetDimension(a_shape, -1);\n a = MaybeConjugate(a, conjugate_a);\n bool backwards = transpose_a ^ lower ^ !left_side;\n for (int64_t i = 0; i < a_size; ++i) {\n int64_t j = backwards ? i : (a_size - i - 1);\n std::vector b_row_start, b_row_end;\n if (left_side) {\n b_row_start = {j, 0};\n b_row_end = {j + 1, n};\n } else {\n b_row_start = {0, j};\n b_row_end = {m, j + 1};\n }\n auto b_row = SliceInMinorDims(b, b_row_start, b_row_end);\n std::vector a_start = {j, backwards ? 0 : (j + 1)};\n std::vector a_end = {j + 1, backwards ? j : a_size};\n if (transpose_a ^ !left_side) {\n std::swap(a_start[0], a_start[1]);\n std::swap(a_end[0], a_end[1]);\n }\n auto a_chunk = SliceInMinorDims(a, a_start, a_end);\n if (left_side) {\n bool which = transpose_a ^ lower;\n auto b_chunk =\n SliceInMinorDims(b, {which ? 0 : (j + 1), 0}, {which ? 
j : m, n});\n b_row = b_row - BatchDot(a_chunk, transpose_a, b_chunk,\n false, precision);\n } else {\n bool which = transpose_a ^ !lower;\n auto b_chunk =\n SliceInMinorDims(b, {0, which ? 0 : (j + 1)}, {m, which ? j : n});\n b_row = b_row - BatchDot(b_chunk, false, a_chunk,\n transpose_a, precision);\n }\n if (!unit_diagonal) {\n auto a_diag = SliceInMinorDims(a, {j, j}, {j + 1, j + 1});\n b_row = b_row / a_diag;\n }\n b = UpdateSliceInMinorDims(b, b_row, b_row_start);\n }\n return b;\n });\n}\nXlaOp TriangularSolveExpander::BuildTriangularSolve(\n XlaOp a, XlaOp b, bool left_side, bool lower, bool transpose_a,\n bool conjugate_a, bool unit_diagonal, int64_t block_size,\n PrecisionConfig::Precision precision) {\n XlaBuilder* builder = a.builder();\n return builder->ReportErrorOrReturn([&]() -> absl::StatusOr {\n TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));\n TF_ASSIGN_OR_RETURN(Shape b_shape, builder->GetShape(b));\n if (a_shape.rank() != b_shape.rank()) {\n return InvalidArgument(\n \"Arguments to TriangularSolve have shapes with different ranks: \"\n \"%s vs. 
%s\",\n ShapeUtil::HumanString(a_shape), ShapeUtil::HumanString(b_shape));\n }\n const int64_t ndims = a_shape.rank();\n if (ndims < 2) {\n return InvalidArgument(\n \"Arguments to TriangularSolve was rank %d but must have rank >= 2.\",\n ndims);\n }\n std::vector batch_dimensions;\n int64_t batch = 1;\n for (int i = 0; i < ndims - 2; ++i) {\n int64_t a_size = a_shape.dimensions(i);\n int64_t b_size = b_shape.dimensions(i);\n if (a_size != b_size) {\n return InvalidArgument(\n \"Batch dimensions of arguments to TriangularSolve must be equal; \"\n \"shapes were %s and %s.\",\n ShapeUtil::HumanString(a_shape), ShapeUtil::HumanString(b_shape));\n }\n batch_dimensions.push_back(a_size);\n batch *= a_size;\n }\n if (ShapeUtil::GetDimension(a_shape, -1) !=\n ShapeUtil::GetDimension(a_shape, -2)) {\n return InvalidArgument(\n \"The 'a' argument to TriangularSolve must be a batched square matrix;\"\n \" shape was: %s\",\n ShapeUtil::HumanString(a_shape));\n }\n const int64_t m = ShapeUtil::GetDimension(b_shape, -2);\n const int64_t n = ShapeUtil::GetDimension(b_shape, -1);\n if ((left_side ? m : n) != ShapeUtil::GetDimension(a_shape, -1)) {\n return InvalidArgument(\n \"Arguments to TriangularSolve have incompatible matrix shapes %s and \"\n \"%s\",\n ShapeUtil::HumanString(a_shape), ShapeUtil::HumanString(b_shape));\n }\n int64_t a_size = ShapeUtil::GetDimension(a_shape, -1);\n if (ShapeUtil::IsZeroElementArray(b_shape)) {\n return b;\n }\n if (a_size == 1) {\n return unit_diagonal ? 
b : Div(b, MaybeConjugate(a, conjugate_a));\n }\n if (UseDirectSolves() && batch > block_size_ / 16 &&\n a_size < block_size_ / 4) {\n return SolveDirectly(a, b, left_side, lower, transpose_a, conjugate_a,\n unit_diagonal, precision);\n } else {\n return SolveByInvertingDiagonalBlocks(a, b, left_side, lower, transpose_a,\n conjugate_a, unit_diagonal,\n precision);\n }\n });\n}\nTriangularSolveExpander::TriangularSolveExpander(int64_t block_size)\n : block_size_(block_size) {\n CHECK_GE(block_size_, 1);\n}\nbool TriangularSolveExpander::InstructionMatchesPattern(\n HloInstruction* instruction) {\n return instruction->opcode() == HloOpcode::kTriangularSolve;\n}\nabsl::StatusOr TriangularSolveExpander::ExpandInstruction(\n HloInstruction* instruction) {\n const TriangularSolveOptions& options =\n instruction->triangular_solve_options();\n const std::string name = absl::StrFormat(\n \"xla.triangular_solve_%s_%s_%s_%s_%s_%s\",\n instruction->operand(0)->shape().ToString(),\n instruction->operand(1)->shape().ToString(),\n options.left_side() ? \"left\" : \"right\",\n options.lower() ? \"lower\" : \"upper\",\n TriangularSolveOptions_Transpose_Name(options.transpose_a()),\n options.unit_diagonal() ? 
\"unit\" : \"nonunit\");\n HloModule* module = instruction->GetModule();\n HloComputation*& computation =\n computation_cache_.emplace(name, nullptr).first->second;\n if (!computation) {\n XlaBuilder builder(name);\n XlaOp a = Parameter(&builder, 0, instruction->operand(0)->shape(), \"a\");\n XlaOp b = Parameter(&builder, 1, instruction->operand(1)->shape(), \"b\");\n bool transpose_a =\n options.transpose_a() != TriangularSolveOptions::NO_TRANSPOSE;\n bool conjugate_a = options.transpose_a() == TriangularSolveOptions::ADJOINT;\n BuildTriangularSolve(a, b, options.left_side(), options.lower(),\n transpose_a, conjugate_a, options.unit_diagonal(),\n block_size_,\n PrecisionConfig::HIGHEST);\n TF_ASSIGN_OR_RETURN(XlaComputation xla_computation, builder.Build());\n TF_ASSIGN_OR_RETURN(ProgramShape program_shape,\n xla_computation.GetProgramShape());\n HloModuleConfig config(program_shape);\n TF_ASSIGN_OR_RETURN(auto new_module, HloModule::CreateFromProto(\n xla_computation.proto(), config));\n HloCloneContext context(module);\n computation =\n module->DeepCloneComputation(new_module->entry_computation(), &context);\n }\n return instruction->parent()->AddInstruction(HloInstruction::CreateCall(\n instruction->shape(), instruction->operands(), computation));\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/triangular_solve_expander.h\"\n#include \n#include \n#include \"xla/literal.h\"\n#include \"xla/reference_util.h\"\n#include \"xla/test.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\n#include \"xla/types.h\"\nnamespace xla {\nnamespace {\nclass TriangularExpanderTest : public HloTestBase,\n public ::testing::WithParamInterface {};\nTEST_P(TriangularExpanderTest, TestBlockSize) {\n auto block_size = GetParam();\n std::string hlo_string = R\"(\n HloModule TensorFlowTriangularSolve\n ENTRY main {\n a = f32[256,256]{1,0} parameter(0)\n b = f32[256,192]{1,0} parameter(1)\n ROOT 
triangular-solve = f32[256,192]{1,0} triangular-solve(a, b),\n left_side=true, unit_diagonal=true,\n lower=true, transpose_a=NO_TRANSPOSE\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n {\n TriangularSolveExpander triangular_solve_expander(block_size);\n TF_ASSERT_OK_AND_ASSIGN(\n bool result, RunHloPass(&triangular_solve_expander, module.get()));\n EXPECT_TRUE(result);\n }\n Array2D a(256, 256);\n for (int64_t row = 0; row < a.dim(0); ++row) {\n a(row, row) = 1;\n if (row > 0) {\n a(row, row - 1) = 0.01;\n }\n }\n Array2D b(256, 192);\n const float kMax = static_cast(b.dim(0) * b.dim(1) + 1);\n for (int64_t row = 0; row < b.dim(0); ++row) {\n for (int64_t col = 0; col < b.dim(1); ++col) {\n b(row, col) = static_cast(row + col + 1) / kMax;\n }\n }\n auto la = LiteralUtil::CreateR2FromArray2D(a);\n auto lb = LiteralUtil::CreateR2FromArray2D(b);\n TF_ASSERT_OK_AND_ASSIGN(Literal lx, Execute(std::move(module), {&la, &lb}));\n auto x_shape = lx.shape();\n EXPECT_EQ(x_shape.dimensions_size(), 2);\n EXPECT_EQ(x_shape.dimensions(0), b.dim(0));\n EXPECT_EQ(x_shape.dimensions(1), b.dim(1));\n Array2D x(x_shape.dimensions(0), x_shape.dimensions(1));\n x.SetValues(lx.data());\n auto ref_b = ReferenceUtil::MatmulArray2D(a, x);\n auto ref_lb = LiteralUtil::CreateR2FromArray2D(*ref_b);\n EXPECT_TRUE(\n LiteralTestUtil::NearOrEqual(ref_lb, lb, ErrorSpec{0.001, 0.001}));\n}\nINSTANTIATE_TEST_CASE_P(TriangularExpanderTestInstances, TriangularExpanderTest,\n ::testing::Range(2, 256, 7));\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/triangular_solve_expander.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/triangular_solve_expander_test.cc"},"Commit 
Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1157,"cells":{"ID":{"kind":"string","value":"849cb0cf-5da9-4a0b-9798-8ff341a34518"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"transpose_folding"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/transpose_folding.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/transpose_folding_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/transpose_folding.h\"\n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/types/span.h\"\n#include \"xla/hlo/ir/dfs_hlo_visitor_with_default.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/status_macros.h\"\n#include \"xla/util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/logging.h\"\n#include \"tsl/platform/status.h\"\nnamespace xla {\nnamespace {\nTransposeFolding::OperandIndices CanFoldOperandsIntoConvolution(\n const HloInstruction& convolution,\n const TransposeFolding::TransposableConvOperandsFn&\n transposable_conv_operands) {\n if (HloOpcode::kConvolution != convolution.opcode()) {\n return {};\n }\n TransposeFolding::OperandIndices operand_set;\n for (int64_t i = 0; i < convolution.operand_count(); ++i) {\n auto& operand = *convolution.operand(i);\n if (operand.opcode() == HloOpcode::kTranspose) {\n operand_set.push_back(i);\n }\n }\n return transposable_conv_operands(convolution, operand_set);\n}\nbool IsNonIdentityTranspose(const HloInstruction* instruction) {\n if (instruction->opcode() == HloOpcode::kTranspose) {\n for (int dim = 0; dim < instruction->dimensions().size(); ++dim) {\n if (dim != 
instruction->dimensions(dim)) {\n return true;\n }\n }\n }\n return false;\n}\nvoid TransposeDims(tsl::protobuf::RepeatedField& dims,\n absl::Span transpose_dims) {\n for (auto& dim : dims) {\n dim = transpose_dims[dim];\n }\n}\nusing InstructionOperandsPair =\n std::pair;\nabsl::Status FoldTransposeIntoDot(InstructionOperandsPair& pair) {\n HloInstruction* dot = pair.first;\n DotDimensionNumbers new_dot_dims = dot->dot_dimension_numbers();\n HloInstruction* lhs = dot->mutable_operand(0);\n HloInstruction* rhs = dot->mutable_operand(1);\n for (int64_t operand_index : pair.second) {\n if (operand_index == 0) {\n TransposeDims(*new_dot_dims.mutable_lhs_contracting_dimensions(),\n lhs->dimensions());\n TransposeDims(*new_dot_dims.mutable_lhs_batch_dimensions(),\n lhs->dimensions());\n lhs = lhs->mutable_operand(0);\n } else {\n CHECK_EQ(operand_index, 1);\n TransposeDims(*new_dot_dims.mutable_rhs_contracting_dimensions(),\n rhs->dimensions());\n TransposeDims(*new_dot_dims.mutable_rhs_batch_dimensions(),\n rhs->dimensions());\n rhs = rhs->mutable_operand(0);\n }\n }\n return dot->parent()->ReplaceWithNewInstruction(\n dot, HloInstruction::CreateDot(dot->shape(), lhs, rhs, new_dot_dims,\n dot->precision_config()));\n}\nbool FoldTransposeIntoConvolution(InstructionOperandsPair& pair) {\n auto& convolution = *pair.first;\n auto& operand_indices = pair.second;\n if (operand_indices.empty()) {\n return false;\n }\n const ConvolutionDimensionNumbers& dnums =\n convolution.convolution_dimension_numbers();\n ConvolutionDimensionNumbers new_dnums = dnums;\n HloInstruction* new_lhs;\n const int64_t kLhsIdx = 0;\n if (absl::c_linear_search(operand_indices, kLhsIdx)) {\n HloInstruction& transpose = *convolution.mutable_operand(kLhsIdx);\n const auto& transpose_dimensions = transpose.dimensions();\n HloInstruction& transpose_operand = *transpose.mutable_operand(0);\n new_dnums.set_input_batch_dimension(\n transpose_dimensions[dnums.input_batch_dimension()]);\n 
new_dnums.set_input_feature_dimension(\n transpose_dimensions[dnums.input_feature_dimension()]);\n for (auto& input_spatial_dimension :\n *new_dnums.mutable_input_spatial_dimensions()) {\n input_spatial_dimension = transpose_dimensions[input_spatial_dimension];\n }\n new_lhs = &transpose_operand;\n } else {\n new_lhs = convolution.mutable_operand(kLhsIdx);\n }\n HloInstruction* new_rhs;\n const int64_t kRhsIdx = 1;\n if (absl::c_linear_search(operand_indices, kRhsIdx)) {\n HloInstruction& transpose = *convolution.mutable_operand(kRhsIdx);\n const auto& transpose_dimensions = transpose.dimensions();\n HloInstruction& transpose_operand = *transpose.mutable_operand(0);\n new_dnums.set_kernel_input_feature_dimension(\n transpose_dimensions[dnums.kernel_input_feature_dimension()]);\n new_dnums.set_kernel_output_feature_dimension(\n transpose_dimensions[dnums.kernel_output_feature_dimension()]);\n for (auto& kernel_spatial_dimension :\n *new_dnums.mutable_kernel_spatial_dimensions()) {\n kernel_spatial_dimension = transpose_dimensions[kernel_spatial_dimension];\n }\n new_rhs = &transpose_operand;\n } else {\n new_rhs = convolution.mutable_operand(kRhsIdx);\n }\n auto new_conv = HloInstruction::CreateConvolve(\n convolution.shape(), new_lhs, new_rhs, convolution.feature_group_count(),\n convolution.batch_group_count(), convolution.window(), new_dnums,\n convolution.precision_config());\n TF_CHECK_OK(convolution.parent()->ReplaceWithNewInstruction(\n &convolution, std::move(new_conv)));\n return true;\n}\n} \nTransposeFolding::TransposeFolding(\n CanFoldTransposeOperand dot_can_fold_transpose_operand,\n TransposableConvOperandsFn transposable_conv_operands)\n : dot_can_fold_transpose_operand_(\n std::move(dot_can_fold_transpose_operand)),\n transposable_conv_operands_(std::move(transposable_conv_operands)) {}\nabsl::StatusOr TransposeFolding::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n std::vector foldable_dots;\n std::vector 
foldable_convolutions;\n FunctionVisitor visit_fn([this, &foldable_dots, &foldable_convolutions](\n HloInstruction* instruction) {\n if (instruction->opcode() == HloOpcode::kDot) {\n if ((instruction->operand(0)->shape().rank() < 2) ||\n (instruction->operand(1)->shape().rank() < 2)) {\n return absl::OkStatus();\n }\n OperandIndices operand_indices;\n for (int64_t i = 0; i < 2; ++i) {\n if (!IsNonIdentityTranspose(instruction->operand(i))) {\n continue;\n }\n TF_ASSIGN_OR_RETURN(bool can_fold_operand,\n dot_can_fold_transpose_operand_(*instruction, i));\n if (can_fold_operand) {\n operand_indices.push_back(i);\n }\n }\n if (!operand_indices.empty()) {\n foldable_dots.emplace_back(instruction, operand_indices);\n }\n }\n {\n OperandIndices operand_indices = CanFoldOperandsIntoConvolution(\n *instruction, transposable_conv_operands_);\n if (!operand_indices.empty()) {\n foldable_convolutions.emplace_back(instruction, operand_indices);\n }\n }\n return absl::OkStatus();\n });\n for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {\n TF_RETURN_IF_ERROR(comp->Accept(&visit_fn));\n }\n bool changed = false;\n for (InstructionOperandsPair& pair : foldable_dots) {\n TF_RETURN_IF_ERROR(FoldTransposeIntoDot(pair));\n changed = true;\n }\n for (InstructionOperandsPair& pair : foldable_convolutions) {\n changed |= FoldTransposeIntoConvolution(pair);\n }\n return changed;\n}\n absl::StatusOr\nTransposeFolding::IsRowColumnTransposeDotOperand(const HloInstruction& dot,\n int64_t operand_idx) {\n TF_RET_CHECK(dot.opcode() == HloOpcode::kDot);\n TF_RET_CHECK(dot.operand_count() > operand_idx);\n const HloInstruction& transpose = *dot.operand(operand_idx);\n TF_RET_CHECK(transpose.opcode() == HloOpcode::kTranspose);\n const DotDimensionNumbers& dot_dims = dot.dot_dimension_numbers();\n auto batch_dims = (operand_idx == 0) ? dot_dims.lhs_batch_dimensions()\n : dot_dims.rhs_batch_dimensions();\n auto contracting_dims = (operand_idx == 0)\n ? 
dot_dims.lhs_contracting_dimensions()\n : dot_dims.rhs_contracting_dimensions();\n return (batch_dims.size() == transpose.shape().rank() - 2) &&\n (contracting_dims.size() == 1) &&\n absl::c_all_of(batch_dims, [&](int64_t dim) {\n return transpose.dimensions(dim) == dim;\n });\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/transpose_folding.h\"\n#include \n#include \n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/builder/xla_builder.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/literal.h\"\n#include \"xla/service/gpu/ir_emission_utils.h\"\n#include \"xla/service/shape_inference.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/test.h\"\n#include \"xla/test_helpers.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/logging.h\"\n#include \"tsl/platform/status_matchers.h\"\nnamespace xla {\nnamespace {\nnamespace op = xla::testing::opcode_matchers;\nusing ::tsl::testing::IsOkAndHolds;\nusing TransposeFoldingTest = HloTestBase;\nTEST_F(TransposeFoldingTest, FoldDotTranspose) {\n constexpr absl::string_view kHloString = R\"(\nHloModule FoldDotTranspose\nENTRY entry_computation {\n x = f32[2,3]{1,0} parameter(0)\n y = f32[2,3]{1,0} parameter(1)\n transpose = f32[3,2]{1,0} transpose(y), dimensions={1,0}\n ROOT dot = f32[2,2]{1,0} dot(x, transpose), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloString));\n EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Dot(op::Parameter(0), op::Parameter(1),\n 1, 1));\n}\nTEST_F(TransposeFoldingTest, 
DontFoldTransposeOfBatchDimByDefault) {\n constexpr absl::string_view kHloString = R\"(\nHloModule FoldDotTranspose\nENTRY entry_computation {\n x = f32[2,3] parameter(0)\n y = f32[3,2] parameter(1)\n transpose = f32[2,3] transpose(y), dimensions={1,0}\n ROOT dot = f32[2] dot(x, transpose), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_contracting_dims={1}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloString));\n EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));\n}\nTEST_F(TransposeFoldingTest, FoldTransposeOfBatchWhenPermitted) {\n constexpr absl::string_view kHloString = R\"(\nHloModule FoldDotTranspose\nENTRY entry_computation {\n x = f32[5,2,3] parameter(0)\n y = f32[3,5,4] parameter(1)\n transpose = f32[5,3,4] transpose(y), dimensions={1,0,2}\n ROOT dot = f32[5,2,4] dot(x, transpose), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloString));\n TransposeFolding transpose_folding(\n [](const HloInstruction&, int64_t) {\n return true;\n });\n EXPECT_THAT(transpose_folding.Run(module.get()), IsOkAndHolds(true));\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Dot(op::Parameter(0), op::Parameter(1),\n 2, 0));\n}\nTEST_F(TransposeFoldingTest, DontFoldTransposeOfRank1Dot) {\n constexpr absl::string_view kHloString = R\"(\nHloModule FoldDotTranspose\nENTRY entry_computation {\n x = f32[3] parameter(0)\n y = f32[3,2] parameter(1)\n transpose = f32[2,3] transpose(y), dimensions={1,0}\n ROOT dot = f32[2] dot(x, transpose), lhs_batch_dims={}, rhs_batch_dims={}, lhs_contracting_dims={0}, rhs_contracting_dims={1}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloString));\n EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));\n}\nTEST_F(TransposeFoldingTest, 
DontFoldTransposeOfDotWithoutContractingDims) {\n constexpr absl::string_view kHloString = R\"(\nHloModule FoldDotTranspose\nENTRY entry_computation {\n x = f32[3,4] parameter(0)\n y = f32[3,4,6,7] parameter(1)\n transpose = f32[3,4,7,6] transpose(y), dimensions={0,1,3,2}\n ROOT dot = f32[3,4,7,6] dot(x, transpose), lhs_batch_dims={0,1}, rhs_batch_dims={0,1}, lhs_contracting_dims={}, rhs_contracting_dims={}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloString));\n EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));\n}\nTEST_F(TransposeFoldingTest, FoldDotTransposeConstant) {\n constexpr absl::string_view kHloString = R\"(\nHloModule FoldDotTransposeConstant\nENTRY entry_computation {\n constant = f32[2,1]{1,0} constant({ { 1 }, { 2 } })\n transpose = f32[1,2]{1,0} transpose(constant), dimensions={1,0}\n constant.1 = f32[3,2]{1,0} constant({ { 1, 2 }, { 3, 4 }, { 5, 6 } })\n transpose.1 = f32[2,3]{1,0} transpose(constant.1), dimensions={1,0}\n ROOT dot = f32[1,3]{1,0} dot(transpose, transpose.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloString));\n EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Dot(op::Constant(), op::Constant(),\n 0, 1));\n}\nTEST_F(TransposeFoldingTest, FuseDotWithConstantOperands) {\n auto builder = HloComputation::Builder(\"entry\");\n HloInstruction* const1 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(1.0)));\n HloInstruction* const2 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(2.0)));\n HloInstruction* const3 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(3.0)));\n HloInstruction* add = builder.AddInstruction(HloInstruction::CreateBinary(\n const1->shape(), HloOpcode::kAdd, 
const1, const2));\n HloInstruction* sub = builder.AddInstruction(HloInstruction::CreateBinary(\n const2->shape(), HloOpcode::kSubtract, const2, const3));\n HloInstruction* mul = builder.AddInstruction(HloInstruction::CreateBinary(\n add->shape(), HloOpcode::kMultiply, add, sub));\n auto module = CreateNewVerifiedModule(\"fuse_with_constant_operands\");\n HloComputation* entry_computation =\n module->AddEntryComputation(builder.Build(mul));\n HloInstruction* call = module->OutlineExpressionFromComputation(\n {add, sub, mul}, \"entry\", entry_computation);\n EXPECT_EQ(call, entry_computation->root_instruction());\n HloComputation* callee_computation = call->to_apply();\n EXPECT_THAT(call->operands(),\n ::testing::UnorderedElementsAre(const1, const2, const3));\n EXPECT_EQ(6, callee_computation->instruction_count());\n}\nTEST_F(TransposeFoldingTest, FoldDotTransposeInCall) {\n constexpr absl::string_view kHloString = R\"(\nHloModule FoldDotTransposeInCall\ncallee {\n name.0 = f32[2,3]{1,0} parameter(0)\n name.1 = f32[2,3]{1,0} parameter(1)\n transpose.clone = f32[3,2]{1,0} transpose(name.0), dimensions={1,0}\n ROOT dot.clone = f32[2,2]{1,0} dot(name.1, transpose.clone), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n}\nENTRY entry_computation {\n y = f32[2,3]{1,0} parameter(1)\n x = f32[2,3]{1,0} parameter(0)\n ROOT call = f32[2,2]{1,0} call(y, x), to_apply=callee\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloString));\n EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));\n const HloComputation* callee = module->GetComputationWithName(\"callee\");\n ASSERT_NE(callee, nullptr);\n EXPECT_THAT(callee->root_instruction(),\n op::Dot(op::Parameter(1), op::Parameter(0),\n 1, 1));\n}\nTEST_F(TransposeFoldingTest, FoldConvDimSwapTransposeRhs) {\n auto builder = HloComputation::Builder(\"entry_computation\");\n HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(\n 0, 
ShapeUtil::MakeShape(F32, {2, 3, 1, 1}),\n \"x\"));\n HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(\n 1, ShapeUtil::MakeShape(F32, {3, 2, 1, 1}),\n \"y\"));\n HloInstruction* transpose_y =\n builder.AddInstruction(HloInstruction::CreateTranspose(\n ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), y, {1, 0, 2, 3}));\n auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers();\n Window window;\n for (int i = 0; i < 2; ++i) {\n WindowDimension* dim = window.add_dimensions();\n dim->set_padding_low(0);\n dim->set_padding_high(0);\n dim->set_base_dilation(1);\n dim->set_window_dilation(1);\n dim->set_stride(1);\n dim->set_size(\n transpose_y->shape().dimensions(dnums.kernel_spatial_dimensions(i)));\n }\n absl::StatusOr conv_shape = ShapeInference::InferConvolveShape(\n x->shape(), transpose_y->shape(), 1,\n 1, window, dnums,\n std::nullopt);\n EXPECT_IS_OK(conv_shape);\n HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(\n conv_shape.value(), x, transpose_y,\n 1, 1, window, dnums,\n DefaultPrecisionConfig(2)));\n auto module = CreateNewVerifiedModule(\"test_module\");\n HloComputation* entry_computation =\n module->AddEntryComputation(builder.Build(conv));\n EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));\n absl::flat_hash_set instruction_set(\n entry_computation->instructions().begin(),\n entry_computation->instructions().end());\n CHECK_EQ(1, instruction_set.erase(x)) << \"x is not in entry_computation.\";\n CHECK_EQ(1, instruction_set.erase(y)) << \"y is not in entry_computation.\";\n CHECK_EQ(1, instruction_set.size())\n << \"entry_computation should contain exactly 3 instructions.\";\n HloInstruction* new_conv = *instruction_set.begin();\n EXPECT_EQ(HloOpcode::kConvolution, new_conv->opcode());\n EXPECT_EQ(dnums.kernel_input_feature_dimension(),\n new_conv->convolution_dimension_numbers()\n .kernel_output_feature_dimension());\n EXPECT_EQ(dnums.kernel_output_feature_dimension(),\n 
new_conv->convolution_dimension_numbers()\n .kernel_input_feature_dimension());\n}\nTEST_F(TransposeFoldingTest, FoldConvComplexTransposeRhs) {\n auto builder = HloComputation::Builder(\"entry_computation\");\n HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeShape(F32, {2, 3, 1, 1}),\n \"x\"));\n HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(\n 1, ShapeUtil::MakeShape(F32, {1, 2, 1, 3}),\n \"y\"));\n HloInstruction* transpose_y =\n builder.AddInstruction(HloInstruction::CreateTranspose(\n ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), y, {1, 3, 0, 2}));\n auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers();\n Window window;\n for (int i = 0; i < 2; ++i) {\n WindowDimension* dim = window.add_dimensions();\n dim->set_padding_low(0);\n dim->set_padding_high(0);\n dim->set_base_dilation(1);\n dim->set_window_dilation(1);\n dim->set_stride(1);\n dim->set_size(\n transpose_y->shape().dimensions(dnums.kernel_spatial_dimensions(i)));\n }\n absl::StatusOr conv_shape = ShapeInference::InferConvolveShape(\n x->shape(), transpose_y->shape(), 1,\n 1, window, dnums,\n std::nullopt);\n EXPECT_IS_OK(conv_shape);\n HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(\n conv_shape.value(), x, transpose_y,\n 1, 1, window, dnums,\n DefaultPrecisionConfig(2)));\n auto module = CreateNewVerifiedModule(\"test_module\");\n HloComputation* entry_computation =\n module->AddEntryComputation(builder.Build(conv));\n EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));\n absl::flat_hash_set instruction_set(\n entry_computation->instructions().begin(),\n entry_computation->instructions().end());\n CHECK_EQ(1, instruction_set.erase(x)) << \"x is not in entry_computation.\";\n CHECK_EQ(1, instruction_set.erase(y)) << \"y is not in entry_computation.\";\n CHECK_EQ(1, instruction_set.size())\n << \"entry_computation should contain exactly 3 instructions.\";\n HloInstruction* 
new_conv = *instruction_set.begin();\n EXPECT_EQ(HloOpcode::kConvolution, new_conv->opcode());\n EXPECT_EQ(dnums.kernel_input_feature_dimension(),\n new_conv->convolution_dimension_numbers()\n .kernel_output_feature_dimension());\n EXPECT_EQ(dnums.kernel_spatial_dimensions(1),\n new_conv->convolution_dimension_numbers()\n .kernel_input_feature_dimension());\n EXPECT_EQ(\n dnums.kernel_output_feature_dimension(),\n new_conv->convolution_dimension_numbers().kernel_spatial_dimensions(0));\n EXPECT_EQ(\n dnums.kernel_spatial_dimensions(0),\n new_conv->convolution_dimension_numbers().kernel_spatial_dimensions(1));\n}\nTEST_F(TransposeFoldingTest, FoldConvTransposeLhs) {\n auto builder = HloComputation::Builder(\"entry_computation\");\n HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeShape(F32, {3, 2, 1, 1}),\n \"x\"));\n HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(\n 1, ShapeUtil::MakeShape(F32, {2, 3, 1, 1}),\n \"y\"));\n HloInstruction* transpose_x =\n builder.AddInstruction(HloInstruction::CreateTranspose(\n ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), x, {1, 0, 2, 3}));\n auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers();\n Window window;\n for (int i = 0; i < 2; ++i) {\n WindowDimension* dim = window.add_dimensions();\n dim->set_padding_low(0);\n dim->set_padding_high(0);\n dim->set_base_dilation(1);\n dim->set_window_dilation(1);\n dim->set_stride(1);\n dim->set_size(y->shape().dimensions(dnums.kernel_spatial_dimensions(i)));\n }\n absl::StatusOr conv_shape = ShapeInference::InferConvolveShape(\n transpose_x->shape(), y->shape(), 1,\n 1, window, dnums,\n std::nullopt);\n EXPECT_IS_OK(conv_shape);\n HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(\n conv_shape.value(), transpose_x, y,\n 1, 1, window, dnums,\n DefaultPrecisionConfig(2)));\n auto module = CreateNewVerifiedModule(\"test_module\");\n HloComputation* entry_computation =\n 
module->AddEntryComputation(builder.Build(conv));\n EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));\n absl::flat_hash_set instruction_set(\n entry_computation->instructions().begin(),\n entry_computation->instructions().end());\n EXPECT_EQ(1, instruction_set.erase(x)) << \"x is not in entry_computation.\";\n EXPECT_EQ(1, instruction_set.erase(y)) << \"y is not in entry_computation.\";\n EXPECT_EQ(1, instruction_set.size())\n << \"entry_computation should contain exactly 3 instructions.\";\n HloInstruction* new_conv = *instruction_set.begin();\n EXPECT_EQ(HloOpcode::kConvolution, new_conv->opcode());\n EXPECT_EQ(dnums.input_feature_dimension(),\n new_conv->convolution_dimension_numbers().input_batch_dimension());\n EXPECT_EQ(\n dnums.input_batch_dimension(),\n new_conv->convolution_dimension_numbers().input_feature_dimension());\n EXPECT_EQ(\n dnums.input_spatial_dimensions(0),\n new_conv->convolution_dimension_numbers().input_spatial_dimensions(0));\n EXPECT_EQ(\n dnums.input_spatial_dimensions(1),\n new_conv->convolution_dimension_numbers().input_spatial_dimensions(1));\n EXPECT_EQ(\n dnums.output_spatial_dimensions(0),\n new_conv->convolution_dimension_numbers().output_spatial_dimensions(0));\n EXPECT_EQ(\n dnums.output_spatial_dimensions(1),\n new_conv->convolution_dimension_numbers().output_spatial_dimensions(1));\n}\nTEST_F(TransposeFoldingTest, FoldConvComplexTransposeLhs) {\n auto builder = HloComputation::Builder(\"entry_computation\");\n HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeShape(F32, {3, 2, 1, 1}),\n \"x\"));\n HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(\n 1, ShapeUtil::MakeShape(F32, {2, 3, 1, 1}),\n \"y\"));\n HloInstruction* transpose_x =\n builder.AddInstruction(HloInstruction::CreateTranspose(\n ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), x, {1, 0, 3, 2}));\n auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers();\n Window window;\n 
for (int i = 0; i < 2; ++i) {\n WindowDimension* dim = window.add_dimensions();\n dim->set_padding_low(0);\n dim->set_padding_high(0);\n dim->set_base_dilation(1);\n dim->set_window_dilation(1);\n dim->set_stride(1);\n dim->set_size(y->shape().dimensions(dnums.kernel_spatial_dimensions(i)));\n }\n absl::StatusOr conv_shape = ShapeInference::InferConvolveShape(\n transpose_x->shape(), y->shape(), 1,\n 1, window, dnums,\n std::nullopt);\n EXPECT_IS_OK(conv_shape);\n HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(\n conv_shape.value(), transpose_x, y,\n 1, 1, window, dnums,\n DefaultPrecisionConfig(2)));\n auto module = CreateNewVerifiedModule(\"test_module\");\n HloComputation* entry_computation =\n module->AddEntryComputation(builder.Build(conv));\n EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));\n absl::flat_hash_set instruction_set(\n entry_computation->instructions().begin(),\n entry_computation->instructions().end());\n EXPECT_EQ(1, instruction_set.erase(x)) << \"x is not in entry_computation.\";\n EXPECT_EQ(1, instruction_set.erase(y)) << \"y is not in entry_computation.\";\n EXPECT_EQ(1, instruction_set.size())\n << \"entry_computation should contain exactly 3 instructions.\";\n HloInstruction* new_conv = *instruction_set.begin();\n EXPECT_EQ(HloOpcode::kConvolution, new_conv->opcode());\n EXPECT_EQ(dnums.input_feature_dimension(),\n new_conv->convolution_dimension_numbers().input_batch_dimension());\n EXPECT_EQ(\n dnums.input_batch_dimension(),\n new_conv->convolution_dimension_numbers().input_feature_dimension());\n EXPECT_EQ(\n dnums.input_spatial_dimensions(0),\n new_conv->convolution_dimension_numbers().input_spatial_dimensions(1));\n EXPECT_EQ(\n dnums.input_spatial_dimensions(1),\n new_conv->convolution_dimension_numbers().input_spatial_dimensions(0));\n EXPECT_EQ(\n dnums.output_spatial_dimensions(0),\n new_conv->convolution_dimension_numbers().output_spatial_dimensions(0));\n EXPECT_EQ(\n 
dnums.output_spatial_dimensions(1),\n new_conv->convolution_dimension_numbers().output_spatial_dimensions(1));\n}\nTEST_F(TransposeFoldingTest, FoldBatchDotTranspose) {\n constexpr absl::string_view kHloString = R\"(\nHloModule FoldBatchDotTranspose\nENTRY entry_computation {\n x = f32[7,7,2,3]{3,2,1,0} parameter(0)\n y = f32[7,7,2,3]{3,2,1,0} parameter(1)\n transpose = f32[7,7,3,2]{3,2,1,0} transpose(y), dimensions={0,1,3,2}\n ROOT dot = f32[7,7,2,2]{3,2,1,0} dot(x, transpose), lhs_contracting_dims={3},\n rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloString));\n EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Dot(op::Parameter(0), op::Parameter(1),\n 3, 3));\n}\nTEST_F(TransposeFoldingTest, NoFoldBatchDotTransposeBatch) {\n constexpr absl::string_view kHloString = R\"(\nHloModule NoFoldBatchDotTransposeBatch\nENTRY entry_computation {\n x = f32[7,7,2,3]{3,2,1,0} parameter(0)\n y = f32[7,7,2,3]{3,2,1,0} parameter(1)\n transpose = f32[7,7,3,2]{3,2,1,0} transpose(y), dimensions={1,0,3,2}\n ROOT dot = f32[7,7,2,2]{3,2,1,0} dot(x, transpose), lhs_contracting_dims={3},\n rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloString));\n EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));\n}\nTEST_F(TransposeFoldingTest, FoldBatchDotTransposeNonContiguousBatch) {\n constexpr absl::string_view kHloString = R\"(\nHloModule FoldBatchDotTransposeNonContiguousBatch\nENTRY entry_computation {\n x = f32[7,2,7,3]{3,2,1,0} parameter(0)\n y = f32[7,2,7,3]{3,2,1,0} parameter(1)\n transpose = f32[7,3,7,2]{3,2,1,0} transpose(y), dimensions={0,3,2,1}\n ROOT dot = f32[7,7,2,2]{3,2,1,0} dot(x, transpose), lhs_contracting_dims={3},\n 
rhs_contracting_dims={1}, lhs_batch_dims={0,2}, rhs_batch_dims={0,2}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloString));\n EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Dot(op::Parameter(0), op::Parameter(1),\n 3, 3));\n}\nTEST_F(TransposeFoldingTest, NoFoldBatchDotTransposeIdentity) {\n constexpr absl::string_view kHloString = R\"(\nHloModule NoFoldBatchDotTransposeIdentity\nENTRY entry_computation {\n x = f32[7,7,2,3]{3,2,1,0} parameter(0)\n y = f32[7,7,3,2]{3,2,1,0} parameter(1)\n transpose = f32[7,7,3,2]{3,2,1,0} transpose(y), dimensions={0,1,2,3}\n ROOT dot = f32[7,7,2,2]{3,2,1,0} dot(x, transpose), lhs_contracting_dims={3},\n rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloString));\n EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/transpose_folding.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/transpose_folding_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1158,"cells":{"ID":{"kind":"string","value":"cfec981d-552c-4da4-9a2f-8bcbd2723fba"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"reshape_mover"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/reshape_mover.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/reshape_mover_test.cc"},"Code":{"kind":"string","value":"#include 
\"xla/service/reshape_mover.h\"\n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"xla/permutation_util.h\"\n#include \"xla/service/hlo_creation_utils.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/status_macros.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/errors.h\"\nnamespace xla {\nnamespace {\nbool IsRearrange(const HloInstruction* instruction) {\n return instruction->opcode() == HloOpcode::kReshape ||\n instruction->opcode() == HloOpcode::kTranspose;\n}\nbool AreEquivalentRearranges(const HloInstruction* a, const HloInstruction* b) {\n if (a->opcode() != b->opcode() ||\n !ShapeUtil::SameDimensions(a->shape(), b->shape())) {\n return false;\n }\n switch (a->opcode()) {\n case HloOpcode::kTranspose:\n return a->dimensions() == b->dimensions();\n case HloOpcode::kReshape:\n return ShapeUtil::SameDimensions(a->operand(0)->shape(),\n b->operand(0)->shape());\n default:\n return false;\n }\n}\nabsl::InlinedVector TransposedBcastDims(\n absl::Span bcast_dims,\n absl::Span transpose_dims) {\n auto inv_perm = InversePermutation(transpose_dims);\n absl::InlinedVector new_bcast_dims;\n for (int64_t dim : bcast_dims) {\n new_bcast_dims.push_back(inv_perm[dim]);\n }\n return new_bcast_dims;\n}\n} \nbool ReshapeMover::CanTriviallyRearrange(const HloInstruction* instr,\n const HloInstruction* rearrange) {\n CHECK(IsRearrange(rearrange)) << rearrange->ToString();\n if (rearrange->opcode() == HloOpcode::kReshape &&\n ShapeUtil::Equal(rearrange->shape(), rearrange->operand(0)->shape())) {\n return true;\n }\n if (rearrange->opcode() == HloOpcode::kTranspose &&\n IsIdentityPermutation(rearrange->dimensions())) {\n return true;\n }\n if (instr->opcode() == HloOpcode::kConstant) {\n return true;\n }\n if (instr->opcode() == HloOpcode::kRng && instr->user_count() == 1) {\n return true;\n }\n if (instr->opcode() == HloOpcode::kBroadcast) {\n if (!absl::c_is_sorted(instr->dimensions())) {\n return false;\n }\n if (rearrange->opcode() == 
HloOpcode::kReshape) {\n return ShapeUtil::IsScalar(instr->operand(0)->shape()) ||\n (options_.reshape_of_1d_broadcast_is_cheap &&\n ShapeUtil::TrueRank(instr->operand(0)->shape()) <= 1) ||\n (options_.reshape_of_1d_broadcast_is_cheap &&\n ShapeUtil::ReshapeLeavesDimensionsUnmodified(\n rearrange->shape(),\n rearrange->operand(0)->shape(),\n instr->dimensions())\n .has_value());\n }\n if (rearrange->opcode() == HloOpcode::kTranspose) {\n return absl::c_is_sorted(TransposedBcastDims(\n instr->dimensions(), InversePermutation(rearrange->dimensions())));\n }\n }\n return false;\n}\nconst HloInstruction* ReshapeMover::FirstNontrivialRearrange(\n absl::Span instrs) {\n auto rearrange_it = absl::c_find_if(instrs, [&](const HloInstruction* instr) {\n return IsRearrange(instr) &&\n !CanTriviallyRearrange(instr->operand(0), instr);\n });\n if (rearrange_it == instrs.end()) {\n return nullptr;\n }\n return *rearrange_it;\n}\nbool ReshapeMover::IsReshapeMoveCandidate(HloInstruction* instruction) {\n auto print_no_metadata = HloPrintOptions().set_print_metadata(false);\n VLOG(5) << \"** Checking instruction: \"\n << instruction->ToString(print_no_metadata);\n if (!instruction->IsElementwise()) {\n return false;\n }\n const HloInstruction* rearrange =\n FirstNontrivialRearrange(instruction->operands());\n if (rearrange == nullptr) {\n return false;\n }\n return absl::c_all_of(\n instruction->operands(), [&](const HloInstruction* operand) {\n return (IsRearrange(operand) &&\n AreEquivalentRearranges(operand, rearrange)) ||\n (!IsRearrange(operand) &&\n CanTriviallyRearrange(operand, rearrange));\n });\n}\nabsl::StatusOr ReshapeMover::ApplyInverseRearrange(\n const HloInstruction* rearrange, HloInstruction* operand) {\n switch (rearrange->opcode()) {\n case HloOpcode::kReshape: {\n Shape new_shape = ShapeUtil::ChangeElementType(\n rearrange->operand(0)->shape(), operand->shape().element_type());\n if (operand->shape() != new_shape) {\n return MakeReshapeHlo(new_shape, operand);\n 
} else {\n return operand;\n }\n }\n case HloOpcode::kTranspose: {\n if (!IsIdentityPermutation(rearrange->dimensions())) {\n return MakeTransposeHlo(operand,\n InversePermutation(rearrange->dimensions()));\n } else {\n return operand;\n }\n }\n default:\n LOG(FATAL) << \"Invalid rearrange op: \" << rearrange->ToString();\n }\n}\nabsl::StatusOr ReshapeMover::SinkRearrangeOperands(\n HloInstruction* instruction) {\n auto print_no_metadata = HloPrintOptions().set_print_metadata(false);\n HloComputation* computation = instruction->parent();\n const HloInstruction* rearrange =\n FirstNontrivialRearrange(instruction->operands());\n CHECK(rearrange != nullptr);\n const Shape& new_operand_shape = rearrange->operand(0)->shape();\n VLOG(3) << \"** Sinking reshape or transpose: \"\n << instruction->ToString(print_no_metadata)\n << \"\\n\\tfirst rearrange operand: \"\n << rearrange->ToString(print_no_metadata) \n << \"\\n\\tnew operand shape: \"\n << ShapeUtil::HumanString(new_operand_shape);\n auto operands = instruction->operands();\n for (size_t i = 0; i < operands.size(); ++i) {\n VLOG(3) << \"Updating operand #\" << i << \": \"\n << operands[i]->ToString(print_no_metadata);\n TF_ASSIGN_OR_RETURN(operands[i],\n ApplyInverseRearrange(rearrange, operands[i]));\n VLOG(3) << \"Updated operand #\" << i\n << \" to: \" << operands[i]->ToString(print_no_metadata);\n }\n HloInstruction* new_elementwise =\n computation->AddInstruction(instruction->CloneWithNewOperands(\n ShapeUtil::ChangeElementType(new_operand_shape,\n instruction->shape().element_type()),\n operands));\n std::unique_ptr new_rearrange;\n switch (rearrange->opcode()) {\n case HloOpcode::kReshape:\n VLOG(3) << \"Creating new reshape for new elementwise op: \"\n << new_elementwise->ToString(print_no_metadata);\n new_rearrange =\n HloInstruction::CreateReshape(instruction->shape(), new_elementwise);\n break;\n case HloOpcode::kTranspose:\n new_rearrange = HloInstruction::CreateTranspose(\n instruction->shape(), 
new_elementwise, rearrange->dimensions());\n break;\n default:\n LOG(FATAL) << \"Bad opcode\";\n }\n if (instruction->has_sharding()) {\n new_elementwise->clear_sharding();\n }\n TF_RETURN_IF_ERROR(computation->ReplaceWithNewInstruction(\n instruction, std::move(new_rearrange)));\n return true;\n}\nabsl::StatusOr ReshapeMover::TryReshapeMoveOnCandidates(\n HloInstructionSet* candidates) {\n bool removed = true;\n while (!candidates->empty() && removed) {\n if (VLOG_IS_ON(5)) {\n for (const HloInstruction* instruction : *candidates) {\n VLOG(5) << \"candidate \" << instruction->ToString();\n }\n }\n ConstHloInstructionSet rearrange_operands;\n for (const HloInstruction* instruction : *candidates) {\n for (const auto* operand : instruction->operands()) {\n if (IsRearrange(operand)) {\n rearrange_operands.insert(operand);\n }\n }\n }\n removed = false;\n for (auto operand : rearrange_operands) {\n if (absl::c_any_of(operand->users(), [&](HloInstruction* user) {\n return !candidates->count(user);\n })) {\n for (auto* user : operand->users()) {\n removed |= candidates->erase(user) > 0;\n }\n }\n }\n }\n if (candidates->empty()) {\n return false;\n }\n for (HloInstruction* instruction : *candidates) {\n if (!ConsumeFuel(\"reshape-mover\", [&] {\n return absl::StrCat(\"instruction: \", instruction->ToString(),\n \"\\nFull module:\\n\",\n instruction->GetModule()->ToString());\n })) {\n break;\n }\n TF_ASSIGN_OR_RETURN(bool did_change, SinkRearrangeOperands(instruction));\n CHECK(did_change);\n }\n return true;\n}\nabsl::StatusOr ReshapeMover::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n bool changed = false;\n for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {\n HloInstructionSet candidates;\n for (HloInstruction* instruction : comp->instructions()) {\n if (IsReshapeMoveCandidate(instruction)) {\n candidates.insert(instruction);\n }\n }\n TF_ASSIGN_OR_RETURN(bool did_change,\n 
TryReshapeMoveOnCandidates(&candidates));\n changed |= did_change;\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/reshape_mover.h\"\n#include \n#include \n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/pass/hlo_pass_fix.h\"\n#include \"xla/service/algebraic_simplifier.h\"\n#include \"xla/service/hlo_verifier.h\"\n#include \"xla/service/pattern_matcher.h\"\n#include \"xla/service/pattern_matcher_gmock.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\nnamespace xla {\nnamespace {\nnamespace m = xla::match;\nclass ReshapeMoverTest : public HloTestBase {\n protected:\n absl::Status RunPass(HloModule* module, bool change_expected,\n ReshapeMoverOptions options = ReshapeMoverOptions{}) {\n TF_ASSIGN_OR_RETURN(bool changed,\n RunHloPass(ReshapeMover(options), module));\n SCOPED_TRACE(module->ToString());\n EXPECT_EQ(changed, change_expected);\n TF_EXPECT_OK(RunHloPass(HloVerifier(HloVerifierOpts()), module).status());\n TF_EXPECT_OK(RunHloPass(HloPassFix(\n AlgebraicSimplifierOptions()),\n module)\n .status());\n return absl::OkStatus();\n }\n};\nTEST_F(ReshapeMoverTest, ReshapesWithDifferentInputShapesNotMoved) {\n const std::string hlo_string = R\"(\n HloModule test\n ENTRY test {\n reshape0 = f32[8,7] reshape(f32[1,8,1,7] parameter(0))\n reshape1 = f32[8,7] reshape(f32[1,8,7,1] parameter(1))\n ROOT add = add(reshape0, reshape1)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(m.get(), false));\n}\nTEST_F(ReshapeMoverTest, OneConstantAndOneReshapesOnRngNotMoved) {\n const std::string hlo_string = R\"(\n HloModule test\n ENTRY test {\n rng = f32[1,8,1,7,1] rng(f32[] constant(0), f32[] constant(1)), distribution=rng_uniform\n ROOT add = add(f32[8,7] reshape(rng), f32[8,7] constant({...}))\n }\n )\";\n 
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(m.get(), false));\n}\nTEST_F(ReshapeMoverTest, EquivalentReshapesMoved) {\n const std::string hlo_string = R\"(\n HloModule test\n ENTRY test {\n reshape0 = f32[8,7] reshape(f32[1,8,1,7] parameter(0))\n reshape1 = f32[8,7] reshape(f32[1,8,1,7] parameter(1))\n ROOT add = f32[8,7] add(reshape0, reshape1)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(m.get(), true));\n EXPECT_THAT(m->entry_computation()->root_instruction(),\n GmockMatch(m::Reshape(m::Add(m::Parameter(0), m::Parameter(1)))));\n}\nTEST_F(ReshapeMoverTest, SinkReshapeBelowSelect) {\n const std::string hlo_string = R\"(\n HloModule test\n ENTRY test {\n ROOT select = f32[2,3] select(\n pred[2,3] reshape(pred[6] parameter(0)),\n f32[2,3] reshape(f32[6] parameter(1)),\n f32[2,3] reshape(f32[6] parameter(2)))\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(m.get(), true));\n SCOPED_TRACE(m->ToString());\n EXPECT_THAT(m->entry_computation()->root_instruction(),\n GmockMatch(m::Reshape(m::Select(m::Parameter(0), m::Parameter(1),\n m::Parameter(2)))));\n}\nTEST_F(ReshapeMoverTest, SinkReshapeBelowSelectWithConstant) {\n const std::string hlo_string = R\"(\n HloModule test\n ENTRY test {\n ROOT select = f32[2,3] select(\n pred[2,3] reshape(pred[6] parameter(0)),\n f32[2,3] reshape(f32[6] parameter(1)),\n f32[2,3] constant({...}))\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(m.get(), true));\n SCOPED_TRACE(m->ToString());\n EXPECT_THAT(m->entry_computation()->root_instruction(),\n GmockMatch(m::Reshape(m::Select(m::Parameter(0), m::Parameter(1),\n m::Reshape(m::Constant())))));\n}\nTEST_F(ReshapeMoverTest, OneParameterAndOneReshapeNotMoved) {\n const std::string hlo_string = R\"(\n HloModule test\n ENTRY test {\n reshape0 = 
f32[8,7] reshape(f32[1,8,1,7] parameter(0))\n ROOT add = add(reshape0, f32[8,7] parameter(1))\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(m.get(), false));\n}\nTEST_F(ReshapeMoverTest, DontSinkReshapesOfConstants) {\n const std::string hlo_string = R\"(\n HloModule test\n ENTRY test {\n ROOT select = select(\n pred[3,2] parameter(0),\n f32[3,2] reshape(f32[2,3] constant({...})),\n f32[3,2] reshape(f32[2,3] constant({...})))\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(m.get(), false));\n}\nTEST_F(ReshapeMoverTest, OneNontrivialReshapeMoved) {\n const std::string hlo_string = R\"(\n HloModule test\n ENTRY test {\n ROOT add = add(\n f32[3,2] reshape(f32[2,3] parameter(0)),\n f32[3,2] constant({...}))\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(m.get(), true));\n SCOPED_TRACE(m->ToString());\n EXPECT_THAT(m->entry_computation()->root_instruction(),\n GmockMatch(m::Reshape(\n m::Add(m::Parameter(0), m::Reshape(m::Constant())))));\n}\nTEST_F(ReshapeMoverTest, MultipleReshapes) {\n const std::string hlo_string = R\"(\n HloModule test\n ENTRY test {\n add0 = f32[8,7,1] add(\n f32[8,7,1] reshape(f32[1,8,1,7] parameter(0)),\n f32[8,7,1] reshape(f32[1,8,1,7] parameter(1)))\n ROOT add1 = f32[8,7] add(\n f32[8,7] reshape(add0),\n f32[8,7] reshape(f32[8,7,1] parameter(2)))\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(m.get(), true));\n SCOPED_TRACE(m->ToString());\n EXPECT_THAT(m->entry_computation()->root_instruction(),\n GmockMatch(m::Reshape(\n m::Add(m::Reshape(m::Add(m::Parameter(0), m::Parameter(1))),\n m::Parameter(2)))));\n}\nTEST_F(ReshapeMoverTest, SinkTransposeAcrossBroadcastScalar) {\n const std::string hlo_string = R\"(\n HloModule TransposeMulInversedTransposeModule\n ENTRY TransposeMulInversedTranspose 
{\n src0 = f32[20,8]{1,0} parameter(0)\n transpose0 = f32[8,20]{1,0} transpose(src0), dimensions={1,0}\n src1 = f32[] parameter(1)\n broadcast0 = f32[8,20]{1,0} broadcast(src1), dimensions={}\n ROOT multiply0 = f32[8,20]{1,0} multiply(transpose0, broadcast0)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(m.get(), true));\n SCOPED_TRACE(m->ToString());\n EXPECT_THAT(m->entry_computation()->root_instruction(),\n GmockMatch(m::Transpose(m::Multiply(\n m::Parameter(0), m::Broadcast(m::Parameter(1))))));\n}\nTEST_F(ReshapeMoverTest, ReshapeWithUsersOutsideCandidatesNotSink) {\n const std::string hlo_string = R\"(\n HloModule ReshapeWithUsersOutsideCandidates\n ENTRY ReshapeWithMultipleUsers {\n param0 = f32[20,8]{1,0} parameter(0)\n reshape0 = f32[8,20]{1,0} reshape(param0)\n param1 = f32[] parameter(1)\n broadcast0 = f32[8,20]{1,0} broadcast(param1), dimensions={}\n param2 = f32[20,8]{1,0} parameter(2)\n reshape1 = f32[8,20]{1,0} reshape(param2)\n param3 = f32[20,8]{1,0} parameter(3)\n reshape2 = f32[8,20]{1,0} reshape(param3)\n param4 = f32[8,20]{1,0} parameter(4)\n add0 = f32[8,20]{1,0} add(reshape0, broadcast0)\n add1 = f32[8,20]{1,0} add(reshape0, reshape1)\n add2 = f32[8,20]{1,0} add(reshape1, param4)\n ROOT tuple = (f32[8,20]{1,0},f32[8,20]{1,0},\n f32[8,20]{1,0}) tuple(add0, add1, add2)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(m.get(), false));\n}\nTEST_F(ReshapeMoverTest, ReshapeNoUsersOutsideCandidatesSink1) {\n const std::string hlo_string = R\"(\n HloModule ReshapeNoUsersOutsideCandidates1\n ENTRY ReshapeWithMultipleUsers1 {\n param0 = f32[20,8]{1,0} parameter(0)\n reshape0 = f32[8,20]{1,0} reshape(param0)\n param1 = f32[] parameter(1)\n broadcast0 = f32[8,20]{1,0} broadcast(param1), dimensions={}\n param2 = f32[20,8]{1,0} parameter(2)\n reshape1 = f32[8,20]{1,0} reshape(param2)\n param3 = f32[20,8]{1,0} parameter(3)\n 
reshape2 = f32[8,20]{1,0} reshape(param3)\n add0 = f32[8,20]{1,0} add(reshape0, broadcast0)\n add1 = f32[8,20]{1,0} add(reshape0, reshape1)\n add2 = f32[8,20]{1,0} add(reshape1, reshape2)\n ROOT tuple = (f32[8,20]{1,0},f32[8,20]{1,0},\n f32[8,20]{1,0}) tuple(add0, add1, add2)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(m.get(), true));\n SCOPED_TRACE(m->ToString());\n EXPECT_THAT(\n m->entry_computation()->root_instruction(),\n GmockMatch(m::Tuple(\n m::Reshape(m::Add(m::Parameter(0), m::Broadcast(m::Parameter(1)))),\n m::Reshape(m::Add(m::Parameter(0), m::Parameter(2))),\n m::Reshape(m::Add(m::Parameter(2), m::Parameter(3))))));\n}\nTEST_F(ReshapeMoverTest, ReshapeNoUsersOutsideCandidatesSink2) {\n const std::string hlo_string = R\"(\n HloModule ReshapeNoUsersOutsideCandidates2\n ENTRY ReshapeWithMultipleUsers2 {\n param0 = f32[20,8]{1,0} parameter(0)\n reshape0 = f32[8,20]{1,0} reshape(param0)\n ROOT add0 = f32[8,20]{1,0} add(reshape0, reshape0)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(m.get(), true));\n SCOPED_TRACE(m->ToString());\n EXPECT_THAT(m->entry_computation()->root_instruction(),\n GmockMatch(m::Reshape(m::Add())));\n}\nTEST_F(ReshapeMoverTest, ReshapeOfRank1BroadcastIsNotTrivial) {\n const std::string hlo_string = R\"(\n HloModule test\n ENTRY test {\n a = f32[2,3] broadcast(f32[2] parameter(0)), dimensions={0}\n b = f32[2,3] reshape(f32[6] parameter(1))\n ROOT add0 = add(a, b)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(m.get(), false));\n}\nTEST_F(ReshapeMoverTest, ReshapeOfRank1BroadcastIsTrivial) {\n const std::string hlo_string = R\"(\n HloModule test\n ENTRY test {\n a = f32[2,3] broadcast(f32[2] parameter(0)), dimensions={0}\n b = f32[2,3] reshape(f32[6] parameter(1))\n ROOT add0 = add(a, b)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, 
ParseAndReturnVerifiedModule(hlo_string));\n ReshapeMoverOptions options;\n options.reshape_of_1d_broadcast_is_cheap = true;\n TF_ASSERT_OK(RunPass(m.get(), true, options));\n SCOPED_TRACE(m->ToString());\n EXPECT_THAT(\n m->entry_computation()->root_instruction(),\n GmockMatch(m::Reshape(\n m::Add(m::Reshape(m::Broadcast(m::Parameter(0))), m::Parameter(1)))));\n}\nTEST_F(ReshapeMoverTest, ReshapeOfRank2BroadcastIsAllowed) {\n const std::string hlo_string = R\"(\n HloModule test\n ENTRY test {\n a = f32[2,3,35] broadcast(f32[2,3] parameter(0)), dimensions={0,1}\n b = f32[2,3,35] reshape(f32[2,3,5,7] parameter(1))\n ROOT add0 = add(a, b)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));\n ReshapeMoverOptions options;\n options.reshape_of_1d_broadcast_is_cheap = true;\n TF_ASSERT_OK(RunPass(m.get(), true, options));\n SCOPED_TRACE(m->ToString());\n EXPECT_THAT(m->entry_computation()->root_instruction(),\n GmockMatch(m::Reshape(\n m::Add(m::Broadcast(m::Parameter(0)), m::Parameter(1)))));\n}\nTEST_F(ReshapeMoverTest, SinkDisallowedIfReshapeChangesBroadcastDims) {\n const std::string hlo_string = R\"(\n HloModule test\n ENTRY test {\n a = f32[2,3,35] broadcast(f32[2,3] parameter(0)), dimensions={0,1}\n b = f32[2,3,35] reshape(f32[6,5,7] parameter(1))\n ROOT add0 = add(a, b)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(m.get(), false));\n}\nTEST_F(ReshapeMoverTest, TransposeOfBroadcastIsAllowed) {\n const std::string hlo_string = R\"(\n HloModule test\n ENTRY test {\n a = f32[2,3] broadcast(f32[2] parameter(0)), dimensions={0}\n b = f32[2,3] transpose(f32[3,2] parameter(1)), dimensions={1,0}\n ROOT add0 = add(a, b)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(m.get(), true));\n SCOPED_TRACE(m->ToString());\n EXPECT_THAT(m->entry_computation()->root_instruction(),\n GmockMatch(m::Transpose(\n 
m::Add(m::Broadcast(m::Parameter(0)), m::Parameter(1)))));\n}\nTEST_F(ReshapeMoverTest, TransposeReordersBroadcastDims) {\n const std::string hlo_string = R\"(\n HloModule test\n ENTRY test {\n a = f32[2,3,5] broadcast(f32[2,3] parameter(0)), dimensions={0,1}\n b = f32[2,3,5] transpose(f32[3,2,5] parameter(1)), dimensions={1,0,2}\n ROOT add0 = add(a, b)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(m.get(), false));\n}\nTEST_F(ReshapeMoverTest, ShardingConsistencyPreservation) {\n const std::string hlo_string = R\"(\n HloModule module\n ENTRY entry {\n copy.2424 = bf16[3,16,128]{2,1,0} parameter(0), sharding={replicated}\n dot.987 = bf16[3,16,128,4096]{3,2,1,0} parameter(1), sharding={devices=[1,8,1,1]0,1,2,3,4,5,6,7}\n reshape.5843 = bf16[3,16,128,1,4096]{4,3,2,1,0} reshape(dot.987), sharding={devices=[1,8,1,1,1]0,1,2,3,4,5,6,7}\n transpose.21172 = bf16[3,1,4096,16,128]{2,1,4,3,0} transpose(reshape.5843), dimensions={0,3,4,1,2}, sharding={devices=[1,1,1,8,1]0,1,2,3,4,5,6,7}\n reshape.291 = bf16[3,16,128]{2,1,0} reshape(copy.2424), sharding={devices=[1,8,1]0,1,2,3,4,5,6,7}\n broadcast.21176 = bf16[3,1,4096,16,128]{4,3,2,1,0} broadcast(reshape.291), dimensions={0,3,4}, sharding={devices=[1,1,1,8,1]0,1,2,3,4,5,6,7}\n multiply.21177 = bf16[3,1,4096,16,128]{2,1,4,3,0} multiply(transpose.21172, broadcast.21176), sharding={devices=[1,1,1,8,1]0,1,2,3,4,5,6,7}\n ROOT slice.21180 = bf16[1,1,4096,16,128]{4,3,2,1,0} slice(multiply.21177), slice={[1:2], [0:1], [0:4096], [0:16], [0:128]}, sharding={devices=[1,1,1,8,1]0,1,2,3,4,5,6,7}\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK(RunPass(m.get(), true));\n auto elementwise_op = FindInstruction(m.get(), HloOpcode::kMultiply);\n EXPECT_FALSE(elementwise_op->has_sharding());\n}\n} \n} "},"Code 
Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reshape_mover.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reshape_mover_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1159,"cells":{"ID":{"kind":"string","value":"cabe3974-40bd-4f00-8029-9f686f21bb85"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"hlo_phi_graph"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/hlo_phi_graph.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/hlo_phi_graph_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/hlo_phi_graph.h\"\n#include \nnamespace xla {\nHloValue::Id PhiGraph::GetOptimizedId(const HloValue& value) {\n Node* node = value_id_to_node_[value.id()];\n CHECK(!node->mark_as_dead);\n return node->value_id;\n}\nbool PhiGraph::InputsEqualTo(const HloValue& value,\n absl::Span inputs) {\n auto iter = value_id_to_node_.find(value.id());\n CHECK(iter != value_id_to_node_.end());\n absl::flat_hash_set existing_set;\n for (Node* operand : iter->second->operands) {\n existing_set.insert(operand->value_id);\n }\n absl::flat_hash_set new_set;\n for (const HloValue* input : inputs) {\n new_set.insert(input->id());\n }\n return existing_set == new_set;\n}\nHloValue::Id PhiGraph::FindOptimizedValue(const HloValue::Id id) {\n auto iter = value_id_to_node_.find(id);\n CHECK(iter != value_id_to_node_.end());\n CHECK(!iter->second->mark_as_dead);\n return iter->second->value_id;\n}\nPhiGraph::Node* PhiGraph::CreateOrReuseNode(const HloValue& value) {\n auto iter = value_id_to_node_.find(value.id());\n if (iter == value_id_to_node_.end()) {\n 
node_storage_.emplace_back(std::make_unique());\n Node* node = node_storage_.back().get();\n node->value_id = value.id();\n value_id_to_node_[value.id()] = node;\n node_to_value_id_[node].push_back(value.id());\n return node;\n } else {\n CHECK_NE(iter->second, nullptr);\n CHECK_EQ(iter->second->value_id, value.id());\n return iter->second;\n }\n}\nvoid PhiGraph::ReplaceNodeWith(PhiGraph::Node* node, PhiGraph::Node* replace) {\n CHECK(node->is_phi);\n if (node->mark_as_dead) {\n return;\n }\n if (replace->mark_as_dead) {\n auto iter = value_id_to_node_.find(replace->value_id);\n CHECK(iter != value_id_to_node_.end());\n return ReplaceNodeWith(node, iter->second);\n }\n CHECK(!replace->mark_as_dead);\n for (Node* user : node->users) {\n absl::c_replace(user->operands, node, replace);\n }\n for (Node* operand : node->operands) {\n absl::c_replace(operand->users, node, replace);\n }\n for (HloValue::Id value_id : node_to_value_id_[node]) {\n CHECK(value_id_to_node_.contains(value_id));\n value_id_to_node_[value_id] = replace;\n }\n absl::c_copy(node_to_value_id_[node],\n std::back_inserter(node_to_value_id_[replace]));\n node_to_value_id_[node].clear();\n node->mark_as_dead = true;\n}\nvoid PhiGraph::RegisterPhi(const HloValue& value,\n absl::Span inputs) {\n Node* node = CreateOrReuseNode(value);\n CHECK(value.is_phi());\n node->is_phi = true;\n node->operands.clear();\n for (auto input : inputs) {\n CHECK(input != nullptr);\n Node* input_node = CreateOrReuseNode(*input);\n node->operands.push_back(input_node);\n }\n}\nstd::string PhiGraph::ToString() {\n std::string out = \"PhiGraph: \\n\";\n for (auto& node : node_storage_) {\n absl::StrAppend(&out, node->value_id);\n if (node->is_phi) {\n absl::StrAppend(&out, \", phi\");\n }\n if (node->mark_as_dead) {\n absl::StrAppend(&out, \", dead\", \":\\n\");\n }\n for (Node* input : node->operands) {\n absl::StrAppend(&out, \" \", input->value_id, \"\\n\");\n }\n }\n return out;\n}\nvoid PhiGraph::Optimize() {\n VLOG(2) << 
\"Optimizing phi graph:\";\n XLA_VLOG_LINES(2, ToString());\n for (auto& node : node_storage_) {\n for (Node* input : node->operands) {\n input->users.push_back(node.get());\n }\n }\n bool changed = true;\n while (changed) {\n changed = false;\n absl::flat_hash_set checked_for_closure;\n for (auto& node : node_storage_) {\n if (!node->is_phi) {\n continue;\n }\n if (node->mark_as_dead) {\n continue;\n }\n Node* node_ptr = node.get();\n VLOG(2) << \"Optimizing: \" << node_ptr->value_id;\n CHECK_GE(node_ptr->operands.size(), 1);\n auto it = absl::c_find(node_ptr->operands, node_ptr);\n while (it != node_ptr->operands.end()) {\n node_ptr->operands.erase(it);\n it = absl::c_find(node_ptr->operands, node_ptr);\n }\n it = absl::c_find(node_ptr->users, node_ptr);\n while (it != node_ptr->users.end()) {\n node_ptr->users.erase(it);\n it = absl::c_find(node_ptr->users, node_ptr);\n }\n CHECK_GE(node_ptr->operands.size(), 1);\n bool all_inputs_are_same = absl::c_all_of(\n node_ptr->operands,\n [&](Node* elem) { return elem == node_ptr->operands[0]; });\n if (all_inputs_are_same) {\n VLOG(1) << \"All inputs to node \" << node_ptr->value_id\n << \" are the same, replacing it with \"\n << node_ptr->operands[0]->value_id;\n ReplaceNodeWith(node_ptr, node_ptr->operands[0]);\n changed = true;\n continue;\n }\n if (checked_for_closure.contains(node_ptr)) {\n continue;\n }\n absl::flat_hash_set workset;\n std::queue worklist;\n Node* non_phi = nullptr;\n worklist.push(node_ptr);\n while (!worklist.empty()) {\n Node* todo = worklist.front();\n worklist.pop();\n if (workset.contains(todo)) {\n continue;\n }\n checked_for_closure.insert(todo);\n workset.insert(todo);\n for (Node* operand : todo->operands) {\n worklist.push(operand);\n }\n if (!todo->is_phi) {\n if (non_phi != nullptr && non_phi != todo) {\n non_phi = nullptr;\n break;\n } else {\n non_phi = todo;\n }\n }\n }\n if (non_phi != nullptr) {\n for (Node* node : workset) {\n if (!node->is_phi) {\n CHECK_EQ(node, non_phi);\n 
continue;\n }\n VLOG(1) << \"Replace node \" << node->value_id\n << \" in the closure with node \" << non_phi->value_id;\n ReplaceNodeWith(node, non_phi);\n changed = true;\n }\n }\n }\n }\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/hlo_phi_graph.h\"\n#include \"xla/literal_util.h\"\n#include \"tsl/platform/test.h\"\nnamespace xla {\nnamespace {\nclass PhiGraphTest : public ::testing::Test {\n protected:\n HloValue NewHloValue(bool is_phi) {\n static int64_t id = 0;\n return HloValue(id++, dummy_inst_.get(), {}, is_phi);\n }\n void SetUp() override {\n dummy_inst_ = HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0f));\n }\n std::unique_ptr dummy_inst_;\n};\nTEST_F(PhiGraphTest, SelfReferencingPhi) {\n PhiGraph phi_graph;\n HloValue A = NewHloValue(false);\n HloValue B = NewHloValue(true);\n phi_graph.RegisterPhi(B, {&A, &B});\n phi_graph.Optimize();\n EXPECT_EQ(A.id(), phi_graph.FindOptimizedValue(B.id()));\n}\nTEST_F(PhiGraphTest, PhiWithSameInputs) {\n PhiGraph phi_graph;\n HloValue A = NewHloValue(false);\n HloValue B = NewHloValue(true);\n phi_graph.RegisterPhi(B, {&A, &A});\n phi_graph.Optimize();\n EXPECT_EQ(A.id(), phi_graph.FindOptimizedValue(B.id()));\n}\nTEST_F(PhiGraphTest, CircularPhi) {\n PhiGraph phi_graph;\n HloValue A = NewHloValue(true);\n HloValue B = NewHloValue(true);\n HloValue C = NewHloValue(true);\n HloValue D = NewHloValue(false);\n phi_graph.RegisterPhi(A, {&B, &C});\n phi_graph.RegisterPhi(B, {&D, &C});\n phi_graph.RegisterPhi(C, {&A, &B});\n phi_graph.Optimize();\n EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(A.id()));\n EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(B.id()));\n EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(C.id()));\n}\nTEST_F(PhiGraphTest, NestedPhiReduction) {\n PhiGraph phi_graph;\n HloValue A = NewHloValue(true);\n HloValue B = NewHloValue(true);\n HloValue C = NewHloValue(true);\n HloValue D = NewHloValue(false);\n HloValue E = NewHloValue(true);\n 
phi_graph.RegisterPhi(A, {&B, &C});\n phi_graph.RegisterPhi(B, {&E, &C});\n phi_graph.RegisterPhi(C, {&A, &B});\n phi_graph.RegisterPhi(E, {&D, &D});\n phi_graph.Optimize();\n EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(A.id()));\n EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(B.id()));\n EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(C.id()));\n EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(E.id()));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_phi_graph.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_phi_graph_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1160,"cells":{"ID":{"kind":"string","value":"eb0c36ad-0dad-4007-b2bb-20fde4559604"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"tuple_util"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/tuple_util.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/tuple_util_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/tuple_util.h\"\n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/inlined_vector.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_join.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/span.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/hlo_value.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_tree.h\"\n#include 
\"xla/shape_util.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\n HloInstruction* TupleUtil::ExtractPrefix(HloInstruction* input_tuple,\n int64_t elements,\n absl::string_view name) {\n CHECK(input_tuple->shape().IsTuple());\n HloComputation* computation = input_tuple->parent();\n const Shape& input_shape = input_tuple->shape();\n std::vector tuple_elements;\n tuple_elements.reserve(elements);\n for (int i = 0; i < elements; i++) {\n std::string element_name;\n if (!name.empty()) {\n element_name = absl::StrCat(name, \".element.\", i);\n }\n tuple_elements.push_back(computation->AddInstruction(\n HloInstruction::CreateGetTupleElement(input_shape.tuple_shapes(i),\n input_tuple, i),\n element_name));\n }\n return computation->AddInstruction(\n HloInstruction::CreateTuple(tuple_elements), name);\n}\n HloInstruction* TupleUtil::AppendSuffix(\n HloInstruction* input_tuple,\n absl::Span trailing_values) {\n CHECK(input_tuple->shape().IsTuple());\n HloComputation* computation = input_tuple->parent();\n const Shape& input_shape = input_tuple->shape();\n std::vector tuple_elements;\n tuple_elements.reserve(input_shape.tuple_shapes_size());\n for (int i = 0; i < input_shape.tuple_shapes_size(); i++) {\n tuple_elements.push_back(\n computation->AddInstruction(HloInstruction::CreateGetTupleElement(\n input_shape.tuple_shapes(i), input_tuple, i)));\n }\n tuple_elements.insert(tuple_elements.end(), trailing_values.begin(),\n trailing_values.end());\n return computation->AddInstruction(\n HloInstruction::CreateTuple(tuple_elements));\n}\n absl::StatusOr TupleUtil::ReplaceTupleWith(\n HloInstruction* new_instruction, HloInstruction* tuple,\n ShapeIndex shape_index, bool insert_bitcast_if_different_shape) {\n const Shape& tuple_shape = tuple->shape();\n CHECK(tuple->shape().IsTuple())\n << \"ReplaceTupleWith was called for a non-tuple. 
Tuple = \"\n << tuple->ToString()\n << \", new_instruction = \" << new_instruction->ToString()\n << \", shape_index = \" << shape_index.ToString();\n const HloInstruction* instruction = new_instruction;\n bool equivalent = true;\n for (int i = shape_index.size() - 1; i >= 0; --i) {\n int index = shape_index[i];\n if (instruction->opcode() != HloOpcode::kGetTupleElement ||\n instruction->tuple_index() != index) {\n equivalent = false;\n break;\n }\n instruction = instruction->operand(0);\n }\n if (equivalent && instruction == tuple) {\n VLOG(4) << \"Instruction \" << new_instruction->ToShortString()\n << \" already exists at index \" << shape_index.ToString() << \" of \"\n << tuple->ToShortString();\n return tuple;\n }\n HloComputation* computation = new_instruction->parent();\n std::vector tuple_args(tuple_shape.tuple_shapes_size());\n CHECK_GE(tuple_shape.tuple_shapes_size(), shape_index[0]);\n for (int i = 0; i < tuple_shape.tuple_shapes_size(); ++i) {\n const Shape& subshape = tuple_shape.tuple_shapes(i);\n auto get_operand = [&]() {\n if (tuple->opcode() == HloOpcode::kTuple) {\n return tuple->mutable_operand(i);\n } else {\n return computation->AddInstruction(\n HloInstruction::CreateGetTupleElement(subshape, tuple, i));\n }\n };\n if (i == shape_index[0]) {\n if (subshape.IsTuple()) {\n TF_ASSIGN_OR_RETURN(tuple_args[i],\n ReplaceTupleWith(new_instruction, get_operand(),\n ShapeIndex(shape_index.begin() + 1,\n shape_index.end())));\n } else {\n if (subshape != new_instruction->shape() &&\n insert_bitcast_if_different_shape) {\n VLOG(4) << \"Old shape = \" << subshape.ToString()\n << \", new shape = \" << new_instruction->shape().ToString()\n << \"; inserting a bitcast.\";\n new_instruction = computation->AddInstruction(\n HloInstruction::CreateBitcast(subshape, new_instruction));\n } else if (tuple->opcode() == HloOpcode::kTuple &&\n tuple->operand(i) == new_instruction) {\n VLOG(4) << \"Tuple already contains the new instruction = \"\n << 
new_instruction->ToShortString()\n << \" tuple = \" << tuple->ToShortString();\n return tuple;\n }\n tuple_args[i] = new_instruction;\n }\n } else {\n tuple_args[i] = get_operand();\n }\n }\n if (shape_index[0] == tuple_shape.tuple_shapes_size()) {\n tuple_args.push_back(new_instruction);\n }\n return computation->AddInstruction(HloInstruction::CreateTuple(tuple_args));\n}\n HloInstruction* TupleUtil::AddGetTupleElements(\n const HloPosition& position) {\n HloInstruction* instruction = position.instruction;\n HloComputation* computation = instruction->parent();\n for (int64_t index : position.index) {\n auto gte_it = absl::c_find_if(\n instruction->users(), [index](const HloInstruction* use) {\n return use != use->parent()->root_instruction() &&\n use->opcode() == HloOpcode::kGetTupleElement &&\n use->tuple_index() == index;\n });\n if (gte_it != instruction->users().end()) {\n instruction = *gte_it;\n } else {\n instruction =\n computation->AddInstruction(HloInstruction::CreateGetTupleElement(\n instruction->shape().tuple_shapes(index), instruction, index));\n }\n }\n return instruction;\n}\nShapeTree TupleUtil::DisassembleTupleInstruction(\n HloInstruction* tuple) {\n const Shape& shape = tuple->shape();\n ShapeTree result(shape);\n result.ForEachMutableElement([&](ShapeIndexView index,\n HloInstruction** element) {\n if (index.empty()) {\n *element = tuple;\n } else {\n ShapeIndexView parent_index = index.subspan(0, index.size() - 1);\n HloInstruction* parent = result.element(parent_index);\n std::string name = absl::StrCat(tuple->name(), \".disassembled.\",\n absl::StrJoin(index, \".\"));\n *element = tuple->parent()->AddInstruction(\n HloInstruction::CreateGetTupleElement(parent, index.back()), name);\n }\n });\n return result;\n}\nHloInstruction* TupleUtil::AssembleTupleInstruction(\n HloComputation* computation, ShapeTree elements,\n absl::string_view name) {\n elements.ForEachMutableElementPostOrder(\n [&](const ShapeIndex& index, HloInstruction** element) 
{\n const Shape& subshape = ShapeUtil::GetSubshape(elements.shape(), index);\n if (subshape.IsTuple()) {\n absl::InlinedVector children;\n ShapeIndex child_index = index;\n for (int i = 0; i < subshape.tuple_shapes_size(); ++i) {\n child_index.push_back(i);\n children.push_back(elements.element(child_index));\n child_index.pop_back();\n }\n std::string new_name;\n if (!name.empty()) {\n if (index.empty()) {\n new_name = std::string(name);\n } else {\n new_name =\n absl::StrCat(name, \".assembled.\", absl::StrJoin(index, \".\"));\n }\n }\n *element = computation->AddInstruction(\n HloInstruction::CreateTuple(children), new_name);\n }\n });\n return elements.element({});\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/tuple_util.h\"\n#include \n#include \n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/service/hlo_module_config.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/test.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tests/verified_hlo_module.h\"\nnamespace xla {\nnamespace {\nnamespace op = ::xla::testing::opcode_matchers;\nusing TupleUtilTest = HloTestBase;\nTEST_F(TupleUtilTest, ExtractPrefix) {\n const std::string hlo_string = R\"(\nHloModule Module\nENTRY entry {\n p0 = (f32[32,32]{1,0},f32[32,32]{1,0},f32[32,32]{1,0}) parameter(0)\n ROOT p1 = f32[32,32]{1,0} parameter(1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n HloInstruction* param0 =\n module->entry_computation()->parameter_instruction(0);\n HloInstruction* prefix = TupleUtil::ExtractPrefix(param0, 2);\n EXPECT_THAT(prefix, op::Tuple(op::GetTupleElement(op::Parameter(0), 0),\n op::GetTupleElement(op::Parameter(0), 1)));\n}\nTEST_F(TupleUtilTest, AppendSuffix) {\n const std::string hlo_string = R\"(\nHloModule Module\nENTRY entry {\n p0 = (f32[32,32]{1,0},f32[32,32]{1,0},f32[32,32]{1,0}) parameter(0)\n ROOT p1 = f32[32,32]{1,0} 
parameter(1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n HloInstruction* param0 =\n module->entry_computation()->parameter_instruction(0);\n HloInstruction* param1 =\n module->entry_computation()->parameter_instruction(1);\n HloInstruction* with_suffix =\n TupleUtil::AppendSuffix(param0, {param1, param1});\n EXPECT_THAT(with_suffix, op::Tuple(op::GetTupleElement(op::Parameter(0), 0),\n op::GetTupleElement(op::Parameter(0), 1),\n op::GetTupleElement(op::Parameter(0), 2),\n op::Parameter(1), op::Parameter(1)));\n}\nTEST_F(TupleUtilTest, ReplaceTupleWithTupleInst) {\n const std::string hlo_string = R\"(\nHloModule Module\nENTRY entry {\n p0 = f32[32,32]{1,0} parameter(0)\n p1 = f32[32,32]{1,0} parameter(1)\n ROOT tuple = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(p0, p1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n HloInstruction* p0 = FindInstruction(module.get(), \"p0\");\n HloInstruction* tuple = FindInstruction(module.get(), \"tuple\");\n TF_ASSERT_OK_AND_ASSIGN(HloInstruction * new_tuple,\n TupleUtil::ReplaceTupleWith(p0, tuple, {1}));\n EXPECT_THAT(new_tuple, op::Tuple(op::Parameter(0), op::Parameter(0)));\n}\nTEST_F(TupleUtilTest, ReplaceTupleWithNonTupleInst) {\n const std::string hlo_string = R\"(\nHloModule Module\nENTRY entry {\n ROOT p0 = (f32[32,32]{1,0}, f32[32,32]{1,0}) parameter(0)\n p1 = f32[32,32]{1,0} parameter(1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n HloInstruction* p0 = FindInstruction(module.get(), \"p0\");\n HloInstruction* p1 = FindInstruction(module.get(), \"p1\");\n TF_ASSERT_OK_AND_ASSIGN(HloInstruction * new_tuple,\n TupleUtil::ReplaceTupleWith(p1, p0, {0}));\n EXPECT_THAT(new_tuple, op::Tuple(op::Parameter(1),\n op::GetTupleElement(op::Parameter(0), 1)));\n}\nTEST_F(TupleUtilTest, ReplaceTupleWithNonTupleInstNested) {\n const std::string hlo_string = 
R\"(\nHloModule Module\nENTRY entry {\n ROOT p0 = (f32[32,32]{1,0}, (f32[32,32]{1,0}, f32[32,32]{1,0})) parameter(0)\n p1 = f32[32,32]{1,0} parameter(1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n HloInstruction* p0 = FindInstruction(module.get(), \"p0\");\n HloInstruction* p1 = FindInstruction(module.get(), \"p1\");\n TF_ASSERT_OK_AND_ASSIGN(HloInstruction * new_tuple,\n TupleUtil::ReplaceTupleWith(p1, p0, {1, 0}));\n EXPECT_THAT(\n new_tuple,\n op::Tuple(op::GetTupleElement(op::Parameter(0), 0),\n op::Tuple(op::Parameter(1),\n op::GetTupleElement(\n op::GetTupleElement(op::Parameter(0), 1), 1))));\n}\nTEST_F(TupleUtilTest, AddGetTupleElements) {\n const std::string hlo_string = R\"(\nHloModule Module\nENTRY entry {\n p0 = (f32[32,32]{1,0}, (f32[32,32]{1,0}, f32[32,32]{1,0})) parameter(0)\n gte = (f32[32,32]{1,0}, f32[32,32]{1,0}) get-tuple-element(p0), index=1\n ROOT root = f32[32,32]{1,0} get-tuple-element(gte), index=1\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n HloInstruction* p0 = FindInstruction(module.get(), \"p0\");\n HloInstruction* existing_gte = FindInstruction(module.get(), \"gte\");\n HloInstruction* new_gte = TupleUtil::AddGetTupleElements({p0, {1, 0}});\n EXPECT_THAT(new_gte, op::GetTupleElement(existing_gte, 0));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/tuple_util.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/tuple_util_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1161,"cells":{"ID":{"kind":"string","value":"0c1e0df2-8f32-4754-b90e-7b6a758a3abc"},"Language":{"kind":"string","value":"cpp"},"Repository 
Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"all_gather_decomposer"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/all_gather_decomposer.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/all_gather_decomposer_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/all_gather_decomposer.h\"\n#include \n#include \n#include \n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/status/status.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/service/collective_decomposer_utils.h\"\n#include \"xla/service/collective_ops_utils.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/logging.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nHloComputation* MakeBinaryAdd(PrimitiveType type, HloModule* module) {\n HloComputation::Builder sum_b(\"add\");\n auto x = sum_b.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeShape(type, {}), \"x\"));\n auto y = sum_b.AddInstruction(HloInstruction::CreateParameter(\n 1, ShapeUtil::MakeShape(type, {}), \"y\"));\n if (type == PRED) {\n sum_b.AddInstruction(HloInstruction::CreateBinary(\n ShapeUtil::MakeShape(type, {}), HloOpcode::kOr, x, y));\n } else {\n sum_b.AddInstruction(HloInstruction::CreateBinary(\n ShapeUtil::MakeShape(type, {}), HloOpcode::kAdd, x, y));\n }\n HloComputation* reduction = module->AddEmbeddedComputation(sum_b.Build());\n return reduction;\n}\n} \nHloInstruction* AllGatherDecomposer::TranslateAllGatherToAllReducePerOperand(\n CollectiveOpGroupMode group_mode, const 
HloAllGatherInstruction& ag,\n const Shape& output_shape, HloInstruction* operand, HloComputation* comp,\n int64_t ag_dim) {\n std::vector start_indices =\n CreateStartIndicesForCollectiveDecomposition(\n group_mode, ag.replica_groups(), operand->shape(), ag_dim, comp)\n .value();\n auto zero = comp->AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::Zero(output_shape.element_type())));\n zero = comp->AddInstruction(\n HloInstruction::CreateBroadcast(output_shape, zero, {}));\n auto dus = comp->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(\n zero->shape(), zero, operand, start_indices));\n auto ar = comp->AddInstruction(HloInstruction::CreateAllReduce(\n dus->shape(), {dus},\n MakeBinaryAdd(dus->shape().element_type(), comp->parent()),\n ag.device_list(),\n ag.constrain_layout(), ag.channel_id(),\n ag.use_global_device_ids()));\n return ar;\n}\nabsl::Status AllGatherDecomposer::DecomposeAllGather(\n HloAllGatherInstruction* ag, HloComputation* comp) {\n TF_ASSIGN_OR_RETURN(CollectiveOpGroupMode group_mode,\n GetCollectiveOpGroupMode(ag->channel_id().has_value(),\n ag->use_global_device_ids()));\n if (ag->operand_count() > 1) {\n std::vector tuple_inputs;\n for (int i = 0; i < ag->operand_count(); ++i) {\n auto* input_operand = ag->mutable_operand(i);\n const auto& output_shape = ag->shape().tuple_shapes(i);\n auto* ar = TranslateAllGatherToAllReducePerOperand(\n group_mode, *ag, output_shape, input_operand, comp,\n ag->all_gather_dimension());\n tuple_inputs.push_back(ar);\n }\n auto tup = comp->AddInstruction(HloInstruction::CreateTuple(tuple_inputs));\n TF_RETURN_IF_ERROR(ag->ReplaceAllUsesWith(tup));\n } else {\n auto* ar = TranslateAllGatherToAllReducePerOperand(\n group_mode, *ag, ag->shape(), ag->mutable_operand(0), comp,\n ag->all_gather_dimension());\n TF_RETURN_IF_ERROR(ag->ReplaceAllUsesWith(ar));\n }\n TF_RETURN_IF_ERROR(comp->RemoveInstructionAndUnusedOperands(ag));\n return absl::OkStatus();\n}\nabsl::StatusOr 
AllGatherDecomposer::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n bool changed = false;\n for (auto comp : module->MakeNonfusionComputations(execution_threads)) {\n for (auto hlo : comp->MakeInstructionPostOrder()) {\n if (hlo->opcode() != HloOpcode::kAllGather) {\n continue;\n }\n auto ag = Cast(hlo);\n if (ShouldDecompose(*ag)) {\n TF_RETURN_IF_ERROR(DecomposeAllGather(ag, comp));\n changed = true;\n }\n }\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/all_gather_decomposer.h\"\n#include \n#include \n#include \n#include \n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nusing ::testing::AllOf;\nnamespace op = xla::testing::opcode_matchers;\nusing AllGatherDecomposerTest = HloTestBase;\nTEST_F(AllGatherDecomposerTest, CrossReplicaAllGather) {\n const std::string module_str = R\"(\nHloModule module\nENTRY entry {\n param0 = f32[10,20] parameter(0)\n ROOT ag = f32[10,80] all-gather(param0), replica_groups={}, dimensions={1}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnUnverifiedModule((module_str)));\n AllGatherDecomposer decomposer;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));\n EXPECT_TRUE(changed);\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::AllReduce(op::DynamicUpdateSlice(\n op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(),\n op::Multiply(op::ReplicaId(), op::Constant()))));\n}\nTEST_F(AllGatherDecomposerTest, CrossReplicaAndPartitionAllGather) {\n const std::string module_str = R\"(\nHloModule module\nENTRY entry {\n param0 = f32[10,20] parameter(0)\n ROOT ag = f32[10,80] all-gather(param0), 
replica_groups={{0}}, channel_id=1,\n dimensions={1}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnUnverifiedModule((module_str)));\n AllGatherDecomposer decomposer;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));\n EXPECT_TRUE(changed);\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::AllReduce(op::DynamicUpdateSlice(\n op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(),\n op::Multiply(op::PartitionId(), op::Constant()))));\n}\nTEST_F(AllGatherDecomposerTest, CrossReplicaAllGatherWithTrivialGroup) {\n const std::string module_str = R\"(\nHloModule module\nENTRY entry {\n param0 = f32[10,20] parameter(0)\n ROOT ag = f32[10,80] all-gather(param0), replica_groups={{0,1,2,3}},\n dimensions={1}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnUnverifiedModule((module_str)));\n AllGatherDecomposer decomposer;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));\n EXPECT_TRUE(changed);\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::AllReduce(op::DynamicUpdateSlice(\n op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(),\n op::Multiply(op::ReplicaId(), op::Constant()))));\n}\nTEST_F(AllGatherDecomposerTest, CrossReplicaAllGatherWithSubgroups) {\n const std::string module_str = R\"(\nHloModule module\nENTRY entry {\n param0 = f32[10,20] parameter(0)\n ROOT ag = f32[10,80] all-gather(param0),\n replica_groups={{2,1,0,3}, {4,6,7,5}}, dimensions={1}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnUnverifiedModule((module_str)));\n AllGatherDecomposer decomposer;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));\n EXPECT_TRUE(changed);\n auto id =\n AllOf(op::Shape(\"u32[]\"),\n op::Reshape(op::DynamicSlice(op::Constant(), op::ReplicaId())));\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::AllReduce(op::DynamicUpdateSlice(\n 
op::Broadcast(op::Constant()), op::Parameter(0),\n op::Constant(), op::Multiply(id, op::Constant()))));\n}\nTEST_F(AllGatherDecomposerTest, CrossReplicaAllGatherWithSubgroupsGlobalIds) {\n const std::string module_str = R\"(\nHloModule module\nENTRY entry {\n param0 = f32[10,20] parameter(0)\n ROOT ag = f32[10,80] all-gather(param0),\n replica_groups={{2,1,0,3}, {4,6,7,5}}, dimensions={1}, channel_id=1,\n use_global_device_ids=true\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnUnverifiedModule((module_str)));\n AllGatherDecomposer decomposer;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));\n EXPECT_TRUE(changed);\n auto global_id =\n op::Add(op::Multiply(op::ReplicaId(), op::Constant()), op::PartitionId());\n auto id = AllOf(op::Shape(\"u32[]\"),\n op::Reshape(op::DynamicSlice(op::Constant(), global_id)));\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::AllReduce(op::DynamicUpdateSlice(\n op::Broadcast(op::Constant()), op::Parameter(0),\n op::Constant(), op::Multiply(id, op::Constant()))));\n}\nTEST_F(AllGatherDecomposerTest, CrossReplicaAllGatherWithTuple) {\n const std::string module_str = R\"(\nHloModule module\nENTRY entry {\n param0 = f32[10,20] parameter(0)\n param1 = f32[10,16] parameter(1)\n ROOT ag = (f32[10,80], f32[10,64]) all-gather(param0, param1),\n replica_groups={}, dimensions={1}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnUnverifiedModule((module_str)));\n AllGatherDecomposer decomposer;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));\n EXPECT_TRUE(changed);\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::Tuple(\n op::AllReduce(op::DynamicUpdateSlice(\n op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(),\n op::Multiply(op::ReplicaId(), op::Constant()))),\n op::AllReduce(op::DynamicUpdateSlice(\n op::Broadcast(op::Constant()), op::Parameter(1), op::Constant(),\n 
op::Multiply(op::ReplicaId(), op::Constant())))));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_gather_decomposer.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_gather_decomposer_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1162,"cells":{"ID":{"kind":"string","value":"88b9adff-15bb-48a6-920b-e030f186c9d9"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"all_reduce_folder"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/all_reduce_folder.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/all_reduce_folder_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/all_reduce_folder.h\"\n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/span.h\"\n#include \"xla/hlo/ir/collective_device_list.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_query.h\"\n#include \"xla/service/all_reduce_key.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\nnamespace xla {\nnamespace {\nstd::optional> FoldReplicaGroups(\n absl::Span replica_groups0,\n absl::Span replica_groups1) {\n int64_t num_replicas = 0;\n for (const ReplicaGroup &rg : replica_groups0) {\n 
for (int64_t id : rg.replica_ids()) {\n num_replicas = std::max(num_replicas, id);\n }\n }\n num_replicas++;\n std::vector replica_group_no(num_replicas, -1);\n for (int group_no = 0; group_no < replica_groups0.size(); ++group_no) {\n for (int64_t id : replica_groups0[group_no].replica_ids()) {\n replica_group_no[id] = group_no;\n }\n }\n absl::flat_hash_map, int64_t> contributor_set_id;\n std::vector contributing_replicas_set_id(num_replicas, 0);\n int64_t next_id = 1;\n for (const ReplicaGroup &rg : replica_groups1) {\n std::vector contributors(num_replicas, false);\n for (int64_t id : rg.replica_ids()) {\n int64_t group_no = replica_group_no[id];\n for (int64_t contrib : replica_groups0[group_no].replica_ids()) {\n if (contributors[contrib]) {\n return std::nullopt;\n }\n contributors[contrib] = true;\n }\n }\n int64_t set_id;\n auto it = contributor_set_id.find(contributors);\n if (it != contributor_set_id.end()) {\n set_id = it->second;\n } else {\n set_id = next_id++;\n contributor_set_id[contributors] = set_id;\n }\n for (int64_t id : rg.replica_ids()) {\n contributing_replicas_set_id[id] = set_id;\n }\n }\n std::vector new_replica_groups;\n new_replica_groups.reserve(contributor_set_id.size());\n for (const auto &it : contributor_set_id) {\n const std::vector &contributors = it.first;\n const int64_t set_id = it.second;\n new_replica_groups.emplace_back();\n ReplicaGroup &group = new_replica_groups.back();\n for (int64_t replica = 0; replica < num_replicas; ++replica) {\n if (contributors[replica]) {\n if (contributing_replicas_set_id[replica] != set_id) {\n return std::nullopt;\n }\n group.add_replica_ids(replica);\n }\n }\n }\n absl::c_sort(new_replica_groups,\n [](const ReplicaGroup &a, const ReplicaGroup &b) {\n return a.replica_ids(0) < b.replica_ids(0);\n });\n return new_replica_groups;\n}\n} \nabsl::StatusOr AllReduceFolder::Run(\n HloModule *module,\n const absl::flat_hash_set &execution_threads) {\n if 
(hlo_query::ContainsLayoutConstrainedAllReduce(*module)) {\n VLOG(1) << \"Skip AllReduceFolder because the module contains all-reduce \"\n \"with constrained layouts\";\n return false;\n }\n int64_t next_channel_id = hlo_query::NextChannelId(*module);\n bool changed = false;\n for (auto computation : module->computations(execution_threads)) {\n for (HloInstruction *inst : computation->MakeInstructionPostOrder()) {\n if (inst->opcode() != HloOpcode::kAllReduce ||\n inst->operand(0)->opcode() != HloOpcode::kAllReduce) {\n continue;\n }\n auto *ar0 = Cast(inst->mutable_operand(0));\n auto *ar1 = Cast(inst);\n if (ar0->user_count() != 1) {\n continue;\n }\n std::optional key0 = GetAllReduceKey(\n ar0, nullptr, true);\n std::optional key1 = GetAllReduceKey(\n ar1, nullptr, true);\n if (!key0 || !key1 || *key0 != *key1 || ar0->replica_groups().empty() ||\n ar1->replica_groups().empty()) {\n continue;\n }\n std::optional> new_replica_groups =\n FoldReplicaGroups(ar0->replica_groups(), ar1->replica_groups());\n if (!new_replica_groups) {\n continue;\n }\n std::optional channel_id;\n if (ar0->channel_id()) {\n channel_id = next_channel_id++;\n }\n HloInstruction *new_ar =\n computation->AddInstruction(HloInstruction::CreateAllReduce(\n ar0->shape(), ar0->operands(), ar0->to_apply(),\n CollectiveDeviceList(*new_replica_groups),\n false, channel_id,\n ar0->use_global_device_ids()));\n TF_RETURN_IF_ERROR(ar1->ReplaceAllUsesWith(new_ar));\n TF_RETURN_IF_ERROR(computation->RemoveInstruction(ar1));\n TF_RETURN_IF_ERROR(computation->RemoveInstruction(ar0));\n changed = true;\n }\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/all_reduce_folder.h\"\n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include 
\"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/test.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nnamespace matcher = xla::testing::opcode_matchers;\nusing ::testing::HasSubstr;\nclass AllReduceFolderTest : public HloTestBase {};\nconst char *k2AllReduce = R\"(\n HloModule m\n sum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n }\n ENTRY main {\n p0 = f32[8] parameter(0)\n ar0 = f32[8] all-reduce(p0), replica_groups=$group_0, to_apply=sum\n ROOT ar1 = f32[8] all-reduce(ar0), replica_groups=$group_1, to_apply=sum\n }\n )\";\nsize_t AllReduceCount(HloModule *module) {\n return absl::c_count_if(module->entry_computation()->instructions(),\n HloPredicateIsOp);\n}\nvoid ExpectOneAllReduce(HloModule *module,\n absl::string_view target_replica_groups) {\n EXPECT_EQ(AllReduceCount(module), 1);\n HloInstruction *root = module->entry_computation()->root_instruction();\n EXPECT_THAT(root, matcher::AllReduce(matcher::Parameter(0)));\n EXPECT_THAT(root->ToString(), HasSubstr(target_replica_groups));\n}\nTEST_F(AllReduceFolderTest, Simple) {\n TF_ASSERT_OK_AND_ASSIGN(\n auto module, RunAndCheckHloRewrite(k2AllReduce, AllReduceFolder(), true,\n {{\"$group_0\", \"{{0,1},{2,3}}\"},\n {\"$group_1\", \"{{0,2},{1,3}}\"}}));\n ExpectOneAllReduce(module.get(), \"replica_groups={{0,1,2,3}}\");\n}\nTEST_F(AllReduceFolderTest, SimpleSwap) {\n TF_ASSERT_OK_AND_ASSIGN(\n auto module, RunAndCheckHloRewrite(k2AllReduce, AllReduceFolder(), true,\n {{\"$group_1\", \"{{0,1},{2,3}}\"},\n {\"$group_0\", \"{{0,2},{1,3}}\"}}));\n ExpectOneAllReduce(module.get(), \"replica_groups={{0,1,2,3}}\");\n}\nTEST_F(AllReduceFolderTest, BothEmptyReplicaGroups_NotTransformed) {\n TF_ASSERT_OK(RunAndCheckHloRewrite(k2AllReduce, AllReduceFolder(), false,\n {{\"$group_0\", \"{}\"}, {\"$group_1\", \"{}\"}}));\n}\nTEST_F(AllReduceFolderTest, 
EmptyReplicaGroups_NotTransformed) {\n TF_ASSERT_OK(RunAndCheckHloRewrite(\n k2AllReduce, AllReduceFolder(), false,\n {{\"$group_0\", \"{}\"}, {\"$group_1\", \"{{0,2},{1,3}}\"}}));\n}\nTEST_F(AllReduceFolderTest, MismatchOtherProperties0_NotTransformed) {\n absl::string_view hlo_string = R\"(\n HloModule m\n sum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n }\n ENTRY main {\n p0 = f32[8] parameter(0)\n ar0 = f32[8] all-reduce(p0), replica_groups={{0,1},{2,3}}, channel_id=1, to_apply=sum\n ROOT ar1 = f32[8] all-reduce(ar0), replica_groups={{0,2},{1,3}}, to_apply=sum\n }\n )\";\n TF_ASSERT_OK(RunAndCheckHloRewrite(hlo_string, AllReduceFolder(), false));\n}\nTEST_F(AllReduceFolderTest, MismatchOtherProperties1_NotTransformed) {\n absl::string_view hlo_string = R\"(\n HloModule m\n sum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n }\n mul {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT mul = f32[] multiply(a, b)\n }\n ENTRY main {\n p0 = f32[8] parameter(0)\n ar0 = f32[8] all-reduce(p0), replica_groups={{0,1},{2,3}}, to_apply=sum\n ROOT ar1 = f32[8] all-reduce(ar0), replica_groups={{0,2},{1,3}}, to_apply=mul\n }\n )\";\n TF_ASSERT_OK(RunAndCheckHloRewrite(hlo_string, AllReduceFolder(), false));\n}\nTEST_F(AllReduceFolderTest, NotFoldable_NotTransformed) {\n absl::string_view hlo_string = R\"(\n HloModule m\n sum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n }\n ENTRY main {\n p0 = f32[8] parameter(0)\n ar0 = f32[8] all-reduce(p0), replica_groups={{0,1},{2,3}}, to_apply=sum\n ROOT ar1 = f32[8] all-reduce(ar0), replica_groups={{0,1},{2,3}}, to_apply=sum\n }\n )\";\n TF_ASSERT_OK(RunAndCheckHloRewrite(hlo_string, AllReduceFolder(), false));\n}\nTEST_F(AllReduceFolderTest, Foldable0) {\n absl::string_view hlo_string = R\"(\n HloModule m\n sum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n }\n ENTRY main {\n p0 
= f32[8] parameter(0)\n ar0 = f32[8] all-reduce(p0), replica_groups={{0,4},{1,5},{2,3},{6,7}}, to_apply=sum\n ROOT ar1 = f32[8] all-reduce(ar0), replica_groups={{0,5},{4,1},{2,7},{3,6}}, to_apply=sum\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunAndCheckHloRewrite(hlo_string, AllReduceFolder()));\n ExpectOneAllReduce(module.get(), \"replica_groups={{0,1,4,5},{2,3,6,7}}\");\n}\nTEST_F(AllReduceFolderTest, FoldableChain) {\n absl::string_view hlo_string = R\"(\n HloModule m\n sum {\n a = f32[] parameter(0)\n b = f32[] parameter(1)\n ROOT add.2 = f32[] add(a, b)\n }\n ENTRY main {\n p0 = f32[8] parameter(0)\n ar0 = f32[8] all-reduce(p0), replica_groups={{0,1},{2,3},{4,5},{6,7}}, to_apply=sum\n ar1 = f32[8] all-reduce(ar0), replica_groups={{0,2},{1,3},{4,6},{5,7}}, to_apply=sum\n ROOT ar2 = f32[8] all-reduce(ar1), replica_groups={{0,4},{1,5},{2,6},{3,7}}, to_apply=sum\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n RunAndCheckHloRewrite(hlo_string, AllReduceFolder()));\n ExpectOneAllReduce(module.get(), \"replica_groups={{0,1,2,3,4,5,6,7}}\");\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_folder.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_folder_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1163,"cells":{"ID":{"kind":"string","value":"1adb9aee-06f6-4921-bc77-f195f56319b6"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"sharding_remover"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/sharding_remover.cc"},"File Path for Unit 
Test":{"kind":"string","value":"third_party/xla/xla/service/sharding_remover_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/sharding_remover.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/spmd/shardy/constants.h\"\n#include \"tsl/platform/errors.h\"\nnamespace xla {\nabsl::StatusOr ShardingRemover::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n bool changed = false;\n const absl::flat_hash_set to_remove_sharding_ops = {\n \"Sharding\", \"SPMDShardToFullShape\", \"SPMDFullToShardShape\",\n sdy::kFuncResultShardingTargetName};\n for (HloComputation* computation : module->computations(execution_threads)) {\n auto instructions = computation->MakeInstructionPostOrder();\n std::reverse(instructions.begin(), instructions.end());\n for (HloInstruction* instruction : instructions) {\n if (instruction->opcode() != HloOpcode::kCustomCall) {\n continue;\n }\n if (!to_remove_sharding_ops.contains(instruction->custom_call_target())) {\n continue;\n }\n CHECK(instruction->operand_count() == 1)\n << \"Sharding instruction must have exactly one operand\";\n TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(\n instruction->mutable_operand(0), name()));\n changed = true;\n if (instruction->custom_call_target() == \"Sharding\" ||\n instruction->custom_call_target() ==\n sdy::kFuncResultShardingTargetName) {\n auto copy = computation->AddInstruction(\n HloInstruction::CreateUnary(instruction->shape(), HloOpcode::kCopy,\n instruction->mutable_operand(0)));\n TF_RETURN_IF_ERROR(computation->ReplaceInstruction(instruction, copy));\n instruction = copy;\n }\n }\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/sharding_remover.h\"\n#include \n#include 
\"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/status_macros.h\"\n#include \"xla/tests/hlo_test_base.h\"\nnamespace op = xla::testing::opcode_matchers;\nnamespace xla {\nnamespace {\nusing ShardingRemoverTest = HloTestBase;\nTEST_F(ShardingRemoverTest, RemoveSharding) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY entry {\n %parameter.3379 = f32[1,1]{1,0} parameter(0)\n %custom-call.3380 = f32[1,1]{1,0} custom-call(f32[1,1]{1,0} %parameter.3379),\n custom_call_target=\"Sharding\", sharding={replicated}\n ROOT %reshape.6032 = f32[] reshape(f32[1,1]{1,0} %custom-call.3380)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardingRemover().Run(module.get()));\n EXPECT_TRUE(changed);\n auto root = module->entry_computation()->root_instruction();\n EXPECT_THAT(root, op::Reshape(op::Parameter()));\n auto parameter = root->operand(0);\n EXPECT_EQ(parameter->user_count(), 2);\n bool replaced = false;\n for (HloInstruction* user : parameter->users()) {\n if (user->opcode() == HloOpcode::kCopy) {\n replaced = true;\n EXPECT_THAT(user, op::Copy(op::Parameter()));\n break;\n }\n }\n EXPECT_TRUE(replaced);\n}\nTEST_F(ShardingRemoverTest, RemoveSPMDShardingToFullShape) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY entry {\n %parameter.3379 = f32[1,1]{1,0} parameter(0)\n %custom-call.3380 = f32[1,1]{1,0} custom-call(f32[1,1]{1,0} %parameter.3379),\n custom_call_target=\"SPMDShardToFullShape\", sharding={replicated}\n ROOT %reshape.6032 = f32[] reshape(f32[1,1]{1,0} %custom-call.3380)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardingRemover().Run(module.get()));\n EXPECT_TRUE(changed);\n auto root = module->entry_computation()->root_instruction();\n 
EXPECT_THAT(root, op::Reshape(op::Parameter()));\n}\nTEST_F(ShardingRemoverTest, RemoveSPMDFullToShardShape) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY entry {\n %parameter.3379 = f32[1,1]{1,0} parameter(0)\n %custom-call.3380 = f32[1,1]{1,0} custom-call(f32[1,1]{1,0} %parameter.3379),\n custom_call_target=\"SPMDFullToShardShape\", sharding={replicated}\n ROOT %reshape.6032 = f32[] reshape(f32[1,1]{1,0} %custom-call.3380)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardingRemover().Run(module.get()));\n EXPECT_TRUE(changed);\n auto root = module->entry_computation()->root_instruction();\n EXPECT_THAT(root, op::Reshape(op::Parameter()));\n}\nTEST_F(ShardingRemoverTest, NoChangeForOtherCustomCall) {\n const char* const hlo_string = R\"(\nHloModule cluster_2013453984438090939__.47\nENTRY %cluster_2013453984438090939__.47\n (arg_tuple.1: ()) -> (bf16[2,2000], s32[2,2000]) {\n %arg_tuple.1 = bf16[2,209664] parameter(0)\n %custom-call = (bf16[2,2000]{1,0}, s32[2,2000]{1,0})\n custom-call(bf16[2,209664]{1,0} %arg_tuple.1), custom_call_target=\"TopK\"\n %get-tuple-element = bf16[2,2000]{1,0}\n get-tuple-element((bf16[2,2000]{1,0}, s32[2,2000]{1,0}) %custom-call),\n index=0\n %get-tuple-element.1 = s32[2,2000]{1,0} get-tuple-element((bf16[2,2000]{1,0},\n s32[2,2000]{1,0}) %custom-call), index=1, sharding={replicated}\n ROOT %tuple.46 = (bf16[2,2000]{1,0}, s32[2,2000]{1,0})\n tuple(bf16[2,2000]{1,0} %get-tuple-element, s32[2,2000]{1,0}\n %get-tuple-element.1),\n metadata={op_name=\"XLA_Retvals\"}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardingRemover().Run(module.get()));\n EXPECT_FALSE(changed);\n}\n} \n} "},"Code 
Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/sharding_remover.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/sharding_remover_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1164,"cells":{"ID":{"kind":"string","value":"de45acd3-74d9-4eaa-9c40-ea4acfaf2a07"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"tuple_points_to_analysis"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/tuple_points_to_analysis.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/tuple_points_to_analysis_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/tuple_points_to_analysis.h\"\n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_format.h\"\n#include \"absl/strings/str_join.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/map_util.h\"\n#include \"xla/service/hlo_dataflow_analysis.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/types.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/logging.h\"\nnamespace xla {\nstd::string BufferAlias::ToString() const {\n return absl::StrCat(\"BufferAlias(\", instruction_->name(), \"[\",\n absl::StrJoin(index_, \",\"), \"])\");\n}\nstd::ostream& operator<<(std::ostream& out, const BufferAlias& buffer_alias) {\n out << buffer_alias.ToString();\n return out;\n}\nbool PointsToSet::IsAmbiguous() const {\n bool ambiguous = 
false;\n ForEachElement(\n [&ambiguous](const ShapeIndex& , const BufferList& points_to) {\n ambiguous |= points_to.size() > 1;\n });\n return ambiguous;\n}\nbool PointsToSet::IsDistinct() const {\n bool distinct = true;\n absl::flat_hash_set all_points_to;\n ForEachElement([&](const ShapeIndex& , const BufferList& points_to) {\n for (auto& buffer : points_to) {\n if (all_points_to.contains(buffer)) {\n distinct = false;\n }\n all_points_to.insert(buffer);\n }\n });\n return distinct;\n}\nsize_t PointsToSet::size() const {\n return CreateFlattenedSet().size();\n}\nPointsToSet::BufferSet PointsToSet::CreateFlattenedSet() const {\n BufferSet flat_set;\n ForEachElement(\n [&flat_set](const ShapeIndex& , const BufferList& buffers) {\n flat_set.insert(buffers.begin(), buffers.end());\n });\n return flat_set;\n}\nbool PointsToSet::ContainsBuffer(const LogicalBuffer& buffer) const {\n bool found = false;\n ForEachElement([&found, &buffer](const ShapeIndex& ,\n const BufferList& pointed_to_buffers) {\n if (!found && absl::c_linear_search(pointed_to_buffers, &buffer)) {\n found = true;\n }\n });\n return found;\n}\nbool PointsToSet::ContainsBufferAtIndex(const LogicalBuffer& buffer,\n const ShapeIndex& index) const {\n const auto& pointed_to_buffers = element(index);\n return absl::c_linear_search(pointed_to_buffers, &buffer);\n}\nvoid PointsToSet::AddPointedToBuffer(const LogicalBuffer& buffer,\n const ShapeIndex& index) {\n if (ContainsBufferAtIndex(buffer, index)) {\n return;\n }\n mutable_element(index)->push_back(&buffer);\n}\nconst PointsToSet::SourceSet& PointsToSet::tuple_sources(\n const ShapeIndex& index) const {\n return tree_.element(index).tuple_sources;\n}\nvoid PointsToSet::add_tuple_source(const ShapeIndex& index,\n HloInstruction* tuple) {\n tree_.mutable_element(index)->tuple_sources.insert(tuple);\n}\nnamespace {\nvoid GatherFusionInstructions(\n HloInstruction* instruction,\n std::vector* fusion_instructions) {\n CHECK_EQ(HloOpcode::kFusion, 
instruction->opcode());\n for (auto* fused : instruction->fused_instructions()) {\n if (fused->opcode() == HloOpcode::kFusion) {\n GatherFusionInstructions(fused, fusion_instructions);\n }\n }\n fusion_instructions->push_back(instruction);\n}\n} \n absl::StatusOr>\nTuplePointsToAnalysis::Run(const HloModule* module) {\n auto logical_buffer_analysis = LogicalBufferAnalysis::Run(module);\n std::unique_ptr analysis(new TuplePointsToAnalysis(\n module, std::move(logical_buffer_analysis).value()));\n TF_RETURN_IF_ERROR(analysis->Analyze());\n return std::move(analysis);\n}\nabsl::Status TuplePointsToAnalysis::Analyze() {\n per_instruction_.clear();\n per_instruction_.reserve(module_->instruction_count());\n logical_buffer_aliases_.clear();\n logical_buffer_aliases_.resize(\n logical_buffer_analysis_->num_logical_buffers());\n std::vector fusion_instructions;\n for (auto* computation : module_->MakeNonfusionComputations()) {\n TF_RETURN_IF_ERROR(computation->Accept(this));\n TF_RETURN_IF_ERROR(\n PopulateDefinedBuffersAndAliases(computation->instructions()));\n for (auto* instruction : computation->instructions()) {\n if (instruction->opcode() == HloOpcode::kFusion) {\n GatherFusionInstructions(instruction, &fusion_instructions);\n }\n }\n }\n for (auto* instruction : fusion_instructions) {\n TF_RETURN_IF_ERROR(instruction->fused_expression_root()->Accept(this));\n TF_RETURN_IF_ERROR(\n PopulateDefinedBuffersAndAliases(instruction->fused_instructions()));\n }\n XLA_VLOG_LINES(3, ToString());\n return absl::OkStatus();\n}\nabsl::Status TuplePointsToAnalysis::PopulateDefinedBuffersAndAliases(\n const decltype(std::declval()\n .instructions())& instructions) {\n for (auto* instruction : instructions) {\n PerInstruction* pi = PerInst(instruction);\n TF_RETURN_IF_ERROR(GatherBuffersDefinedByInstruction(\n instruction, &pi->instruction_defined_buffers));\n const PointsToSet& points_to_set = GetPointsToSet(instruction);\n points_to_set.ForEachElement(\n [this, &instruction](\n 
const ShapeIndex& index,\n const PointsToSet::BufferList& pointed_to_buffers) {\n for (const LogicalBuffer* buffer : pointed_to_buffers) {\n logical_buffer_aliases_[buffer->id()].emplace_back(instruction,\n index);\n }\n });\n }\n return absl::OkStatus();\n}\nabsl::Status TuplePointsToAnalysis::DefaultAction(\n HloInstruction* hlo_instruction) {\n PointsToSet& points_to_set = CreateEmptyPointsToSet(hlo_instruction);\n points_to_set.ForEachMutableElement(\n [this, hlo_instruction](const ShapeIndex& index,\n PointsToSet::BufferList* buffers) {\n buffers->push_back(\n &logical_buffer_analysis_->GetBuffer(hlo_instruction, index));\n });\n if (hlo_instruction->shape().IsTuple()) {\n points_to_set.add_tuple_source({}, hlo_instruction);\n }\n return absl::OkStatus();\n}\nabsl::Status TuplePointsToAnalysis::HandleGetTupleElement(\n HloInstruction* get_tuple_element) {\n int64_t element_index = get_tuple_element->tuple_index();\n PointsToSet& points_to_set = CreateEmptyPointsToSet(get_tuple_element);\n const PointsToSet& operand_points_to_set =\n *PerInst(get_tuple_element->operand(0))->points_to_set;\n points_to_set.ForEachMutableElement(\n [&](const ShapeIndex& target_index, PointsToSet::BufferList* points_to) {\n ShapeIndex src_index;\n src_index.push_back(element_index);\n for (auto element : target_index) {\n src_index.push_back(element);\n }\n *points_to = operand_points_to_set.element(src_index);\n for (HloInstruction* tuple :\n operand_points_to_set.tuple_sources(src_index)) {\n points_to_set.add_tuple_source(target_index, tuple);\n }\n });\n return absl::OkStatus();\n}\nabsl::Status TuplePointsToAnalysis::HandleCopy(HloInstruction* copy) {\n PointsToSet& points_to_set = CreateCopiedPointsToSet(copy, copy->operand(0));\n points_to_set.mutable_element({})->clear();\n points_to_set.AddPointedToBuffer(\n logical_buffer_analysis_->GetBuffer(copy, {}),\n {});\n return absl::OkStatus();\n}\nabsl::Status TuplePointsToAnalysis::HandleBitcast(HloInstruction* bitcast) {\n 
CreateCopiedPointsToSet(bitcast, bitcast->operand(0));\n return absl::OkStatus();\n}\nabsl::Status TuplePointsToAnalysis::HandleDomain(HloInstruction* domain) {\n CreateCopiedPointsToSet(domain, domain->operand(0));\n return absl::OkStatus();\n}\nabsl::Status TuplePointsToAnalysis::HandleAddDependency(\n HloInstruction* add_dependency) {\n CreateCopiedPointsToSet(add_dependency, add_dependency->operand(0));\n return absl::OkStatus();\n}\nabsl::Status TuplePointsToAnalysis::HandleRecvDone(HloInstruction* recv_done) {\n PointsToSet& points_to_set = CreateEmptyPointsToSet(recv_done);\n points_to_set.AddPointedToBuffer(\n logical_buffer_analysis_->GetBuffer(recv_done, {}),\n {});\n points_to_set.AddPointedToBuffer(\n logical_buffer_analysis_->GetBuffer(recv_done, {1}),\n {1});\n const PointsToSet& operand_points_to_set =\n GetPointsToSet(recv_done->operand(0));\n points_to_set.ForEachMutableElement(\n [&points_to_set, &operand_points_to_set](\n const ShapeIndex& index, PointsToSet::BufferList* buffers) {\n if (index.empty() || index[0] != 0) {\n return;\n }\n *buffers = operand_points_to_set.element(index);\n for (auto& tuple_source : operand_points_to_set.tuple_sources(index)) {\n points_to_set.add_tuple_source(index, tuple_source);\n }\n });\n return absl::OkStatus();\n}\nabsl::Status TuplePointsToAnalysis::HandleAsyncStart(\n HloInstruction* async_start) {\n PointsToSet& points_to_set = CreateEmptyPointsToSet(async_start);\n points_to_set.ForEachMutableElement(\n [&](const ShapeIndex& target_index, PointsToSet::BufferList* buffers) {\n if (target_index.size() >= 2 && target_index.front() == 0) {\n const PointsToSet& operand_points_to_set =\n GetPointsToSet(async_start->operand(target_index[1]));\n ShapeIndex source_index(target_index.begin() + 2, target_index.end());\n *buffers = operand_points_to_set.element(source_index);\n for (HloInstruction* tuple :\n operand_points_to_set.tuple_sources(source_index)) {\n points_to_set.add_tuple_source(target_index, tuple);\n 
}\n } else {\n buffers->push_back(\n &logical_buffer_analysis_->GetBuffer(async_start, target_index));\n }\n });\n return absl::OkStatus();\n}\nabsl::Status TuplePointsToAnalysis::HandleAsyncUpdate(\n HloInstruction* async_update) {\n PointsToSet& points_to_set = CreateEmptyPointsToSet(async_update);\n const PointsToSet& operand_points_to_set =\n GetPointsToSet(async_update->operand(0));\n CHECK_EQ(async_update->shape(), async_update->operand(0)->shape());\n points_to_set.ForEachMutableElement([&](const ShapeIndex& index,\n PointsToSet::BufferList* buffers) {\n *buffers = operand_points_to_set.element(index);\n for (HloInstruction* tuple : operand_points_to_set.tuple_sources(index)) {\n points_to_set.add_tuple_source(index, tuple);\n }\n });\n return absl::OkStatus();\n}\nabsl::Status TuplePointsToAnalysis::HandleAsyncDone(\n HloInstruction* async_done) {\n PointsToSet& points_to_set = CreateEmptyPointsToSet(async_done);\n const PointsToSet& operand_points_to_set =\n GetPointsToSet(async_done->operand(0));\n operand_points_to_set.ForEachElement(\n [&points_to_set, &operand_points_to_set](\n const ShapeIndex& src_index,\n const PointsToSet::BufferList& points_to) {\n if (!src_index.empty() && src_index.front() == 1) {\n const ShapeIndex target_index(src_index.begin() + 1, src_index.end());\n *points_to_set.mutable_element(target_index) = points_to;\n for (HloInstruction* tuple :\n operand_points_to_set.tuple_sources(src_index)) {\n points_to_set.add_tuple_source(target_index, tuple);\n }\n }\n });\n return absl::OkStatus();\n}\nabsl::Status TuplePointsToAnalysis::HandleCopyStart(\n HloInstruction* copy_start) {\n PointsToSet& points_to_set = CreateEmptyPointsToSet(copy_start);\n const PointsToSet& operand_points_to_set =\n GetPointsToSet(copy_start->operand(0));\n points_to_set.ForEachMutableElement(\n [&](const ShapeIndex& target_index, PointsToSet::BufferList* buffers) {\n if (target_index == ShapeIndex({1})) {\n *buffers = operand_points_to_set.element({});\n } 
else {\n buffers->push_back(\n &logical_buffer_analysis_->GetBuffer(copy_start, target_index));\n }\n });\n for (HloInstruction* tuple :\n operand_points_to_set.tuple_sources({})) {\n points_to_set.add_tuple_source({1}, tuple);\n }\n return absl::OkStatus();\n}\nabsl::Status TuplePointsToAnalysis::HandleCopyDone(HloInstruction* copy_done) {\n PointsToSet& points_to_set = CreateEmptyPointsToSet(copy_done);\n const PointsToSet& operand_points_to_set =\n GetPointsToSet(copy_done->operand(0));\n operand_points_to_set.ForEachElement(\n [&points_to_set, &operand_points_to_set](\n const ShapeIndex& src_index,\n const PointsToSet::BufferList& points_to) {\n if (src_index == ShapeIndex({0})) {\n const ShapeIndex target_index = {};\n *points_to_set.mutable_element(target_index) = points_to;\n for (HloInstruction* tuple :\n operand_points_to_set.tuple_sources(src_index)) {\n points_to_set.add_tuple_source(target_index, tuple);\n }\n }\n });\n return absl::OkStatus();\n}\nabsl::Status TuplePointsToAnalysis::HandleSend(HloInstruction* send) {\n PointsToSet& points_to_set = CreateEmptyPointsToSet(send);\n auto top_buffer = points_to_set.mutable_element(ShapeIndex({}));\n top_buffer->push_back(\n &logical_buffer_analysis_->GetBuffer(send, ShapeIndex({})));\n points_to_set.add_tuple_source({}, send);\n auto context_buffer = points_to_set.mutable_element(ShapeIndex({1}));\n context_buffer->push_back(\n &logical_buffer_analysis_->GetBuffer(send, ShapeIndex({1})));\n auto token_buffer = points_to_set.mutable_element(ShapeIndex({2}));\n token_buffer->push_back(\n &logical_buffer_analysis_->GetBuffer(send, ShapeIndex({2})));\n const PointsToSet& operand_points_to_set = GetPointsToSet(send->operand(0));\n operand_points_to_set.ForEachElement(\n [&points_to_set, &operand_points_to_set](\n const ShapeIndex& src_index,\n const PointsToSet::BufferList& points_to) {\n ShapeIndex target_index({0});\n for (auto element : src_index) {\n target_index.push_back(element);\n }\n 
*points_to_set.mutable_element(target_index) = points_to;\n for (HloInstruction* tuple :\n operand_points_to_set.tuple_sources(src_index)) {\n points_to_set.add_tuple_source(target_index, tuple);\n }\n });\n return absl::OkStatus();\n}\nabsl::Status TuplePointsToAnalysis::HandleTuple(HloInstruction* tuple) {\n absl::Span operands(tuple->operands());\n PointsToSet& points_to_set = CreateEmptyPointsToSet(tuple);\n points_to_set.AddPointedToBuffer(\n logical_buffer_analysis_->GetBuffer(tuple, {}),\n {});\n for (int64_t i = 0; i < operands.size(); ++i) {\n const PointsToSet& operand_points_to_set =\n *PerInst(operands[i])->points_to_set;\n operand_points_to_set.ForEachElement(\n [&points_to_set, &operand_points_to_set, i](\n const ShapeIndex& src_index,\n const PointsToSet::BufferList& points_to) {\n ShapeIndex target_index;\n target_index.push_back(i);\n for (auto element : src_index) {\n target_index.push_back(element);\n }\n *points_to_set.mutable_element(target_index) = points_to;\n for (HloInstruction* tuple :\n operand_points_to_set.tuple_sources(src_index)) {\n points_to_set.add_tuple_source(target_index, tuple);\n }\n });\n }\n points_to_set.add_tuple_source({}, tuple);\n return absl::OkStatus();\n}\nabsl::Status TuplePointsToAnalysis::HandleCustomCall(\n HloInstruction* custom_call) {\n auto ccall = Cast(custom_call);\n PointsToSet& points_to_set = CreateEmptyPointsToSet(custom_call);\n absl::flat_hash_map>\n aliased_outputs;\n for (const auto& pair : ccall->output_to_operand_aliasing()) {\n aliased_outputs.emplace(pair.first, pair.second);\n }\n points_to_set.ForEachMutableElement([&](const ShapeIndex& index,\n PointsToSet::BufferList* buffers) {\n auto it = aliased_outputs.find(index);\n if (it == aliased_outputs.end() || !alias_buffer_across_dataflow_) {\n points_to_set.AddPointedToBuffer(\n logical_buffer_analysis_->GetBuffer(custom_call, index), index);\n } else {\n const PointsToSet& input_set =\n 
*PerInst(ccall->operand(it->second.first))->points_to_set;\n for (const LogicalBuffer* input_buffer :\n input_set.element(it->second.second)) {\n points_to_set.AddPointedToBuffer(*input_buffer, index);\n }\n for (HloInstruction* tuple : input_set.tuple_sources(it->second.second)) {\n points_to_set.add_tuple_source(index, tuple);\n }\n }\n });\n points_to_set.add_tuple_source({}, custom_call);\n return absl::OkStatus();\n}\nabsl::Status TuplePointsToAnalysis::HandleFusion(HloInstruction* fusion) {\n auto cfusion = Cast(fusion);\n PointsToSet& points_to_set = CreateEmptyPointsToSet(fusion);\n absl::flat_hash_map>\n aliased_outputs;\n for (const auto& pair : cfusion->output_to_operand_aliasing()) {\n aliased_outputs.emplace(pair.first, pair.second);\n }\n points_to_set.ForEachMutableElement([&](const ShapeIndex& index,\n PointsToSet::BufferList* buffers) {\n auto it = aliased_outputs.find(index);\n if (it == aliased_outputs.end()) {\n points_to_set.AddPointedToBuffer(\n logical_buffer_analysis_->GetBuffer(fusion, index), index);\n } else {\n const PointsToSet& input_set =\n *PerInst(cfusion->operand(it->second.first))->points_to_set;\n for (const LogicalBuffer* input_buffer :\n input_set.element(it->second.second)) {\n points_to_set.AddPointedToBuffer(*input_buffer, index);\n }\n for (HloInstruction* tuple : input_set.tuple_sources(it->second.second)) {\n points_to_set.add_tuple_source(index, tuple);\n }\n }\n });\n points_to_set.add_tuple_source({}, fusion);\n return absl::OkStatus();\n}\nabsl::Status TuplePointsToAnalysis::HandleOptimizationBarrier(\n HloInstruction* barrier) {\n CreateCopiedPointsToSet(barrier, barrier->operand(0));\n return absl::OkStatus();\n}\nconst PointsToSet& TuplePointsToAnalysis::GetPointsToSet(\n const HloInstruction* hlo_instruction) const {\n return *PerInst(hlo_instruction)->points_to_set;\n}\nPointsToSet& TuplePointsToAnalysis::CreateEmptyPointsToSet(\n const HloInstruction* instruction) {\n PerInstruction* pi = PerInst(instruction);\n 
CHECK(pi->points_to_set == nullptr)\n << \"instruction should not have been present in the map.\";\n auto set = std::make_unique(&instruction->shape());\n pi->points_to_set = std::move(set);\n return *pi->points_to_set;\n}\nbool TuplePointsToAnalysis::InstructionDefinesBufferAtIndex(\n const HloInstruction* instruction, const ShapeIndex& index) const {\n const auto& buffers = GetPointsToSet(instruction).element(index);\n return (buffers.size() == 1 && buffers[0]->instruction() == instruction);\n}\nabsl::Status TuplePointsToAnalysis::VerifyBuffer(\n const LogicalBuffer& buffer) const {\n if (!InstructionDefinesBufferAtIndex(buffer.instruction(), buffer.index())) {\n return FailedPrecondition(\n \"LogicalBuffer %s is ill-defined: instruction %s does not define a \"\n \"buffer at that index\",\n buffer.ToString(), buffer.instruction()->name());\n }\n if (buffer.id() < 0 ||\n buffer.id() >= logical_buffer_analysis_->num_logical_buffers()) {\n return FailedPrecondition(\"LogicalBuffer %s is ill-defined: invalid id %d\",\n buffer.ToString(), buffer.id());\n }\n if (GetBuffer(buffer.id()).instruction() != buffer.instruction() ||\n GetBuffer(buffer.id()).index() != buffer.index()) {\n return FailedPrecondition(\n \"LogicalBuffer %s is ill-defined: buffer with same id differs: %s\",\n buffer.ToString(), GetBuffer(buffer.id()).ToString());\n }\n return absl::OkStatus();\n}\nconst LogicalBuffer& TuplePointsToAnalysis::GetBuffer(\n LogicalBuffer::Id id) const {\n CHECK_GE(id, 0);\n CHECK_LT(id, logical_buffer_analysis_->num_logical_buffers());\n return logical_buffer_analysis_->GetBuffer(id);\n}\nabsl::StatusOr TuplePointsToAnalysis::GetBufferDefinedAt(\n const HloInstruction* instruction, const ShapeIndex& index) const {\n const auto& buffers = GetPointsToSet(instruction).element(index);\n if (buffers.size() != 1 || buffers[0]->instruction() != instruction) {\n return FailedPrecondition(\n \"instruction %s does not define buffer at index {%s}\",\n instruction->name(), 
absl::StrJoin(index, \",\"));\n }\n return buffers[0];\n}\nconst TuplePointsToAnalysis::BufferAliasVector&\nTuplePointsToAnalysis::GetBufferAliases(const LogicalBuffer& buffer) const {\n return logical_buffer_aliases_[buffer.id()];\n}\nconst TuplePointsToAnalysis::BufferDefinitionVector&\nTuplePointsToAnalysis::GetBuffersDefinedByInstruction(\n const HloInstruction* instruction) const {\n return PerInst(instruction)->instruction_defined_buffers;\n}\nabsl::Status TuplePointsToAnalysis::GatherBuffersDefinedByInstruction(\n const HloInstruction* instruction,\n TuplePointsToAnalysis::BufferDefinitionVector* buffers) {\n GetPointsToSet(instruction)\n .ForEachElement([buffers, instruction](\n const ShapeIndex& index,\n const PointsToSet::BufferList& source_buffers) {\n CHECK(!source_buffers.empty());\n if (source_buffers.size() == 1 &&\n source_buffers[0]->instruction() == instruction) {\n DCHECK(source_buffers[0]->index() == index);\n buffers->push_back(source_buffers[0]);\n } else {\n for (const LogicalBuffer* source_buffer : source_buffers) {\n DCHECK(source_buffer->instruction() != instruction);\n }\n }\n });\n return absl::OkStatus();\n}\nPointsToSet& TuplePointsToAnalysis::CreateCopiedPointsToSet(\n const HloInstruction* instruction, const HloInstruction* src) {\n PointsToSet& dst_points_to_set = CreateEmptyPointsToSet(instruction);\n const PointsToSet& src_points_to_set = GetPointsToSet(src);\n dst_points_to_set.ForEachMutableElement(\n [&dst_points_to_set, &src_points_to_set](\n const ShapeIndex& index, PointsToSet::BufferList* buffers) {\n *buffers = src_points_to_set.element(index);\n for (auto& tuple_source : src_points_to_set.tuple_sources(index)) {\n dst_points_to_set.add_tuple_source(index, tuple_source);\n }\n });\n return *PerInst(instruction)->points_to_set;\n}\nstd::string TuplePointsToAnalysis::ToString() const {\n std::string output =\n absl::StrFormat(\"TuplePointsToSet for module %s:\\n\", module_->name());\n for (const auto* computation : 
module_->MakeNonfusionComputations()) {\n const char* entry =\n computation == module_->entry_computation() ? \"entry \" : \"\";\n absl::StrAppend(&output, entry, \"computation \", computation->name(), \":\\n\");\n for (const HloInstruction* instruction :\n computation->MakeInstructionPostOrder()) {\n InstructionToString(instruction, &output);\n if (instruction->opcode() == HloOpcode::kFusion) {\n for (auto* fused : instruction->fused_instructions()) {\n InstructionToString(fused, &output);\n }\n }\n }\n }\n absl::StrAppend(&output, \"LogicalBuffers:\\n\");\n for (const auto& b : logical_buffer_analysis_->logical_buffers()) {\n absl::StrAppend(&output, \" buffer \", b->ToString(), \":\\n\");\n for (const BufferAlias& alias : logical_buffer_aliases_[b->id()]) {\n absl::StrAppend(&output, \" alias \", alias.ToString(), \"\\n\");\n }\n }\n return output;\n}\nvoid TuplePointsToAnalysis::InstructionToString(\n const HloInstruction* instruction, std::string* output) const {\n const std::string prefix = instruction->IsFused() ? 
\" \" : \"\";\n absl::StrAppend(output, prefix, \" instruction \",\n instruction->ToShortString(), \":\\n\");\n const PointsToSet& points_to_set = GetPointsToSet(instruction);\n points_to_set.ForEachElement(\n [&prefix, &output](const ShapeIndex& index,\n const PointsToSet::BufferList& points_to) {\n absl::StrAppend(\n output, prefix, \" {\", absl::StrJoin(index, \",\"), \"}: \",\n absl::StrJoin(points_to, \", \",\n [](std::string* out, const LogicalBuffer* source) {\n out->append(source->ToString());\n }),\n \"\\n\");\n });\n}\nbool TuplePointsToAnalysis::DoesNotUseOperandBuffer(\n const HloInstruction* operand, const ShapeIndex& index,\n const HloInstruction* user) const {\n CHECK(user->IsUserOf(operand))\n << \"user: \" << user->ToString() << \" operand: \" << operand->ToString();\n if (user->opcode() == HloOpcode::kGetTupleElement && !index.empty()) {\n return true;\n } else if (user->IsLoopFusion()) {\n auto it = absl::c_find_if(\n user->fused_parameters(), [&](HloInstruction* fused_param) {\n return user->operand(fused_param->parameter_number()) == operand;\n });\n CHECK(it != user->fused_parameters().end());\n const LogicalBuffer* buffer = GetBufferDefinedAt(*it, index).value();\n for (const BufferAlias& alias : GetBufferAliases(*buffer)) {\n for (HloInstruction* alias_user : alias.instruction()->users()) {\n if (DoesNotUseOperandBuffer(alias.instruction(), alias.index(),\n alias_user)) {\n continue;\n }\n return false;\n }\n }\n return true;\n }\n return false;\n}\nstd::vector>\nTuplePointsToAnalysis::GetAllUsesOfInstructionAtIndex(\n HloInstruction* instruction, const ShapeIndex& index) const {\n std::vector> uses;\n const PointsToSet::BufferList& points_to =\n GetPointsToSet(instruction).element(index);\n for (const LogicalBuffer* buffer : points_to) {\n for (const BufferAlias& alias : GetBufferAliases(*buffer)) {\n for (HloInstruction* alias_user : alias.instruction()->users()) {\n if (DoesNotUseOperandBuffer(alias.instruction(), alias.index(),\n 
alias_user)) {\n continue;\n }\n for (int64_t op_idx : alias_user->OperandIndices(alias.instruction())) {\n uses.emplace_back(alias_user, op_idx);\n }\n }\n }\n }\n return uses;\n}\nbool TuplePointsToAnalysis::HasUniqueFusedUseOfOperandAt(\n HloInstruction* operand, const ShapeIndex& operand_index,\n HloInstruction* fusion, const int64_t use_operand_index) const {\n CHECK_EQ(HloOpcode::kFusion, fusion->opcode());\n if (fusion->OperandIndices(operand).size() > 1) {\n return false;\n }\n const auto& fused_params = fusion->fused_parameters();\n auto fused_param_it =\n absl::c_find_if(fused_params, [&](HloInstruction* fused_param) {\n return fusion->operand(fused_param->parameter_number()) == operand;\n });\n if (fused_param_it == fused_params.end()) {\n return false;\n }\n auto* fused_param = *fused_param_it;\n auto fused_param_uses =\n GetAllUsesOfInstructionAtIndex(fused_param, operand_index);\n return fused_param_uses.size() == 1 &&\n fused_param_uses[0].first == fusion->fused_expression_root() &&\n fused_param_uses[0].second == use_operand_index;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/tuple_points_to_analysis.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/types/span.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/service/logical_buffer.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/test.h\"\n#include \"xla/test_helpers.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/logging.h\"\n#include \"tsl/platform/statusor.h\"\n#include \"tsl/platform/test.h\"\nnamespace xla {\nnamespace {\nusing ::testing::UnorderedElementsAre;\nusing 
::testing::UnorderedElementsAreArray;\nclass TuplePointsToAnalysisTest : public HloTestBase {\n protected:\n void BuildModuleAndRunAnalysis(std::unique_ptr computation) {\n BuildModule(std::move(computation));\n RunAnalysis();\n }\n void BuildModule(std::unique_ptr computation) {\n module_ = CreateNewVerifiedModule();\n module_->AddEntryComputation(std::move(computation));\n }\n void RunAnalysis() {\n CHECK_NOTNULL(module_.get());\n points_to_analysis_ = TuplePointsToAnalysis::Run(module_.get()).value();\n }\n const LogicalBuffer* GetBuffer(const HloInstruction* instruction,\n const ShapeIndex& index) {\n const auto& pointed_to =\n points_to_analysis_->GetPointsToSet(instruction).element(index);\n CHECK_EQ(1, pointed_to.size());\n CHECK_EQ(instruction, pointed_to[0]->instruction());\n CHECK(index == pointed_to[0]->index());\n return pointed_to[0];\n }\n void ExpectHasBuffers(const PointsToSet::BufferList& points_to_set,\n absl::Span buffers) {\n std::vector vec(buffers.begin(), buffers.end());\n EXPECT_THAT(points_to_set, UnorderedElementsAreArray(vec));\n }\n void ExpectHasTopLevelBuffers(\n const PointsToSet::BufferList& points_to_set,\n absl::Span instructions) {\n PointsToSet::BufferList buffers;\n for (auto instruction : instructions) {\n buffers.push_back(GetBuffer(instruction, {}));\n }\n ExpectHasBuffers(points_to_set, buffers);\n }\n void ExpectHasTopLevelBuffers(\n const PointsToSet::BufferSet& points_to_set,\n absl::Span instructions) {\n ExpectHasTopLevelBuffers(\n PointsToSet::BufferList(points_to_set.begin(), points_to_set.end()),\n instructions);\n }\n void ExpectHasBufferAliases(\n const HloInstruction* instruction, const ShapeIndex& index,\n absl::Span> expected) {\n const LogicalBuffer* buffer =\n points_to_analysis_->GetBufferDefinedAt(instruction, index).value();\n std::vector expected_aliases;\n expected_aliases.reserve(expected.size());\n for (auto& pair : expected) {\n expected_aliases.push_back(BufferAlias(pair.first, pair.second));\n }\n 
EXPECT_THAT(points_to_analysis_->GetBufferAliases(*buffer),\n UnorderedElementsAreArray(expected_aliases));\n }\n std::unique_ptr module_;\n std::unique_ptr points_to_analysis_;\n};\nTEST_F(TuplePointsToAnalysisTest, SimpleTuple) {\n auto builder = HloComputation::Builder(TestName());\n auto constant1 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(1.0)));\n auto constant2 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(2.0)));\n auto tuple = builder.AddInstruction(\n HloInstruction::CreateTuple({constant1, constant2}));\n BuildModuleAndRunAnalysis(builder.Build());\n EXPECT_EQ(1, points_to_analysis_->GetPointsToSet(constant1).size());\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(constant1).element({}), {constant1});\n EXPECT_TRUE(\n points_to_analysis_->GetPointsToSet(constant1).tuple_sources({}).empty());\n EXPECT_TRUE(points_to_analysis_->GetPointsToSet(tuple).IsDistinct());\n EXPECT_EQ(1, points_to_analysis_->GetPointsToSet(constant2).size());\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(constant2).element({}), {constant2});\n EXPECT_TRUE(\n points_to_analysis_->GetPointsToSet(constant2).tuple_sources({}).empty());\n EXPECT_EQ(3, points_to_analysis_->GetPointsToSet(tuple).size());\n EXPECT_FALSE(points_to_analysis_->GetPointsToSet(tuple).IsAmbiguous());\n EXPECT_THAT(points_to_analysis_->GetPointsToSet(tuple).tuple_sources({}),\n UnorderedElementsAre(tuple));\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(tuple).CreateFlattenedSet(),\n {constant1, constant2, tuple});\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(tuple).element({}), {tuple});\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(tuple).element({0}), {constant1});\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(tuple).element({1}), {constant2});\n const PointsToSet& tuple_points_to_set =\n 
points_to_analysis_->GetPointsToSet(tuple);\n EXPECT_TRUE(tuple_points_to_set.ContainsBufferAtIndex(\n *GetBuffer(constant1, {}), {0}));\n EXPECT_TRUE(tuple_points_to_set.ContainsBufferAtIndex(\n *GetBuffer(constant2, {}), {1}));\n EXPECT_FALSE(tuple_points_to_set.ContainsBufferAtIndex(\n *GetBuffer(constant2, {}), {0}));\n EXPECT_TRUE(tuple_points_to_set.ContainsBuffer(*GetBuffer(constant1, {})));\n EXPECT_TRUE(tuple_points_to_set.ContainsBuffer(*GetBuffer(constant2, {})));\n}\nTEST_F(TuplePointsToAnalysisTest, NestedTuple) {\n auto builder = HloComputation::Builder(TestName());\n auto constant1 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(1.0)));\n auto constant2 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(2.0)));\n auto inner_tuple = builder.AddInstruction(\n HloInstruction::CreateTuple({constant1, constant2}));\n auto constant3 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(3.0)));\n auto tuple = builder.AddInstruction(\n HloInstruction::CreateTuple({inner_tuple, constant3}));\n BuildModuleAndRunAnalysis(builder.Build());\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(constant1).element({}), {constant1});\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(constant2).element({}), {constant2});\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(constant3).element({}), {constant3});\n EXPECT_EQ(3, points_to_analysis_->GetPointsToSet(inner_tuple).size());\n EXPECT_FALSE(points_to_analysis_->GetPointsToSet(inner_tuple).IsAmbiguous());\n EXPECT_TRUE(points_to_analysis_->GetPointsToSet(inner_tuple).IsDistinct());\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(inner_tuple).CreateFlattenedSet(),\n {constant1, constant2, inner_tuple});\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(inner_tuple).element({}),\n {inner_tuple});\n EXPECT_THAT(\n 
points_to_analysis_->GetPointsToSet(inner_tuple).tuple_sources({}),\n UnorderedElementsAre(inner_tuple));\n EXPECT_EQ(5, points_to_analysis_->GetPointsToSet(tuple).size());\n EXPECT_FALSE(points_to_analysis_->GetPointsToSet(tuple).IsAmbiguous());\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(tuple).CreateFlattenedSet(),\n {constant1, constant2, constant3, inner_tuple, tuple});\n EXPECT_THAT(points_to_analysis_->GetPointsToSet(tuple).tuple_sources({}),\n UnorderedElementsAre(tuple));\n EXPECT_THAT(points_to_analysis_->GetPointsToSet(tuple).tuple_sources({0}),\n UnorderedElementsAre(inner_tuple));\n EXPECT_TRUE(\n points_to_analysis_->GetPointsToSet(tuple).tuple_sources({1}).empty());\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(tuple).element({0}), {inner_tuple});\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(tuple).element({0, 0}), {constant1});\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(tuple).element({0, 1}), {constant2});\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(tuple).element({1}), {constant3});\n}\nTEST_F(TuplePointsToAnalysisTest, GetTupleElement) {\n auto builder = HloComputation::Builder(TestName());\n auto constant1 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(1.0)));\n auto constant2 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(2.0)));\n auto inner_tuple = builder.AddInstruction(\n HloInstruction::CreateTuple({constant1, constant2}));\n auto constant3 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(3.0)));\n auto tuple = builder.AddInstruction(\n HloInstruction::CreateTuple({inner_tuple, constant3}));\n auto get_tuple_element = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(inner_tuple->shape(), tuple, 0));\n BuildModuleAndRunAnalysis(builder.Build());\n auto& points_to_set = 
points_to_analysis_->GetPointsToSet(get_tuple_element);\n EXPECT_EQ(3, points_to_set.size());\n EXPECT_FALSE(points_to_set.IsAmbiguous());\n EXPECT_TRUE(points_to_set.IsDistinct());\n ExpectHasTopLevelBuffers(points_to_set.CreateFlattenedSet(),\n {constant1, constant2, inner_tuple});\n ExpectHasTopLevelBuffers(points_to_set.element({}), {inner_tuple});\n EXPECT_THAT(points_to_set.tuple_sources({}),\n UnorderedElementsAre(inner_tuple));\n}\nTEST_F(TuplePointsToAnalysisTest, AddDependency) {\n auto builder = HloComputation::Builder(TestName());\n auto constant = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(1.0)));\n auto token = builder.AddInstruction(HloInstruction::CreateToken());\n auto add_dependency = builder.AddInstruction(\n HloInstruction::CreateAddDependency(constant, token));\n BuildModuleAndRunAnalysis(builder.Build());\n auto& points_to_set = points_to_analysis_->GetPointsToSet(add_dependency);\n EXPECT_EQ(1, points_to_set.size());\n EXPECT_FALSE(points_to_set.IsAmbiguous());\n EXPECT_TRUE(points_to_set.IsDistinct());\n ExpectHasTopLevelBuffers(points_to_set.CreateFlattenedSet(), {constant});\n}\nTEST_F(TuplePointsToAnalysisTest, DuplicatedElement) {\n auto builder = HloComputation::Builder(TestName());\n auto constant = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(1.0)));\n auto tuple = builder.AddInstruction(\n HloInstruction::CreateTuple({constant, constant, constant}));\n BuildModuleAndRunAnalysis(builder.Build());\n EXPECT_EQ(2, points_to_analysis_->GetPointsToSet(tuple).size());\n EXPECT_FALSE(points_to_analysis_->GetPointsToSet(tuple).IsAmbiguous());\n EXPECT_FALSE(points_to_analysis_->GetPointsToSet(tuple).IsDistinct());\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(tuple).element({}), {tuple});\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(tuple).CreateFlattenedSet(),\n {constant, tuple});\n}\nTEST_F(TuplePointsToAnalysisTest, TupleCopy) 
{\n auto builder = HloComputation::Builder(TestName());\n auto constant1 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(1.0)));\n auto constant2 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(2.0)));\n auto tuple = builder.AddInstruction(\n HloInstruction::CreateTuple({constant1, constant2}));\n auto copy = builder.AddInstruction(\n HloInstruction::CreateUnary(tuple->shape(), HloOpcode::kCopy, tuple));\n BuildModuleAndRunAnalysis(builder.Build());\n EXPECT_FALSE(points_to_analysis_->GetPointsToSet(copy).IsAmbiguous());\n EXPECT_TRUE(points_to_analysis_->GetPointsToSet(copy).IsDistinct());\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(tuple).CreateFlattenedSet(),\n {constant1, constant2, tuple});\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(copy).element({}), {copy});\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(copy).CreateFlattenedSet(),\n {constant1, constant2, copy});\n}\nTEST_F(TuplePointsToAnalysisTest, CopyStartAndCopyDone) {\n auto builder = HloComputation::Builder(TestName());\n auto constant = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(1.0)));\n auto copy_start = builder.AddInstruction(HloInstruction::CreateCopyStart(\n ShapeUtil::MakeTupleShape({constant->shape(), constant->shape(),\n ShapeUtil::MakeShape(U32, {})}),\n constant));\n auto copy_done = builder.AddInstruction(HloInstruction::CreateUnary(\n constant->shape(), HloOpcode::kCopyDone, copy_start));\n BuildModuleAndRunAnalysis(builder.Build());\n EXPECT_FALSE(points_to_analysis_->GetPointsToSet(copy_start).IsAmbiguous());\n EXPECT_TRUE(points_to_analysis_->GetPointsToSet(copy_start).IsDistinct());\n EXPECT_FALSE(points_to_analysis_->GetPointsToSet(copy_done).IsAmbiguous());\n EXPECT_TRUE(points_to_analysis_->GetPointsToSet(copy_done).IsDistinct());\n ExpectHasTopLevelBuffers(\n 
points_to_analysis_->GetPointsToSet(copy_start).element({}),\n {copy_start});\n ExpectHasBufferAliases(copy_start, {0}, {{copy_start, {0}}, {copy_done, {}}});\n ExpectHasBufferAliases(constant, {}, {{constant, {}}, {copy_start, {1}}});\n}\nTEST_F(TuplePointsToAnalysisTest, AsyncOps) {\n std::string hlo_str = R\"(\n HloModule module\n ENTRY entry {\n p0 = f32[2,3] parameter(0)\n async-start = ((f32[2,3]), f32[2,3], u32[]) custom-call-start(p0), custom_call_target=\"foo\"\n async-update = ((f32[2,3]), f32[2,3], u32[]) custom-call-update(async-start)\n ROOT async-done = f32[2,3] custom-call-done(async-update)\n }\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n module_, ParseAndReturnVerifiedModule(hlo_str, GetModuleConfigForTest()));\n HloInstruction* param =\n module_->entry_computation()->parameter_instruction(0);\n HloInstruction* async_start = FindInstruction(module_.get(), \"async-start\");\n HloInstruction* async_update = FindInstruction(module_.get(), \"async-update\");\n HloInstruction* async_done = FindInstruction(module_.get(), \"async-done\");\n RunAnalysis();\n EXPECT_FALSE(points_to_analysis_->GetPointsToSet(async_start).IsAmbiguous());\n EXPECT_TRUE(points_to_analysis_->GetPointsToSet(async_start).IsDistinct());\n EXPECT_FALSE(points_to_analysis_->GetPointsToSet(async_update).IsAmbiguous());\n EXPECT_TRUE(points_to_analysis_->GetPointsToSet(async_update).IsDistinct());\n EXPECT_FALSE(points_to_analysis_->GetPointsToSet(async_done).IsAmbiguous());\n EXPECT_TRUE(points_to_analysis_->GetPointsToSet(async_done).IsDistinct());\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(async_start).element({}),\n {async_start});\n ExpectHasBufferAliases(\n param, {}, {{param, {}}, {async_start, {0, 0}}, {async_update, {0, 0}}});\n ExpectHasBufferAliases(\n async_start, {1},\n {{async_start, {1}}, {async_update, {1}}, {async_done, {}}});\n ExpectHasBufferAliases(async_start, {2},\n {{async_start, {2}}, {async_update, {2}}});\n}\nTEST_F(TuplePointsToAnalysisTest, 
SendAndSendDone) {\n auto builder = HloComputation::Builder(TestName());\n auto constant = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(1.0)));\n auto token = builder.AddInstruction(HloInstruction::CreateToken());\n auto send = builder.AddInstruction(\n HloInstruction::CreateSend(constant, token, 0));\n auto send_done = builder.AddInstruction(HloInstruction::CreateSendDone(send));\n BuildModuleAndRunAnalysis(builder.Build());\n EXPECT_FALSE(points_to_analysis_->GetPointsToSet(send).IsAmbiguous());\n EXPECT_TRUE(points_to_analysis_->GetPointsToSet(send).IsDistinct());\n EXPECT_FALSE(points_to_analysis_->GetPointsToSet(send_done).IsAmbiguous());\n EXPECT_TRUE(points_to_analysis_->GetPointsToSet(send_done).IsDistinct());\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(send).element({}), {send});\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(send).element({0}), {constant});\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(send_done).CreateFlattenedSet(),\n {send_done});\n ExpectHasBufferAliases(constant, {}, {{constant, {}}, {send, {0}}});\n}\nTEST_F(TuplePointsToAnalysisTest, RecvAndRecvDone) {\n auto builder = HloComputation::Builder(TestName());\n auto token = builder.AddInstruction(HloInstruction::CreateToken());\n auto recv = builder.AddInstruction(HloInstruction::CreateRecv(\n ShapeUtil::MakeShape(F32, {1, 2, 3}), token, 0));\n auto recv_done = builder.AddInstruction(HloInstruction::CreateRecvDone(recv));\n BuildModuleAndRunAnalysis(builder.Build());\n EXPECT_FALSE(points_to_analysis_->GetPointsToSet(recv).IsAmbiguous());\n EXPECT_TRUE(points_to_analysis_->GetPointsToSet(recv).IsDistinct());\n EXPECT_FALSE(points_to_analysis_->GetPointsToSet(recv_done).IsAmbiguous());\n EXPECT_TRUE(points_to_analysis_->GetPointsToSet(recv_done).IsDistinct());\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(recv).element({}), {recv});\n ExpectHasBufferAliases(recv, {0}, 
{{recv, {0}}, {recv_done, {0}}});\n}\nTEST_F(TuplePointsToAnalysisTest, TupleWithBitcast) {\n auto builder = HloComputation::Builder(TestName());\n auto constant1 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(1.0)));\n auto constant2 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(2.0)));\n auto bitcast = builder.AddInstruction(\n HloInstruction::CreateBitcast(constant2->shape(), constant2));\n auto tuple =\n builder.AddInstruction(HloInstruction::CreateTuple({constant1, bitcast}));\n BuildModuleAndRunAnalysis(builder.Build());\n EXPECT_EQ(1, points_to_analysis_->GetPointsToSet(bitcast).size());\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(bitcast).element({}), {constant2});\n EXPECT_TRUE(\n points_to_analysis_->GetPointsToSet(bitcast).tuple_sources({}).empty());\n EXPECT_EQ(3, points_to_analysis_->GetPointsToSet(tuple).size());\n EXPECT_FALSE(points_to_analysis_->GetPointsToSet(tuple).IsAmbiguous());\n EXPECT_THAT(points_to_analysis_->GetPointsToSet(tuple).tuple_sources({}),\n UnorderedElementsAre(tuple));\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(tuple).CreateFlattenedSet(),\n {constant1, constant2, tuple});\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(tuple).element({}), {tuple});\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(tuple).element({0}), {constant1});\n ExpectHasTopLevelBuffers(\n points_to_analysis_->GetPointsToSet(tuple).element({1}), {constant2});\n}\nTEST_F(TuplePointsToAnalysisTest, PointsToTupleConstantElements) {\n auto builder = HloComputation::Builder(TestName());\n Literal elements[] = {LiteralUtil::CreateR2({{1.0}, {2.0}}),\n LiteralUtil::CreateR1({2.0, 42})};\n auto tuple_constant = builder.AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::MakeTuple({&elements[0], &elements[1]})));\n auto copy = builder.AddInstruction(HloInstruction::CreateUnary(\n tuple_constant->shape(), 
HloOpcode::kCopy, tuple_constant));\n BuildModuleAndRunAnalysis(builder.Build());\n auto& points_to_set = points_to_analysis_->GetPointsToSet(copy);\n ExpectHasBuffers(points_to_set.element({}), {GetBuffer(copy, {})});\n ExpectHasBuffers(points_to_set.element({0}),\n {GetBuffer(tuple_constant, {0})});\n ExpectHasBuffers(points_to_set.element({1}),\n {GetBuffer(tuple_constant, {1})});\n}\nTEST_F(TuplePointsToAnalysisTest, BufferAliases) {\n auto builder = HloComputation::Builder(TestName());\n auto constant1 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(1.0)));\n auto constant2 = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(2.0)));\n auto inner_tuple = builder.AddInstruction(\n HloInstruction::CreateTuple({constant1, constant2}));\n auto tuple = builder.AddInstruction(\n HloInstruction::CreateTuple({inner_tuple, constant2}));\n BuildModuleAndRunAnalysis(builder.Build());\n ExpectHasBufferAliases(\n constant1, {},\n {{constant1, {}}, {inner_tuple, {0}}, {tuple, {0, 0}}});\n ExpectHasBufferAliases(\n constant2, {},\n {{constant2, {}}, {inner_tuple, {1}}, {tuple, {0, 1}}, {tuple, {1}}});\n ExpectHasBufferAliases(inner_tuple, {},\n {{inner_tuple, {}}, {tuple, {0}}});\n ExpectHasBufferAliases(tuple, {}, {{tuple, {}}});\n}\nTEST_F(TuplePointsToAnalysisTest, DISABLED_CustomCall) {\n auto builder = HloComputation::Builder(TestName());\n auto constant = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(1.0)));\n Shape data_shape = ShapeUtil::MakeShape(F32, {});\n auto ccall = builder.AddInstruction(HloInstruction::CreateCustomCall(\n ShapeUtil::MakeTupleShape({data_shape, data_shape}), {constant},\n \"TestOp\"));\n Cast(ccall)->set_output_to_operand_aliasing(\n {std::pair>{\n ShapeIndex{1}, std::pair(0, {})}});\n auto gte0 = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(data_shape, ccall, 0));\n auto gte1 = builder.AddInstruction(\n 
HloInstruction::CreateGetTupleElement(data_shape, ccall, 1));\n BuildModuleAndRunAnalysis(builder.Build());\n ExpectHasBufferAliases(ccall, {0}, {{gte0, {}}, {ccall, {0}}});\n ExpectHasBufferAliases(constant, {},\n {{constant, {}}, {gte1, {}}, {ccall, {1}}});\n}\nclass FusionPointsToAnalysisTest : public TuplePointsToAnalysisTest {\n protected:\n void Run(const std::string& hlo_str, int64_t expected_num_users) {\n TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_str));\n auto* fusion = module_->entry_computation()->root_instruction();\n auto* tuple_param0 = fusion->operand(0);\n RunAnalysis();\n auto* fusion_param = GetFusionParameterForOperand(fusion, tuple_param0);\n ExpectHasBuffers(\n points_to_analysis_->GetPointsToSet(fusion_param).element({}),\n {GetBuffer(fusion_param, {})});\n ExpectHasBuffers(\n points_to_analysis_->GetPointsToSet(fusion_param).element({0}),\n {GetBuffer(fusion_param, {0})});\n ExpectHasBuffers(\n points_to_analysis_->GetPointsToSet(fusion_param).element({1}),\n {GetBuffer(fusion_param, {1})});\n auto fused_gte0 = GetUniqueFusionParameterUserAt(fusion_param, 0);\n ExpectHasBuffers(\n points_to_analysis_->GetPointsToSet(fused_gte0).element({}),\n {GetBuffer(fusion_param, {0})});\n auto fused_gte1 = GetUniqueFusionParameterUserAt(fusion_param, 1);\n ExpectHasBuffers(\n points_to_analysis_->GetPointsToSet(fused_gte1).element({}),\n {GetBuffer(fusion_param, {1})});\n ExpectHasBufferAliases(fusion_param, {0},\n {{fusion_param, {0}}, {fused_gte0, {}}});\n ExpectHasBufferAliases(fusion_param, {1},\n {{fusion_param, {1}}, {fused_gte1, {}}});\n ExpectNumUsersOfAliases(fusion_param, {0}, expected_num_users);\n }\n HloInstruction* GetFusionParameterForOperand(HloInstruction* fusion,\n const HloInstruction* operand) {\n const auto& fused_instructions = fusion->fused_instructions();\n auto it =\n absl::c_find_if(fused_instructions, [&](const HloInstruction* fused) {\n return fused->opcode() == HloOpcode::kParameter &&\n 
fusion->operand(fused->parameter_number()) == operand;\n });\n CHECK(it != fusion->fused_instructions().end());\n return *it;\n }\n std::vector GetFusionParameterUsersAt(\n HloInstruction* fusion_param, int64_t tuple_index) {\n CHECK(fusion_param->shape().IsTuple());\n std::vector users_at_tuple_index;\n for (auto user : fusion_param->users()) {\n CHECK_EQ(HloOpcode::kGetTupleElement, user->opcode());\n if (user->tuple_index() == tuple_index) {\n users_at_tuple_index.push_back(user);\n }\n }\n return users_at_tuple_index;\n }\n HloInstruction* GetUniqueFusionParameterUserAt(HloInstruction* fusion_param,\n int64_t tuple_index) {\n std::vector users =\n GetFusionParameterUsersAt(fusion_param, tuple_index);\n CHECK_EQ(1, users.size());\n return users[0];\n }\n void ExpectNumUsersOfAliases(const HloInstruction* instruction,\n const ShapeIndex& index,\n const int64_t expected_num_users) {\n const auto* buffer = GetBuffer(instruction, index);\n int64_t num_users = 0;\n for (const auto& alias : points_to_analysis_->GetBufferAliases(*buffer)) {\n for (auto user : alias.instruction()->users()) {\n if (user->opcode() == HloOpcode::kGetTupleElement && !index.empty()) {\n continue;\n }\n ++num_users;\n }\n }\n EXPECT_EQ(expected_num_users, num_users);\n }\n};\nTEST_F(FusionPointsToAnalysisTest, FusionParam0OneUser) {\n std::string hlo_str = R\"(\nHloModule FusionParam0OneUser\n%fused_computation (param_1.2: (f32[8], f32[3])) -> f32[8] {\n %param_1.2 = (f32[8]{0}, f32[3]{0}) parameter(0)\n %get-tuple-element.1 = f32[8]{0} get-tuple-element((f32[8]{0}, f32[3]{0}) %param_1.2), index=0\n %get-tuple-element.2 = f32[3]{0} get-tuple-element((f32[8]{0}, f32[3]{0}) %param_1.2), index=1\n %constant.3 = f32[3]{0} constant({1, 1, 1})\n %add.1 = f32[3]{0} add(f32[3]{0} %get-tuple-element.2, f32[3]{0} %constant.3)\n %constant.2 = s32[] constant(0)\n ROOT %dynamic-update-slice.1 = f32[8]{0} dynamic-update-slice(f32[8]{0} %get-tuple-element.1, f32[3]{0} %add.1, s32[] %constant.2)\n}\nENTRY 
%FusionParam0OneUser (param0: (f32[8], f32[3])) -> f32[8] {\n %param0 = (f32[8]{0}, f32[3]{0}) parameter(0)\n ROOT %fusion = f32[8]{0} fusion((f32[8]{0}, f32[3]{0}) %param0), kind=kLoop, calls=%fused_computation\n}\n)\";\n Run(hlo_str, 1);\n}\nTEST_F(FusionPointsToAnalysisTest, FusionParam0TwoUsers) {\n std::string hlo_str = R\"(\nHloModule FusionParam0TwoUsers\n%fused_computation (param_1.2: (f32[8], f32[3])) -> f32[8] {\n %param_1.2 = (f32[8]{0}, f32[3]{0}) parameter(0)\n %get-tuple-element.1 = f32[8]{0} get-tuple-element((f32[8]{0}, f32[3]{0}) %param_1.2), index=0\n %get-tuple-element.2 = f32[3]{0} get-tuple-element((f32[8]{0}, f32[3]{0}) %param_1.2), index=1\n %constant.3 = f32[3]{0} constant({1, 1, 1})\n %add.1 = f32[3]{0} add(f32[3]{0} %get-tuple-element.2, f32[3]{0} %constant.3)\n %slice = f32[3]{0} slice(f32[8]{0} %get-tuple-element.1), slice={[0:3]}\n %add.2 = f32[3]{0} add(f32[3]{0} %add.1, f32[3]{0} %slice)\n %constant.2 = s32[] constant(0)\n ROOT %dynamic-update-slice.1 = f32[8]{0} dynamic-update-slice(f32[8]{0} %get-tuple-element.1, f32[3]{0} %add.2, s32[] %constant.2)\n}\nENTRY %FusionParam0TwoUsers (param0: (f32[8], f32[3])) -> f32[8] {\n %param0 = (f32[8]{0}, f32[3]{0}) parameter(0)\n ROOT %fusion = f32[8]{0} fusion((f32[8]{0}, f32[3]{0}) %param0), kind=kLoop, calls=%fused_computation\n}\n)\";\n Run(hlo_str, 2);\n}\nclass PointsToAnalysisTestBase : public HloTestBase {\n protected:\n void BuildModule(std::unique_ptr computation) {\n module_ = CreateNewVerifiedModule();\n computation_ = module_->AddEntryComputation(std::move(computation));\n }\n void RunAnalysis() {\n CHECK_NOTNULL(module_.get());\n points_to_analysis_ = TuplePointsToAnalysis::Run(module_.get()).value();\n }\n void BuildModuleAndRunAnalysis(std::unique_ptr computation) {\n BuildModule(std::move(computation));\n RunAnalysis();\n }\n std::unique_ptr module_;\n HloComputation* computation_ = nullptr;\n std::unique_ptr points_to_analysis_;\n};\nclass DoesNotUseOperandBufferTest : public 
PointsToAnalysisTestBase {};\nTEST_F(DoesNotUseOperandBufferTest, GetTupleElement) {\n auto builder = HloComputation::Builder(TestName());\n Shape elem_shape = ShapeUtil::MakeShape(F32, {8});\n auto tuple = builder.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeTupleShape({elem_shape, elem_shape}), \"tuple\"));\n auto gte0 = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(elem_shape, tuple, 0));\n auto gte1 = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(elem_shape, tuple, 1));\n builder.AddInstruction(\n HloInstruction::CreateBinary(elem_shape, HloOpcode::kAdd, gte0, gte1));\n BuildModuleAndRunAnalysis(builder.Build());\n EXPECT_TRUE(points_to_analysis_->DoesNotUseOperandBuffer(tuple, {0}, gte0));\n EXPECT_TRUE(points_to_analysis_->DoesNotUseOperandBuffer(tuple, {1}, gte1));\n EXPECT_FALSE(points_to_analysis_->DoesNotUseOperandBuffer(tuple, {}, gte0));\n EXPECT_FALSE(points_to_analysis_->DoesNotUseOperandBuffer(tuple, {}, gte1));\n}\nTEST_F(DoesNotUseOperandBufferTest, FusedDynamicUpdateSlice) {\n auto builder = HloComputation::Builder(TestName());\n Shape data_shape = ShapeUtil::MakeShape(F32, {8});\n auto tuple = builder.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeTupleShape({data_shape, data_shape}), \"tuple\"));\n auto gte0 = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(data_shape, tuple, 0));\n auto gte1 = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(data_shape, tuple, 1));\n auto starts = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(2)));\n auto update = builder.AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR1({2.f, 2.f, 2.f})));\n auto dynamic_update_slice =\n builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(\n data_shape, gte1, update, {starts}));\n builder.AddInstruction(\n HloInstruction::CreateTuple({gte0, dynamic_update_slice}));\n BuildModule(builder.Build());\n auto 
fusion = computation_->CreateFusionInstruction(\n {dynamic_update_slice, starts, update, gte1},\n HloInstruction::FusionKind::kLoop);\n RunAnalysis();\n EXPECT_TRUE(points_to_analysis_->DoesNotUseOperandBuffer(tuple, {0}, fusion));\n EXPECT_FALSE(\n points_to_analysis_->DoesNotUseOperandBuffer(tuple, {1}, fusion));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/tuple_points_to_analysis.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/tuple_points_to_analysis_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1165,"cells":{"ID":{"kind":"string","value":"c7d36866-411c-46ed-801c-d8994c2f600b"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"scatter_expander"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/transforms/scatter_expander.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/scatter_expander_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/transforms/scatter_expander.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/primitive_util.h\"\nnamespace xla {\nbool GpuScatterExpander::InstructionMatchesPattern(HloInstruction* inst) {\n return inst->opcode() == HloOpcode::kScatter &&\n (inst->shape().IsTuple() ||\n primitive_util::BitWidth(inst->shape().element_type()) > 64);\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/scatter_expander.h\"\n#include \n#include \n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include 
\"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/literal.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/test.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\n#include \"xla/types.h\"\nnamespace xla {\nnamespace {\nclass ScatterExpanderTest : public HloTestBase {\n protected:\n void ClearInstructionLayout(HloModule* module, absl::string_view inst_name) {\n HloInstruction* inst = FindInstruction(module, inst_name);\n inst->mutable_shape()->clear_layout();\n }\n};\nTEST_F(ScatterExpanderTest, ScatterOperandWithoutLayout) {\n const char* kModuleStr = R\"(\n HloModule scatter_expander\n scatter_computation {\n parameter0 = s32[] parameter(0)\n ROOT parameter1 = s32[] parameter(1)\n }\n ENTRY kernel_entry {\n operand = s32[5] iota(), iota_dimension=0\n indices = s32[1] parameter(0)\n update = s32[] constant(0)\n ROOT scatter = s32[5]{0} scatter(operand, indices, update),\n update_window_dims={}, inserted_window_dims={0},\n scatter_dims_to_operand_dims={0}, index_vector_dim=0,\n to_apply=scatter_computation\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kModuleStr));\n ClearInstructionLayout(module.get(), \"operand\");\n ScatterExpander scatter_expander(ScatterExpander::kEliminateAllScatters);\n TF_ASSERT_OK_AND_ASSIGN(bool result,\n RunHloPass(&scatter_expander, module.get()));\n EXPECT_TRUE(result);\n}\nTEST_F(ScatterExpanderTest, ScatterMultipleOperandsWithoutLayout) {\n const char* kModuleStr = R\"(\n HloModule scatter_expander\n scatter_computation {\n p0 = s32[] parameter(0)\n p1 = f32[] parameter(1)\n p2 = s32[] parameter(2)\n p3 = f32[] parameter(3)\n ROOT tuple = tuple(p2, p3)\n }\n ENTRY kernel_entry {\n operand0 = s32[5] iota(), iota_dimension=0\n operand1 = f32[5] constant({2,4,6,8,10})\n indices = s32[1] parameter(0)\n update0 = s32[] constant(0)\n update1 = f32[] constant(1)\n ROOT scatter = (s32[5]{0}, f32[5]{0}) scatter(operand0, operand1, indices, update0, update1),\n 
update_window_dims={}, inserted_window_dims={0},\n scatter_dims_to_operand_dims={0}, index_vector_dim=0,\n to_apply=scatter_computation\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kModuleStr));\n ClearInstructionLayout(module.get(), \"operand0\");\n ClearInstructionLayout(module.get(), \"operand1\");\n ScatterExpander scatter_expander(ScatterExpander::kEliminateAllScatters);\n TF_ASSERT_OK_AND_ASSIGN(bool result,\n RunHloPass(&scatter_expander, module.get()));\n EXPECT_TRUE(result);\n}\nTEST_F(ScatterExpanderTest, EliminateSimpleScattersSkipsNontrivialScatter) {\n const char* kModuleStr = R\"(\n HloModule scatter_expander\n scatter_computation {\n parameter0 = s32[] parameter(0)\n ROOT parameter1 = s32[] parameter(1)\n }\n ENTRY kernel_entry {\n operand = s32[3,3] parameter(0)\n indices = s32[2] parameter(1)\n updates = s32[2,3] parameter(2)\n ROOT scatter = s32[3,3] scatter(operand, indices, updates),\n to_apply=scatter_computation,\n update_window_dims={1},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=1\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kModuleStr));\n ClearInstructionLayout(module.get(), \"operand\");\n ScatterExpander scatter_expander(ScatterExpander::kEliminateSimpleScatters);\n TF_ASSERT_OK_AND_ASSIGN(bool result,\n RunHloPass(&scatter_expander, module.get()));\n EXPECT_FALSE(result);\n}\nTEST_F(ScatterExpanderTest,\n EliminateSimpleMultioutpuScattersSkipsNontrivialScatter) {\n const char* kModuleStr = R\"(\n HloModule scatter_expander\n scatter_computation {\n p0 = s32[] parameter(0)\n p1 = f32[] parameter(1)\n p2 = s32[] parameter(2)\n p3 = f32[] parameter(3)\n ROOT tuple = tuple(p2, p3)\n }\n ENTRY kernel_entry {\n operand0 = s32[3,3] parameter(0)\n operand1 = bf16[3,3] parameter(1)\n indices = s32[2] parameter(2)\n update0 = s32[2,3] parameter(3)\n update1 = bf16[2,3] parameter(4)\n ROOT scatter = (s32[3,3], bf16[3,3]) scatter(operand0, 
operand1, indices, update0, update1),\n to_apply=scatter_computation,\n update_window_dims={1},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=1\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kModuleStr));\n ClearInstructionLayout(module.get(), \"operand0\");\n ClearInstructionLayout(module.get(), \"operand1\");\n ScatterExpander scatter_expander(ScatterExpander::kEliminateSimpleScatters);\n TF_ASSERT_OK_AND_ASSIGN(bool result,\n RunHloPass(&scatter_expander, module.get()));\n EXPECT_FALSE(result);\n}\nTEST_F(ScatterExpanderTest, EliminateSimpleScattersRewritesTrivialScatter) {\n const char* kModuleStr = R\"(\n HloModule scatter_expander\n scatter_computation {\n parameter0 = s32[] parameter(0)\n ROOT parameter1 = s32[] parameter(1)\n }\n ENTRY kernel_entry {\n operand = s32[5] iota(), iota_dimension=0\n indices = s32[1] parameter(0)\n update = s32[] constant(0)\n ROOT scatter = s32[5]{0} scatter(operand, indices, update),\n update_window_dims={}, inserted_window_dims={0},\n scatter_dims_to_operand_dims={0}, index_vector_dim=0,\n to_apply=scatter_computation\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kModuleStr));\n ClearInstructionLayout(module.get(), \"operand\");\n ScatterExpander scatter_expander(ScatterExpander::kEliminateSimpleScatters);\n TF_ASSERT_OK_AND_ASSIGN(bool result,\n RunHloPass(&scatter_expander, module.get()));\n EXPECT_TRUE(result);\n}\nTEST_F(ScatterExpanderTest,\n EliminateSimpleMultioutputScattersRewritesTrivialScatter) {\n const char* kModuleStr = R\"(\n HloModule scatter_expander\n scatter_computation {\n p0 = s32[] parameter(0)\n p1 = f32[] parameter(1)\n p2 = s32[] parameter(2)\n p3 = f32[] parameter(3)\n ROOT tuple = tuple(p2, p3)\n }\n ENTRY kernel_entry {\n operand0 = s32[5] iota(), iota_dimension=0\n operand1 = f32[5] iota(), iota_dimension=0\n indices = s32[1] parameter(0)\n update0 = s32[] constant(0)\n update1 = f32[] constant(0)\n 
ROOT scatter = (s32[5]{0}, f32[5]{0}) scatter(operand0, operand1, indices, update0, update1),\n update_window_dims={}, inserted_window_dims={0},\n scatter_dims_to_operand_dims={0}, index_vector_dim=0,\n to_apply=scatter_computation\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kModuleStr));\n ClearInstructionLayout(module.get(), \"operand0\");\n ClearInstructionLayout(module.get(), \"operand1\");\n ScatterExpander scatter_expander(ScatterExpander::kEliminateSimpleScatters);\n TF_ASSERT_OK_AND_ASSIGN(bool result,\n RunHloPass(&scatter_expander, module.get()));\n EXPECT_TRUE(result);\n}\nTEST_F(ScatterExpanderTest, DoNotEliminateScatterWithAssociativeCombiner) {\n const char* const kModuleStr = R\"(\n HloModule scatter_expander\n scatter_computation {\n arg1.173 = s32[] parameter(1)\n arg0.172 = s32[] parameter(0)\n ROOT add.48 = s32[] add(arg0.172, arg1.173)\n }\n ENTRY fused_computation {\n bitcast.2335 = s32[1,4096] parameter(0)\n pad.96 = s32[4096,2] parameter(1)\n bitcast.2748 = s32[4096,1,1] parameter(2)\n ROOT scatter.48 = s32[1,4096] scatter(bitcast.2335, pad.96, bitcast.2748),\n update_window_dims={1,2}, inserted_window_dims={},\n scatter_dims_to_operand_dims={0,1}, index_vector_dim=1,\n to_apply=scatter_computation\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kModuleStr));\n ScatterExpander scatter_expander(\n ScatterExpander::kEliminateIndeterministicScatters);\n TF_ASSERT_OK_AND_ASSIGN(bool result,\n RunHloPass(&scatter_expander, module.get()));\n EXPECT_FALSE(result);\n}\nTEST_F(ScatterExpanderTest, EliminateScatterWithNonAssociativeCombiner) {\n const char* const kModuleStr = R\"(\n HloModule scatter_expander\n scatter_computation {\n arg1.173 = f32[] parameter(1)\n arg0.172 = f32[] parameter(0)\n ROOT add.48 = f32[] add(arg0.172, arg1.173)\n }\n ENTRY fused_computation {\n bitcast.2335 = f32[1,4096] parameter(0)\n pad.96 = s32[4096,2] parameter(1)\n bitcast.2748 = f32[4096,1,1] 
parameter(2)\n ROOT scatter.48 = f32[1,4096] scatter(bitcast.2335, pad.96, bitcast.2748),\n update_window_dims={1,2}, inserted_window_dims={},\n scatter_dims_to_operand_dims={0,1}, index_vector_dim=1,\n to_apply=scatter_computation\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kModuleStr));\n ScatterExpander scatter_expander(\n ScatterExpander::kEliminateIndeterministicScatters);\n TF_ASSERT_OK_AND_ASSIGN(bool result,\n RunHloPass(&scatter_expander, module.get()));\n EXPECT_TRUE(result);\n}\nTEST_F(ScatterExpanderTest, DoNotEliminateScatterWithAssociativeFp32Combiner) {\n const char* const kModuleStr = R\"(\n HloModule scatter_expander\n scatter_computation {\n arg1.173 = f32[] parameter(1)\n arg0.172 = f32[] parameter(0)\n ROOT max.48 = f32[] maximum(arg0.172, arg1.173)\n }\n ENTRY fused_computation {\n bitcast.2335 = f32[1,4096] parameter(0)\n pad.96 = s32[4096,2] parameter(1)\n bitcast.2748 = f32[4096,1,1] parameter(2)\n ROOT scatter.48 = f32[1,4096] scatter(bitcast.2335, pad.96, bitcast.2748),\n update_window_dims={1,2}, inserted_window_dims={},\n scatter_dims_to_operand_dims={0,1}, index_vector_dim=1,\n to_apply=scatter_computation\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kModuleStr));\n ScatterExpander scatter_expander(\n ScatterExpander::kEliminateIndeterministicScatters);\n TF_ASSERT_OK_AND_ASSIGN(bool result,\n RunHloPass(&scatter_expander, module.get()));\n EXPECT_FALSE(result);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/scatter_expander.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/scatter_expander_test.cc"},"Commit 
Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1166,"cells":{"ID":{"kind":"string","value":"931a8bc3-ba94-4243-bc39-9a972cf91eb2"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"dynamic_dimension_inference"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/dynamic_dimension_inference.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/dynamic_dimension_inference_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/dynamic_dimension_inference.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/container/inlined_vector.h\"\n#include \"absl/functional/function_ref.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/match.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_format.h\"\n#include \"absl/strings/str_join.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/span.h\"\n#include \"xla/comparison_util.h\"\n#include \"xla/hlo/ir/dfs_hlo_visitor_with_default.h\"\n#include \"xla/hlo/ir/dynamic_parameter_binding.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/literal.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/service/call_inliner.h\"\n#include \"xla/service/dynamic_window_utils.h\"\n#include \"xla/service/hlo_creation_utils.h\"\n#include \"xla/service/hlo_dataflow_analysis.h\"\n#include 
\"xla/service/hlo_value.h\"\n#include \"xla/service/tuple_util.h\"\n#include \"xla/service/while_util.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_tree.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/status_macros.h\"\n#include \"xla/util.h\"\n#include \"xla/window_util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/status.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nabsl::StatusOr>\nWidenComputation(HloComputation* narrow_comp, const Shape& wide_shape) {\n TF_RET_CHECK(wide_shape.IsTuple());\n const Shape& narrow_shape = narrow_comp->parameter_instruction(0)->shape();\n if (Shape::Equal()(wide_shape, narrow_shape)) {\n return std::make_pair(narrow_comp, CallInliner::InlinedInstructionMap());\n }\n HloComputation* wide_comp = [&]() {\n HloComputation::Builder builder(absl::StrCat(\"wide.\", narrow_comp->name()));\n builder.AddInstruction(HloInstruction::CreateParameter(\n 0, wide_shape,\n absl::StrCat(\"wide.\", narrow_comp->parameter_instruction(0)->name())));\n return narrow_comp->parent()->AddEmbeddedComputation(builder.Build());\n }();\n HloInstruction* wide_parameter = wide_comp->parameter_instruction(0);\n HloInstruction* truncated_parameter = TupleUtil::ExtractPrefix(\n wide_parameter, narrow_shape.tuple_shapes_size(),\n absl::StrCat(\"renarrowed.\",\n narrow_comp->parameter_instruction(0)->name()));\n HloInstruction* call_narrow_comp = wide_comp->AddInstruction(\n HloInstruction::CreateCall(narrow_comp->root_instruction()->shape(),\n {truncated_parameter}, narrow_comp));\n wide_comp->set_root_instruction(call_narrow_comp,\n true);\n TF_ASSIGN_OR_RETURN(auto inline_map, CallInliner::Inline(call_narrow_comp));\n return std::make_pair(wide_comp, std::move(inline_map));\n}\n} \nclass DynamicDimensionInferenceVisitor : public DfsHloRewriteVisitor {\n public:\n explicit DynamicDimensionInferenceVisitor(\n const DynamicParameterBinding& param_bindings,\n HloDataflowAnalysis& 
dataflow_analysis, DynamicDimensionInference* parent,\n DynamicDimensionInference::CustomCallInferenceHandler custom_call_handler,\n DynamicDimensionInference::ShapeCheckMode shape_check_mode,\n DynamicDimensionInference::AssertionGenerator assertion_generator)\n : param_bindings_(param_bindings),\n dataflow_analysis_(dataflow_analysis),\n parent_(parent),\n custom_call_handler_(std::move(custom_call_handler)),\n shape_check_mode_(shape_check_mode),\n assertion_generator_(assertion_generator) {}\n absl::Status DefaultAction(HloInstruction* hlo) override;\n static absl::StatusOr Run(\n HloComputation* computation, HloDataflowAnalysis& dataflow_analysis,\n const DynamicParameterBinding& param_bindings,\n DynamicDimensionInference* parent,\n DynamicDimensionInference::CustomCallInferenceHandler\n custom_call_handler = nullptr,\n DynamicDimensionInference::ShapeCheckMode shape_check_mode =\n DynamicDimensionInference::ShapeCheckMode::kIgnore,\n const DynamicDimensionInference::AssertionGenerator& assertion_generator =\n nullptr) {\n if (!HloInstruction::IsThreadIncluded(computation->execution_thread(),\n parent->execution_threads_)) {\n return false;\n }\n DynamicDimensionInferenceVisitor visitor(\n param_bindings, dataflow_analysis, parent,\n std::move(custom_call_handler), shape_check_mode, assertion_generator);\n TF_RETURN_IF_ERROR(computation->Accept(&visitor));\n if (visitor.shape_assertion_ != nullptr) {\n CHECK(assertion_generator);\n assertion_generator(visitor.shape_assertion_);\n }\n return visitor.changed();\n }\n absl::Status HandleParameter(HloInstruction* hlo) override;\n absl::Status HandleInfeed(HloInstruction* hlo) override;\n absl::Status HandleConstant(HloInstruction* hlo) override;\n absl::Status HandleReduce(HloInstruction* hlo) override;\n absl::Status HandleDot(HloInstruction* hlo) override;\n absl::Status HandleTuple(HloInstruction* hlo) override;\n absl::Status HandleTranspose(HloInstruction* hlo) override;\n absl::Status 
HandleDynamicReshape(HloInstruction* hlo) override;\n absl::Status HandleReshape(HloInstruction* hlo) override;\n absl::Status HandleSort(HloInstruction* hlo) override;\n absl::Status HandlePad(HloInstruction* hlo) override;\n absl::Status HandleCustomCall(HloInstruction* hlo) override;\n absl::Status HandleBroadcast(HloInstruction* hlo) override;\n absl::Status HandleGetDimensionSize(HloInstruction* hlo) override;\n absl::Status HandleSetDimensionSize(HloInstruction* hlo) override;\n absl::Status HandleSelect(HloInstruction* hlo) override;\n absl::Status HandleConvolution(HloInstruction* hlo) override;\n absl::Status HandleConcatenate(HloInstruction* hlo) override;\n absl::Status HandleReduceWindow(HloInstruction* hlo) override;\n absl::Status HandleReverse(HloInstruction* hlo) override;\n absl::Status HandleSelectAndScatter(HloInstruction* hlo) override;\n absl::Status HandleGetTupleElement(HloInstruction* hlo) override;\n absl::Status HandleElementwiseUnary(HloInstruction* hlo) override;\n absl::Status HandleElementwiseNary(HloInstruction* hlo);\n absl::Status HandleElementwiseBinary(HloInstruction* hlo) override;\n absl::Status HandleClamp(HloInstruction* hlo) override;\n absl::Status HandleConditional(HloInstruction* hlo) override;\n absl::Status HandleWhile(HloInstruction* hlo) override;\n absl::Status HandleSlice(HloInstruction* hlo) override;\n absl::Status HandleDynamicSlice(HloInstruction* hlo) override;\n absl::Status HandleDynamicUpdateSlice(HloInstruction* hlo) override;\n absl::Status HandleGather(HloInstruction* hlo) override;\n absl::Status HandleScatter(HloInstruction* hlo) override;\n absl::Status HandleMap(HloInstruction* hlo) override;\n absl::Status HandleDomain(HloInstruction* hlo) override;\n absl::Status HandleAsyncStart(HloInstruction* hlo) override;\n absl::Status HandleAsyncDone(HloInstruction* hlo) override;\n private:\n using OperandDynamicDimensionFn = absl::FunctionRef;\n using DynamicDimensionFn = std::function;\n void 
SetDynamicSize(HloInstruction* inst, const ShapeIndex& index,\n int64_t dim, HloInstruction* size,\n bool clear_dynamic_dimension = true);\n void SetDynamicSizes(HloInstruction* inst, const ShapeIndex& index,\n absl::Span sizes);\n absl::Status HandleDynamicConvolutionForward(HloInstruction* hlo,\n int64_t operand_index,\n int64_t dimension,\n HloInstruction* dynamic_size);\n absl::Status HandleDynamicConvolutionKernelGrad(HloInstruction* hlo,\n int64_t operand_index,\n int64_t dimension);\n absl::Status HandleDynamicConvolutionInputGrad(HloInstruction* hlo,\n int64_t operand_index,\n int64_t dimension);\n absl::Status HandleDynamicWindowSamePadding(HloInstruction* hlo,\n HloInstruction* dynamic_size,\n int64_t operand_index,\n int64_t dimension);\n absl::Status ForEachOperandDynamicDimension(HloInstruction* inst,\n OperandDynamicDimensionFn);\n absl::Status ForEachDynamicDimensionInOperand(HloInstruction* inst,\n int64_t operand_index,\n OperandDynamicDimensionFn);\n absl::Status ForEachDynamicDimension(HloInstruction* inst,\n const DynamicDimensionFn& fn);\n bool CanInfer(HloInstruction* hlo) { return parent_->CanInfer(hlo); }\n absl::StatusOr RequiresPadToStatic(HloInstruction* instr,\n ShapeIndex shape_index);\n absl::Status InsertPadToStaticOnInstruction(HloInstruction* inst);\n absl::Status InsertShapeCheck(HloInstruction* dim1, HloInstruction* dim2,\n bool support_implicit_broadcast);\n absl::Status PassThroughDynamicDimension(HloInstruction*);\n const DynamicParameterBinding& param_bindings_;\n HloDataflowAnalysis& dataflow_analysis_;\n DynamicDimensionInference* parent_;\n DynamicDimensionInference::CustomCallInferenceHandler custom_call_handler_;\n DynamicDimensionInference::ShapeCheckMode shape_check_mode_;\n HloInstruction* shape_assertion_ = nullptr;\n DynamicDimensionInference::AssertionGenerator assertion_generator_;\n};\nvoid DynamicDimensionInferenceVisitor::SetDynamicSize(\n HloInstruction* inst, const ShapeIndex& index, int64_t dim,\n 
HloInstruction* size, bool clear_dynamic_dimension) {\n parent_->SetDynamicSize(inst, index, dim, size);\n if (clear_dynamic_dimension) {\n ShapeUtil::GetMutableSubshape(inst->mutable_shape(), index)\n ->set_dynamic_dimension(dim, false);\n }\n MarkAsChanged();\n}\nvoid DynamicDimensionInferenceVisitor::SetDynamicSizes(\n HloInstruction* inst, const ShapeIndex& index,\n absl::Span sizes) {\n const Shape& subshape = ShapeUtil::GetSubshape(inst->shape(), index);\n CHECK(subshape.IsArray() && subshape.rank() == sizes.size());\n for (int64_t dimension = 0; dimension < subshape.rank(); ++dimension) {\n if (sizes[dimension] != nullptr) {\n SetDynamicSize(inst, index, dimension, sizes[dimension]);\n }\n }\n}\nabsl::Status DynamicDimensionInferenceVisitor::DefaultAction(\n HloInstruction* hlo) {\n return ForEachOperandDynamicDimension(\n hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,\n int64_t operand_index, HloInstruction* dynamic_size) {\n return UnimplementedStrCat(\n \"Asked to propagate a dynamic dimension from hlo \", operand->name(),\n \"@\", index.ToString(), \"@\", dimension, \" to hlo \", hlo->ToString(),\n \", which is not implemented.\");\n });\n}\nabsl::Status DynamicDimensionInferenceVisitor::HandleGetTupleElement(\n HloInstruction* hlo) {\n if (!CanInfer(hlo)) {\n return absl::OkStatus();\n }\n return ForEachOperandDynamicDimension(\n hlo,\n [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,\n int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {\n if (hlo->tuple_index() != index[0]) {\n return absl::OkStatus();\n }\n ShapeIndex new_index(ShapeIndexView(index).subspan(1));\n SetDynamicSize(hlo, new_index, dimension, dynamic_size);\n return absl::OkStatus();\n });\n}\nabsl::Status DynamicDimensionInferenceVisitor::HandleTuple(\n HloInstruction* hlo) {\n if (!CanInfer(hlo)) {\n return absl::OkStatus();\n }\n TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(\n hlo, [&](HloInstruction*, ShapeIndex index, 
int64_t dimension,\n int64_t operand_index, HloInstruction* dynamic_size) {\n index.push_front(operand_index);\n SetDynamicSize(hlo, index, dimension, dynamic_size);\n return absl::OkStatus();\n }));\n return absl::OkStatus();\n}\nabsl::Status DynamicDimensionInferenceVisitor::HandleBroadcast(\n HloInstruction* hlo) {\n if (!CanInfer(hlo)) {\n return absl::OkStatus();\n }\n return ForEachOperandDynamicDimension(\n hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,\n int64_t operand_index, HloInstruction* dynamic_size) {\n int64_t broadcast_dim = hlo->dimensions(dimension);\n SetDynamicSize(hlo, {}, broadcast_dim, dynamic_size);\n return absl::OkStatus();\n });\n}\nabsl::Status DynamicDimensionInferenceVisitor::HandleConstant(\n HloInstruction* hlo) {\n if (!hlo->shape().is_dynamic()) {\n return absl::OkStatus();\n }\n auto* constant = Cast(hlo);\n ShapeTree do_pad(constant->shape(), false);\n Shape padded_shape = constant->shape();\n bool pad_any = false;\n TF_RETURN_IF_ERROR(ShapeUtil::ForEachMutableSubshapeWithStatus(\n &padded_shape,\n [&](Shape* subshape, const ShapeIndex& index) -> absl::Status {\n if (!subshape->IsArray()) {\n return absl::OkStatus();\n }\n TF_ASSIGN_OR_RETURN(bool requires_pad, RequiresPadToStatic(hlo, index));\n if (requires_pad) {\n pad_any = *do_pad.mutable_element(index) = true;\n *subshape = ShapeUtil::MakeStaticShape(*subshape);\n }\n return absl::OkStatus();\n }));\n if (!pad_any) {\n return absl::OkStatus();\n }\n Literal padded_literal(padded_shape);\n do_pad.ForEachElement([&](const ShapeIndex& index, bool requires_pad) {\n const Shape& subshape = ShapeUtil::GetSubshape(padded_shape, index);\n if (!subshape.IsArray()) {\n return absl::OkStatus();\n }\n TF_RETURN_IF_ERROR(padded_literal.CopyFrom(constant->literal(), index,\n index,\n true));\n if (!requires_pad) {\n for (int64_t dimension = 0; dimension < subshape.rank(); ++dimension) {\n if (subshape.is_dynamic_dimension(dimension)) {\n 
padded_literal.SetDynamicSize(\n dimension, index,\n constant->literal().GetDynamicSize(dimension, index));\n }\n }\n }\n return absl::OkStatus();\n });\n auto* padded_constant = hlo->AddInstruction(\n HloInstruction::CreateConstant(std::move(padded_literal)));\n TF_RETURN_IF_ERROR(constant->ReplaceAllUsesWith(padded_constant));\n SetVisited(*padded_constant);\n TF_RETURN_IF_ERROR(do_pad.ForEachElementWithStatus(\n [&](const ShapeIndex& index, bool requires_pad) -> absl::Status {\n if (!requires_pad) {\n return absl::OkStatus();\n }\n const Shape& subshape =\n ShapeUtil::GetSubshape(constant->shape(), index);\n TF_RET_CHECK(subshape.IsArray());\n for (int64_t dimension = 0; dimension < subshape.rank(); ++dimension) {\n if (!subshape.is_dynamic_dimension(dimension)) {\n continue;\n }\n HloInstruction* dynamic_size = hlo->AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(\n constant->literal().GetDynamicSize(dimension, index))));\n SetVisited(*dynamic_size);\n SetDynamicSize(padded_constant, index, dimension, dynamic_size);\n }\n return absl::OkStatus();\n }));\n MarkAsChanged();\n return absl::OkStatus();\n}\nabsl::Status DynamicDimensionInferenceVisitor::HandleCustomCall(\n HloInstruction* hlo) {\n if (hlo->custom_call_target() == \"PadToStatic\") {\n for (int64_t i = 0; i < hlo->operand(0)->shape().rank(); ++i) {\n if (hlo->operand(0)->shape().is_dynamic_dimension(i)) {\n HloInstruction* dynamic_size =\n hlo->parent()->AddInstruction(HloInstruction::CreateGetTupleElement(\n ShapeUtil::MakeScalarShape(S32), hlo, i + 1));\n ShapeIndex data_output = {0};\n SetDynamicSize(hlo, data_output, i, dynamic_size);\n }\n }\n return absl::OkStatus();\n }\n if (!CanInfer(hlo)) {\n return absl::OkStatus();\n }\n if (custom_call_handler_) {\n TF_RETURN_IF_ERROR(custom_call_handler_(hlo, parent_));\n } else {\n TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(\n hlo,\n [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,\n int64_t 
operand_index,\n HloInstruction* dynamic_size) -> absl::Status {\n if (hlo->custom_call_target() == \"SliceToDynamic\" ||\n hlo->custom_call_target() == \"Sharding\" ||\n (absl::StartsWith(hlo->custom_call_target(), \"Resize\") &&\n (dimension == 0 || dimension == 3))) {\n SetDynamicSize(hlo, {}, dimension, dynamic_size);\n return absl::OkStatus();\n }\n if (hlo->custom_call_target() == \"DynamicReduceWindowSamePadding\") {\n if (hlo->operand_count() > 2) {\n return Unimplemented(\n \"DynamicReduceWindowSamePadding doesn't support variadic \"\n \"reduce window %s\",\n hlo->ToString());\n }\n return HandleDynamicWindowSamePadding(hlo, dynamic_size,\n operand_index, dimension);\n }\n if (hlo->custom_call_target() ==\n \"DynamicSelectAndScatterSamePadding\") {\n if (operand_index == 1) {\n return absl::OkStatus();\n }\n SetDynamicSize(hlo, {}, dimension, dynamic_size);\n return absl::OkStatus();\n }\n if (hlo->custom_call_target() == \"DynamicConvolutionInputGrad\") {\n return HandleDynamicConvolutionInputGrad(hlo, operand_index,\n dimension);\n }\n if (hlo->custom_call_target() == \"DynamicConvolutionKernelGrad\") {\n return HandleDynamicConvolutionKernelGrad(hlo, operand_index,\n dimension);\n }\n if (hlo->custom_call_target() == \"DynamicConvolutionForward\") {\n return HandleDynamicConvolutionForward(hlo, operand_index,\n dimension, dynamic_size);\n }\n return Unimplemented(\n \"CustomCall \\\"%s\\\" is not supported to have a dynamic dimension\",\n hlo->custom_call_target());\n }));\n }\n return InsertPadToStaticOnInstruction(hlo);\n}\nabsl::Status DynamicDimensionInferenceVisitor::HandleSort(HloInstruction* hlo) {\n if (!CanInfer(hlo)) {\n return absl::OkStatus();\n }\n return ForEachOperandDynamicDimension(\n hlo,\n [&](HloInstruction* operand, ShapeIndex index, int64_t dynamic_dimension,\n int64_t operand_index, HloInstruction* dynamic_size) {\n HloSortInstruction* sort = Cast(hlo);\n if (sort->values_count() == 0) {\n SetDynamicSize(hlo, {}, 
dynamic_dimension, dynamic_size);\n } else {\n SetDynamicSize(hlo, {operand_index}, dynamic_dimension, dynamic_size);\n }\n return absl::OkStatus();\n });\n}\nabsl::Status DynamicDimensionInferenceVisitor::HandlePad(HloInstruction* hlo) {\n if (!CanInfer(hlo)) {\n return absl::OkStatus();\n }\n return ForEachOperandDynamicDimension(\n hlo,\n [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,\n int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {\n if (operand_index != 0) {\n return Unimplemented(\n \"Dynamic dimension on padding value is not supported\");\n }\n const PaddingConfig_PaddingConfigDimension& padding_config =\n hlo->padding_config().dimensions(dimension);\n HloInstruction* dynamic_size_adjusted = dynamic_size;\n if (padding_config.interior_padding() != 0) {\n HloInstruction* one =\n hlo->parent()->AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR0(1)));\n HloInstruction* zero =\n hlo->parent()->AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR0(0)));\n HloInstruction* interior_padding = hlo->parent()->AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(\n padding_config.interior_padding())));\n dynamic_size_adjusted =\n hlo->parent()->AddInstruction(HloInstruction::CreateBinary(\n dynamic_size_adjusted->shape(), HloOpcode::kSubtract,\n dynamic_size_adjusted, one));\n dynamic_size_adjusted =\n hlo->parent()->AddInstruction(HloInstruction::CreateBinary(\n dynamic_size_adjusted->shape(), HloOpcode::kMaximum,\n dynamic_size_adjusted, zero));\n dynamic_size_adjusted =\n hlo->parent()->AddInstruction(HloInstruction::CreateBinary(\n dynamic_size_adjusted->shape(), HloOpcode::kMultiply,\n dynamic_size_adjusted, interior_padding));\n dynamic_size_adjusted =\n hlo->parent()->AddInstruction(HloInstruction::CreateBinary(\n dynamic_size_adjusted->shape(), HloOpcode::kAdd,\n dynamic_size_adjusted, dynamic_size));\n }\n HloInstruction* adjustment = 
hlo->parent()->AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(\n padding_config.edge_padding_low() +\n padding_config.edge_padding_high())));\n dynamic_size_adjusted =\n hlo->parent()->AddInstruction(HloInstruction::CreateBinary(\n dynamic_size_adjusted->shape(), HloOpcode::kAdd,\n dynamic_size_adjusted, adjustment));\n SetDynamicSize(hlo, {}, dimension, dynamic_size_adjusted);\n return absl::OkStatus();\n });\n}\nabsl::Status DynamicDimensionInferenceVisitor::HandleReduce(\n HloInstruction* hlo) {\n if (!CanInfer(hlo)) {\n return absl::OkStatus();\n }\n auto* reduce = Cast(hlo);\n int64_t rank = -1;\n TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(\n reduce->shape(),\n [&](const Shape& subshape, const ShapeIndex& index) -> absl::Status {\n if (!subshape.IsArray()) {\n return absl::OkStatus();\n }\n if (rank < 0) {\n rank = subshape.rank();\n } else {\n TF_RET_CHECK(rank == subshape.rank());\n }\n return absl::OkStatus();\n }));\n TF_RET_CHECK(rank >= 0);\n absl::InlinedVector dynamic_sizes(rank, nullptr);\n TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(\n hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,\n int64_t operand_index, HloInstruction* dynamic_size) {\n int64_t operand_count = reduce->operand_count();\n CHECK_EQ(operand_count % 2, 0);\n if (operand_index >= reduce->input_count()) {\n return absl::OkStatus();\n }\n if (absl::c_count(reduce->dimensions(), dimension) != 0) {\n return absl::OkStatus();\n }\n int64_t dimensions_not_reduced_count = 0;\n for (int64_t i = 0; i < operand->shape().rank(); ++i) {\n if (dimension == i) {\n dynamic_sizes[dimensions_not_reduced_count] = dynamic_size;\n return absl::OkStatus();\n }\n if (!absl::c_linear_search(reduce->dimensions(), i)) {\n dimensions_not_reduced_count++;\n }\n }\n return absl::OkStatus();\n }));\n ShapeUtil::ForEachSubshape(\n reduce->shape(), [&](const Shape& subshape, ShapeIndex shape_index) {\n if (!subshape.IsArray()) {\n return;\n }\n 
SetDynamicSizes(reduce, shape_index, dynamic_sizes);\n });\n return absl::OkStatus();\n}\nabsl::Status DynamicDimensionInferenceVisitor::HandleDot(HloInstruction* hlo) {\n if (!CanInfer(hlo)) {\n return absl::OkStatus();\n }\n absl::InlinedVector dynamic_sizes(hlo->shape().rank(),\n nullptr);\n TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(\n hlo,\n [&](HloInstruction* operand, ShapeIndex operand_shape_index,\n int64_t operand_dimension, int64_t operand_index,\n HloInstruction* dynamic_size) -> absl::Status {\n HloInstruction* dot = hlo;\n const DotDimensionNumbers& dimension_numbers =\n dot->dot_dimension_numbers();\n absl::flat_hash_map result_dim_mapping;\n int64_t current_result_dims = 0;\n bool lhs = operand_index == 0;\n if (lhs) {\n for (int64_t i : dimension_numbers.lhs_batch_dimensions()) {\n result_dim_mapping[i] = current_result_dims++;\n }\n } else {\n for (int64_t i : dimension_numbers.rhs_batch_dimensions()) {\n result_dim_mapping[i] = current_result_dims++;\n }\n }\n for (int64_t i = 0; i < dot->operand(0)->shape().rank(); i++) {\n if (absl::c_linear_search(\n dimension_numbers.lhs_contracting_dimensions(), i)) {\n continue;\n }\n if (absl::c_linear_search(dimension_numbers.lhs_batch_dimensions(),\n i)) {\n continue;\n }\n if (lhs) {\n result_dim_mapping[i] = current_result_dims;\n }\n current_result_dims++;\n }\n for (int64_t i = 0; i < dot->operand(1)->shape().rank(); i++) {\n if (absl::c_linear_search(\n dimension_numbers.rhs_contracting_dimensions(), i)) {\n continue;\n }\n if (absl::c_linear_search(dimension_numbers.rhs_batch_dimensions(),\n i)) {\n continue;\n }\n if (!lhs) {\n result_dim_mapping[i] = current_result_dims;\n }\n current_result_dims++;\n }\n auto iter = result_dim_mapping.find(operand_dimension);\n if (iter != result_dim_mapping.end()) {\n dynamic_sizes[iter->second] = dynamic_size;\n }\n return absl::OkStatus();\n }));\n SetDynamicSizes(hlo, {}, dynamic_sizes);\n return absl::OkStatus();\n}\nabsl::Status 
DynamicDimensionInferenceVisitor::HandleTranspose(\n HloInstruction* hlo) {\n if (!CanInfer(hlo)) {\n return absl::OkStatus();\n }\n return ForEachOperandDynamicDimension(\n hlo,\n [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,\n int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {\n int64_t permuted_dim = -1;\n for (int64_t i = 0; i < hlo->dimensions().size(); ++i) {\n if (hlo->dimensions()[i] == dimension) {\n TF_RET_CHECK(permuted_dim == -1);\n permuted_dim = i;\n }\n }\n SetDynamicSize(hlo, {}, permuted_dim, dynamic_size);\n return absl::OkStatus();\n });\n}\nabsl::Status DynamicDimensionInferenceVisitor::HandleConvolution(\n HloInstruction* hlo) {\n if (!CanInfer(hlo)) {\n return absl::OkStatus();\n }\n return ForEachOperandDynamicDimension(\n hlo,\n [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,\n int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {\n HloInstruction* conv = hlo;\n const ConvolutionDimensionNumbers& dimension_numbers =\n conv->convolution_dimension_numbers();\n if (operand_index == 0) {\n if (dimension == dimension_numbers.input_batch_dimension()) {\n SetDynamicSize(conv, {}, dimension_numbers.output_batch_dimension(),\n dynamic_size);\n return absl::OkStatus();\n }\n if (dimension == dimension_numbers.input_feature_dimension()) {\n return absl::OkStatus();\n }\n } else {\n if (dimension == dimension_numbers.kernel_input_feature_dimension()) {\n return absl::OkStatus();\n }\n }\n return Unimplemented(\"Dynamic Spatial Convolution is not supported: %s\",\n conv->ToString());\n });\n}\nabsl::Status DynamicDimensionInferenceVisitor::HandleConcatenate(\n HloInstruction* hlo) {\n if (!CanInfer(hlo)) {\n return absl::OkStatus();\n }\n int64_t static_size = 0;\n std::vector dynamic_concat_dims;\n for (int64_t i = 0; i < hlo->operand_count(); ++i) {\n HloInstruction* concat_dim_size = nullptr;\n for (int64_t dimension = 0; dimension < hlo->operand(i)->shape().rank();\n 
++dimension) {\n if (dimension == hlo->concatenate_dimension()) {\n HloInstruction* dynamic_size =\n parent_->GetDynamicSize(hlo->mutable_operand(i), {}, dimension);\n concat_dim_size = dynamic_size;\n }\n }\n if (concat_dim_size == nullptr) {\n static_size +=\n hlo->operand(i)->shape().dimensions(hlo->concatenate_dimension());\n } else {\n dynamic_concat_dims.push_back(concat_dim_size);\n }\n }\n std::vector dynamic_sizes(hlo->shape().rank(), nullptr);\n if (!dynamic_concat_dims.empty()) {\n HloInstruction* dim_size_total =\n hlo->parent()->AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR0(static_size)));\n for (HloInstruction* dynamic_dim : dynamic_concat_dims) {\n dim_size_total = hlo->parent()->AddInstruction(\n HloInstruction::CreateBinary(dim_size_total->shape(), HloOpcode::kAdd,\n dim_size_total, dynamic_dim));\n }\n dynamic_sizes[hlo->concatenate_dimension()] = dim_size_total;\n }\n TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(\n hlo,\n [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,\n int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {\n TF_RET_CHECK(index.empty());\n int64_t concatenate_dimension = hlo->concatenate_dimension();\n if (concatenate_dimension == dimension) {\n return absl::OkStatus();\n }\n dynamic_sizes[dimension] = dynamic_size;\n return absl::OkStatus();\n }));\n SetDynamicSizes(hlo, {}, dynamic_sizes);\n return absl::OkStatus();\n}\nabsl::Status DynamicDimensionInferenceVisitor::HandleGetDimensionSize(\n HloInstruction* gds) {\n int64_t dim = gds->dimension();\n TF_RET_CHECK(dim < gds->operand(0)->shape().rank()) << gds->ToString();\n HloInstruction* operand = gds->mutable_operand(0);\n TF_RET_CHECK(dim < operand->shape().rank());\n HloInstruction* replacement = parent_->GetDynamicSize(operand, {}, dim);\n HloComputation* computation = gds->parent();\n if (replacement == nullptr &&\n !gds->operand(0)->shape().is_dynamic_dimension(dim)) {\n TF_RET_CHECK(dim < 
gds->operand(0)->shape().rank());\n int32_t size = gds->operand(0)->shape().dimensions(dim);\n replacement = computation->AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(size)),\n gds->name());\n }\n if (replacement != nullptr) {\n TF_RETURN_IF_ERROR(gds->ReplaceAllUsesWith(replacement));\n parent_->ReplaceAllDynamicDimensionUsesWith(gds, replacement);\n MarkAsChanged();\n }\n return absl::OkStatus();\n}\nabsl::Status DynamicDimensionInferenceVisitor::HandleSetDimensionSize(\n HloInstruction* hlo) {\n bool dimension_is_static = false;\n const HloInstruction* size = hlo->operand(1);\n if (size->opcode() == HloOpcode::kConstant) {\n TF_RET_CHECK(size->shape().rank() == 0);\n if (size->literal().Get({}) ==\n hlo->shape().dimensions(hlo->dimension()) &&\n !hlo->shape().is_dynamic_dimension(hlo->dimension())) {\n dimension_is_static = true;\n }\n }\n if (!dimension_is_static) {\n SetDynamicSize(hlo, {}, hlo->dimension(), hlo->mutable_operand(1),\n false);\n }\n TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(\n hlo,\n [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,\n int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {\n TF_RET_CHECK(operand_index == 0);\n if (dimension != hlo->dimension()) {\n SetDynamicSize(hlo, index, dimension, dynamic_size,\n false);\n }\n return absl::OkStatus();\n }));\n return absl::OkStatus();\n}\nabsl::Status DynamicDimensionInferenceVisitor::HandleDynamicConvolutionForward(\n HloInstruction* hlo, int64_t operand_index, int64_t dimension,\n HloInstruction* dynamic_size) {\n if (!CanInfer(hlo)) {\n return absl::OkStatus();\n }\n TF_RET_CHECK(operand_index == 0);\n const ConvolutionDimensionNumbers& dimension_numbers =\n hlo->convolution_dimension_numbers();\n if (dimension == dimension_numbers.input_batch_dimension()) {\n SetDynamicSize(hlo, {}, dimension_numbers.output_batch_dimension(),\n dynamic_size);\n return absl::OkStatus();\n }\n for (int64_t spatial_dim_index = 0;\n 
spatial_dim_index < dimension_numbers.input_spatial_dimensions_size();\n ++spatial_dim_index) {\n int64_t input_spatial_dim =\n dimension_numbers.input_spatial_dimensions(spatial_dim_index);\n int64_t output_spatial_dim =\n dimension_numbers.output_spatial_dimensions(spatial_dim_index);\n if (dimension == input_spatial_dim) {\n WindowDimension window_dim = hlo->window().dimensions(spatial_dim_index);\n DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(\n dynamic_size, window_dim.size(), window_dim.window_dilation(),\n window_dim.stride(), hlo->padding_type());\n TF_RET_CHECK(window_dim.base_dilation() == 1);\n SetDynamicSize(hlo, {}, output_spatial_dim,\n dynamic_window_dims.output_size);\n return absl::OkStatus();\n }\n }\n return absl::OkStatus();\n}\nabsl::Status DynamicDimensionInferenceVisitor::HandleDynamicWindowSamePadding(\n HloInstruction* hlo, HloInstruction* dynamic_size, int64_t operand_index,\n int64_t dimension) {\n if (!CanInfer(hlo)) {\n return absl::OkStatus();\n }\n const Window& window = hlo->window();\n const WindowDimension& window_dim = window.dimensions(dimension);\n if (!window_util::IsTrivialWindowDimension(window_dim)) {\n DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(\n dynamic_size, window_dim.size(), window_dim.window_dilation(),\n window_dim.stride(), PaddingType::PADDING_SAME);\n SetDynamicSize(hlo, {}, dimension, dynamic_window_dims.output_size);\n } else {\n SetDynamicSize(hlo, {}, dimension, dynamic_size);\n }\n return absl::OkStatus();\n}\nabsl::Status\nDynamicDimensionInferenceVisitor::HandleDynamicConvolutionInputGrad(\n HloInstruction* hlo, int64_t operand_index, int64_t dimension) {\n if (!CanInfer(hlo)) {\n return absl::OkStatus();\n }\n HloInstruction* input_sizes = hlo->mutable_operand(0);\n HloComputation* comp = hlo->parent();\n TF_RET_CHECK(input_sizes->shape().rank() == 1) << hlo->ToString();\n TF_RET_CHECK(input_sizes->shape().element_type() == S32) << hlo->ToString();\n 
// NOTE(review): this file's template arguments appear to have been stripped
// by text extraction (e.g. `ShapeTree>`, `absl::InlinedVector,`); restore
// them from the upstream source before compiling — TODO confirm.
//
// Tail of a handler that begins above this chunk (kept verbatim): slices one
// entry out of a rank-1 `input_sizes` tensor, reshapes it to an S32 scalar,
// and records it as the dynamic size of `dimension` on `hlo`.
TF_RET_CHECK(input_sizes->shape().dimensions(0) ==
             hlo->shape().dimensions_size())
    << hlo->ToString();
  HloInstruction* slice = comp->AddInstruction(
      HloInstruction::CreateSlice(ShapeUtil::MakeShape(S32, {1}), input_sizes,
                                  {dimension}, {dimension + 1}, {1}));
  HloInstruction* reshape = comp->AddInstruction(
      HloInstruction::CreateReshape(ShapeUtil::MakeScalarShape(S32), slice));
  SetDynamicSize(hlo, {}, dimension, reshape);
  return absl::OkStatus();
}

// Dynamic convolution kernel-gradient: nothing to infer; the output carries
// no dynamic dimension derived from the operands here.
absl::Status
DynamicDimensionInferenceVisitor::HandleDynamicConvolutionKernelGrad(
    HloInstruction* hlo, int64_t operand_index, int64_t dimension) {
  return absl::OkStatus();
}

// Forwards every dynamic dimension of the operands unchanged to the same
// (shape index, dimension) position of the output. Used by handlers whose
// ops preserve shape element-for-element.
absl::Status DynamicDimensionInferenceVisitor::PassThroughDynamicDimension(
    HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  // Per-leaf vector of dynamic-size instructions, indexed by dimension.
  // NOTE(review): element type lost in extraction — likely
  // ShapeTree<absl::InlinedVector<HloInstruction*, 2>>; verify upstream.
  ShapeTree> dynamic_sizes(
      hlo->shape());
  TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
      hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
               int64_t operand_index, HloInstruction* dynamic_size) {
        const Shape& subshape = ShapeUtil::GetSubshape(hlo->shape(), index);
        auto* element = dynamic_sizes.mutable_element(index);
        // Lazily size the per-leaf vector to the leaf's rank.
        element->resize(subshape.rank(), nullptr);
        (*element)[dimension] = dynamic_size;
        return absl::OkStatus();
      }));
  // Commit only leaves that collected at least one dynamic size.
  dynamic_sizes.ForEachElement([&](const ShapeIndex& index, const auto& sizes) {
    if (sizes.empty()) {
      return;
    }
    SetDynamicSizes(hlo, index, sizes);
  });
  return absl::OkStatus();
}

// kDomain is a no-op on data; pass dynamic dimensions straight through.
absl::Status DynamicDimensionInferenceVisitor::HandleDomain(
    HloInstruction* hlo) {
  return PassThroughDynamicDimension(hlo);
}

// async-start: only analyze threads this pass was told to cover.
absl::Status DynamicDimensionInferenceVisitor::HandleAsyncStart(
    HloInstruction* hlo) {
  if (!HloInstruction::IsThreadIncluded(hlo->async_execution_thread(),
                                        parent_->execution_threads_)) {
    return absl::OkStatus();
  }
  return DefaultAction(hlo);
}

// async-done: for excluded threads, force the result static via pad-to-static
// instead of propagating dynamic dimensions.
absl::Status DynamicDimensionInferenceVisitor::HandleAsyncDone(
    HloInstruction* hlo) {
  if (!HloInstruction::IsThreadIncluded(hlo->async_execution_thread(),
                                        parent_->execution_threads_)) {
    return InsertPadToStaticOnInstruction(hlo);
  }
  return DefaultAction(hlo);
}

// Unary elementwise ops preserve shape; pass through.
absl::Status DynamicDimensionInferenceVisitor::HandleElementwiseUnary(
    HloInstruction* hlo) {
  return PassThroughDynamicDimension(hlo);
}

// Select is handled like any n-ary elementwise op.
absl::Status DynamicDimensionInferenceVisitor::HandleSelect(
    HloInstruction* hlo) {
  return HandleElementwiseNary(hlo);
}

// N-ary elementwise: reconciles possibly-different dynamic sizes of the same
// dimension across operands. When two operands disagree, emits runtime HLO
// that picks max(size) if one side is a size-1 broadcast, else min(size),
// after inserting a runtime shape check.
absl::Status DynamicDimensionInferenceVisitor::HandleElementwiseNary(
    HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  HloComputation* comp = hlo->parent();
  // operand_sizes[dimension][operand_index] -> dynamic size (or nullptr).
  // NOTE(review): inner template args stripped by extraction — TODO restore.
  absl::InlinedVector, 2> operand_sizes(
      hlo->shape().rank(),
      absl::InlinedVector(hlo->operand_count(), nullptr));
  TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
      hlo,
      [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
          int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
        TF_RET_CHECK(index.empty());
        operand_sizes[dimension][operand_index] = dynamic_size;
        return absl::OkStatus();
      }));
  // Reconciled dynamic size per output dimension.
  absl::InlinedVector existing_sizes(hlo->shape().rank(),
                                     nullptr);
  for (int operand_index = 0; operand_index < hlo->operand_count();
       ++operand_index) {
    for (int64_t dimension = 0; dimension < hlo->shape().rank(); ++dimension) {
      HloInstruction* dynamic_size = operand_sizes[dimension][operand_index];
      if (dynamic_size == nullptr) {
        continue;
      }
      HloInstruction* existing_size = existing_sizes[dimension];
      if (existing_size == nullptr) {
        // First operand to report a dynamic size for this dimension wins
        // provisionally.
        existing_sizes[dimension] = dynamic_size;
      } else if (existing_sizes[dimension] != dynamic_size) {
        // Two different size instructions for the same dimension: add a
        // runtime shape check, then compute the reconciled size in HLO.
        TF_RETURN_IF_ERROR(
            InsertShapeCheck(existing_size, dynamic_size,
                             true));
        auto one = comp->AddInstruction(
            HloInstruction::CreateConstant(LiteralUtil::One(S32)));
        // "operand needs broadcast" iff its size is 1 AND smaller than the
        // existing size (size-1 dims broadcast in elementwise ops).
        auto operand_needs_broadcast =
            comp->AddInstruction(HloInstruction::CreateCompare(
                ShapeUtil::MakeShape(PRED, {}), dynamic_size, existing_size,
                ComparisonDirection::kLt));
        auto is_one = comp->AddInstruction(HloInstruction::CreateCompare(
            ShapeUtil::MakeShape(PRED, {}), dynamic_size, one,
            ComparisonDirection::kEq));
        operand_needs_broadcast =
            comp->AddInstruction(HloInstruction::CreateBinary(
                ShapeUtil::MakeShape(PRED, {}), HloOpcode::kAnd, is_one,
                operand_needs_broadcast));
        // Symmetric test for the previously recorded size.
        auto existing_needs_broadcast =
            comp->AddInstruction(HloInstruction::CreateCompare(
                ShapeUtil::MakeShape(PRED, {}), existing_size, dynamic_size,
                ComparisonDirection::kLt));
        is_one = comp->AddInstruction(HloInstruction::CreateCompare(
            ShapeUtil::MakeShape(PRED, {}), existing_size, one,
            ComparisonDirection::kEq));
        existing_needs_broadcast =
            comp->AddInstruction(HloInstruction::CreateBinary(
                ShapeUtil::MakeShape(PRED, {}), HloOpcode::kAnd, is_one,
                existing_needs_broadcast));
        auto needs_broadcast =
            comp->AddInstruction(HloInstruction::CreateBinary(
                ShapeUtil::MakeShape(PRED, {}), HloOpcode::kOr,
                operand_needs_broadcast, existing_needs_broadcast));
        auto max_size = comp->AddInstruction(HloInstruction::CreateBinary(
            ShapeUtil::MakeScalarShape(S32), HloOpcode::kMaximum, dynamic_size,
            existing_size));
        auto min_size = comp->AddInstruction(HloInstruction::CreateBinary(
            ShapeUtil::MakeScalarShape(S32), HloOpcode::kMinimum, dynamic_size,
            existing_size));
        // Broadcast case -> take the larger size; otherwise take the smaller.
        auto select_size = comp->AddInstruction(HloInstruction::CreateTernary(
            ShapeUtil::MakeScalarShape(S32), HloOpcode::kSelect,
            needs_broadcast, max_size, min_size));
        existing_sizes[dimension] = select_size;
      }
    }
  }
  SetDynamicSizes(hlo, {}, existing_sizes);
  return absl::OkStatus();
}

// Binary elementwise delegates to the n-ary reconciliation above.
absl::Status DynamicDimensionInferenceVisitor::HandleElementwiseBinary(
    HloInstruction* hlo) {
  return HandleElementwiseNary(hlo);
}

// Clamp preserves shape; pass through.
absl::Status DynamicDimensionInferenceVisitor::HandleClamp(
    HloInstruction* hlo) {
  return PassThroughDynamicDimension(hlo);
}

// Head of HandleDynamicReshape — body continues below this chunk boundary.
absl::Status DynamicDimensionInferenceVisitor::HandleDynamicReshape(
    HloInstruction* hlo) {
  if
 (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  // dynamic-reshape carries its output dimension sizes as explicit operands
  // (dim_sizes); just record them for each dynamic output dimension.
  // NOTE(review): `Cast(hlo)` lost its template argument in extraction —
  // likely Cast<HloDynamicReshapeInstruction>; verify upstream.
  HloDynamicReshapeInstruction* dynamic_reshape =
      Cast(hlo);
  for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
    if (hlo->shape().is_dynamic_dimension(i)) {
      SetDynamicSize(hlo, {}, i, dynamic_reshape->dim_sizes(i));
    }
  }
  return absl::OkStatus();
}

// Static reshape: maps each dynamic input dimension to an output dimension
// using CommonFactors grouping; falls back to a flatten-unflatten
// decomposition when a dynamic dimension is split/merged ambiguously.
absl::Status DynamicDimensionInferenceVisitor::HandleReshape(
    HloInstruction* const hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  VLOG(2) << "Handle reshape: " << hlo->ToString() << "\n";
  // Dynamic size per output dimension, filled in below.
  // NOTE(review): element template args stripped by extraction here and in
  // the aliases below (ReshapeGroup/ReshapeGroupPair are presumably
  // std::pair<int64_t, int64_t> and a pair of those) — TODO confirm.
  absl::InlinedVector dynamic_sizes(hlo->shape().rank(),
                                    nullptr);
  // A "reshape group" is a half-open [start, end) dimension interval on one
  // side of the reshape that factors against an interval on the other side.
  using ReshapeGroup = std::pair;
  using ReshapeGroupPair = std::pair;
  // True when op2's reshape undoes op1's reshape for the given group pairs
  // (input subshape of one structurally equals output subshape of the other).
  auto is_reverse_reshape_group_pair =
      [&](const HloInstruction* op1, const ReshapeGroupPair& p1,
          const HloInstruction* op2, const ReshapeGroupPair& p2) -> bool {
    return ShapeUtil::EqualStructure(
               ShapeUtil::GetSubshape(
                   op1->operand(0)->shape(),
                   ShapeIndex(p1.first.first, p1.first.second)),
               ShapeUtil::GetSubshape(
                   op2->operand(0)->shape(),
                   ShapeIndex(p2.second.first, p2.second.second))) &&
           ShapeUtil::EqualStructure(
               ShapeUtil::GetSubshape(
                   op1->shape(), ShapeIndex(p1.second.first, p1.second.second)),
               ShapeUtil::GetSubshape(
                   op2->operand(0)->shape(),
                   ShapeIndex(p2.first.first, p2.first.second)));
  };
  // Finds the CommonFactors group containing `input_dynamic_dimension` and
  // returns the (input interval, output interval) pair. CHECK-fails if the
  // dimension is not inside any group.
  auto find_reshape_group_pair = [](HloInstruction* reshape,
                                    int64_t input_dynamic_dimension) {
    VLOG(2) << "Find reshape pair: " << reshape->ToString() << "\n";
    auto common_factors =
        CommonFactors(reshape->operand(0)->shape().dimensions(),
                      reshape->shape().dimensions());
    ReshapeGroup input_dim = {-1, -1}, output_dim = {-1, -1};
    bool found = false;
    for (int64_t i = 0; i < common_factors.size() - 1; ++i) {
      auto start = common_factors[i];
      auto end = common_factors[i + 1];
      if (input_dynamic_dimension >= start.first &&
          input_dynamic_dimension < end.first) {
        input_dim.first = start.first;
        input_dim.second = end.first;
        output_dim.first = start.second;
        output_dim.second = end.second;
        VLOG(3) << "Found common_factor group pair: " << input_dim.first << ","
                << input_dim.second << "->" << output_dim.first << ","
                << output_dim.second << "\n";
        found = true;
        break;
      }
    }
    CHECK(found);
    return ReshapeGroupPair(input_dim, output_dim);
  };
  // A group pair needs flatten-unflatten when multiple input dims map to
  // multiple output dims (true split-and-merge).
  auto reshape_group_pair_needs_flatten =
      [](const ReshapeGroupPair& reshape_pair) {
        return reshape_pair.first.second - reshape_pair.first.first > 1 &&
               reshape_pair.second.second - reshape_pair.second.first > 1;
      };
  // Recursively walks producers looking for an earlier reshape that this one
  // reverses; if found, copies that reshape's dynamic sizes into
  // `dynamic_sizes` at the corresponding output dimensions and returns true.
  // NOTE(review): std::function signature lost in extraction — TODO restore.
  std::function
      find_reverse_past_reshape = [&](HloInstruction* op,
                                      const ReshapeGroupPair reshape_pair,
                                      int64_t dynamic_dimension_size) {
        VLOG(2) << "Find reverse past reshape from " << op->ToString()
                << " for " << dynamic_dimension_size << "\n";
        absl::InlinedVector found_dims;
        for (int op_dim_index = 0; op_dim_index < op->shape().rank();
             ++op_dim_index) {
          if (op->shape().dimensions(op_dim_index) == dynamic_dimension_size) {
            found_dims.push_back(op_dim_index);
          }
        }
        if (found_dims.empty()) {
          return false;
        }
        VLOG(3) << "Found " << found_dims.size() << "\n";
        if (op->opcode() == HloOpcode::kReshape) {
          for (auto op_dim_index : found_dims) {
            auto orig_reshape_pair = find_reshape_group_pair(op, op_dim_index);
            if (is_reverse_reshape_group_pair(op, orig_reshape_pair, hlo,
                                              reshape_pair)) {
              TF_CHECK_OK(ForEachOperandDynamicDimension(
                  op,
                  [&](HloInstruction* operand, ShapeIndex index,
                      int64_t op_dynamic_dimension, int64_t operand_index,
                      HloInstruction* operand_dynamic_size) -> absl::Status {
                    if (op_dynamic_dimension >= orig_reshape_pair.first.first &&
                        op_dynamic_dimension < orig_reshape_pair.first.second) {
                      auto dynamic_size =
                          parent_->GetDynamicSize(op, {}, op_dynamic_dimension);
                      CHECK_NE(dynamic_size, nullptr);
                      // Translate the earlier reshape's input dim into this
                      // reshape's output dim by offset within the group.
                      auto hlo_dimension_index = op_dynamic_dimension -
                                                 orig_reshape_pair.first.first +
                                                 reshape_pair.second.first;
                      dynamic_sizes[hlo_dimension_index] = dynamic_size;
                    }
                    return absl::OkStatus();
                  }));
              return true;
            }
          }
        }
        // Not a matching reshape: keep searching through the producers.
        for (auto operand : op->mutable_operands()) {
          if (find_reverse_past_reshape(operand, reshape_pair,
                                        dynamic_dimension_size)) {
            return true;
          }
          VLOG(3) << "Checking " << operand->ToString() << "\n";
        }
        return false;
      };
  // Group pair per dynamic input dimension.
  // NOTE(review): map key/value types stripped by extraction — TODO restore.
  absl::flat_hash_map reshape_group_pairs;
  bool need_flatten_unflatten =
      hlo->inferred_dimension() != -1 &&
      hlo->shape().dimensions(hlo->inferred_dimension()) == 1;
  TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
      hlo,
      [&](HloInstruction* operand, ShapeIndex index,
          int64_t input_dynamic_dimension, int64_t operand_index,
          HloInstruction* operand_dynamic_size) -> absl::Status {
        auto reshape_pair =
            find_reshape_group_pair(hlo, input_dynamic_dimension);
        reshape_group_pairs[input_dynamic_dimension] = reshape_pair;
        if (reshape_group_pair_needs_flatten(reshape_pair)) {
          need_flatten_unflatten = true;
        }
        return absl::OkStatus();
      }));
  if (need_flatten_unflatten) {
    if (hlo->inferred_dimension() != -1) {
      // Compute the inferred dimension's dynamic size as:
      //   (product of all operand dynamic sizes * product of static sizes)
      //   / (total output elements / output size of the inferred dim).
      HloInstruction* operand = hlo->mutable_operand(0);
      HloComputation* comp = hlo->parent();
      HloInstruction* dynamic_size = comp->AddInstruction(
          HloInstruction::CreateConstant(LiteralUtil::CreateR0(1)));
      int64_t static_size = 1;
      for (int64_t i = 0; i < operand->shape().rank(); i++) {
        HloInstruction* dynamic_dim_size =
            parent_->GetDynamicSize(operand, {}, i);
        if (dynamic_dim_size == nullptr) {
          // Fold static dims into a compile-time product.
          static_size *= operand->shape().dimensions(i);
        } else {
          dynamic_size = comp->AddInstruction(HloInstruction::CreateBinary(
              dynamic_size->shape(), HloOpcode::kMultiply, dynamic_size,
              dynamic_dim_size));
        }
      }
      HloInstruction* static_size_hlo =
          comp->AddInstruction(HloInstruction::CreateConstant(
              LiteralUtil::CreateR0(static_size)));
      dynamic_size = comp->AddInstruction(HloInstruction::CreateBinary(
          dynamic_size->shape(), HloOpcode::kMultiply, dynamic_size,
          static_size_hlo));
      int64_t size_without_inferred_dim =
          ShapeUtil::ElementsIn(hlo->shape()) /
          hlo->shape().dimensions(hlo->inferred_dimension());
      HloInstruction* size_without_inferred_dim_hlo =
          comp->AddInstruction(HloInstruction::CreateConstant(
              LiteralUtil::CreateR0(size_without_inferred_dim)));
      dynamic_size = comp->AddInstruction(HloInstruction::CreateBinary(
          dynamic_size->shape(), HloOpcode::kDivide, dynamic_size,
          size_without_inferred_dim_hlo));
      dynamic_sizes[hlo->inferred_dimension()] = dynamic_size;
      VLOG(3)
          << "Need to decompose a dynamic reshape to flatten-unflatten pair. "
          << comp->parent()->ToString();
      SetDynamicSizes(hlo, {}, dynamic_sizes);
      return absl::OkStatus();
    }
    return Internal(
        "Need inferred dimension to be set to "
        "flatten-unflatten pair. %s",
        hlo->ToString());
  }
  // Common path: map each dynamic input dimension to exactly one output
  // dimension and derive its dynamic size.
  TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
      hlo,
      [&](HloInstruction* operand, ShapeIndex index,
          int64_t input_dynamic_dimension, int64_t operand_index,
          HloInstruction* operand_dynamic_size) -> absl::Status {
        HloInstruction* const reshape = hlo;
        if (reshape->shape().rank() == 0) {
          VLOG(0) << "Reshaping a dynamic dimension into a scalar, which has "
                     "undefined behavior when input size is 0. The offending "
                     "instruction is: "
                  << reshape->ToString();
          return absl::OkStatus();
        }
        auto iter = reshape_group_pairs.find(input_dynamic_dimension);
        CHECK(iter != reshape_group_pairs.end());
        ReshapeGroupPair reshape_group_pair = iter->second;
        auto output_dim_start = reshape_group_pair.second.first,
             output_dim_end = reshape_group_pair.second.second;
        int64_t output_dynamic_dimension = -1;
        if (operand->shape().dimensions(input_dynamic_dimension) == 1) {
          // Degenerate (size-1) dynamic dims are only supported at the
          // most-major or most-minor position.
          if (input_dynamic_dimension == 0) {
            output_dynamic_dimension = 0;
          } else if (input_dynamic_dimension == operand->shape().rank() - 1) {
            output_dynamic_dimension = reshape->shape().rank() - 1;
          }
          if (output_dynamic_dimension == -1) {
            return Unimplemented(
                "Dynamic degenerated dimension that's not most-minor nor "
                "most-major is not supported %s",
                reshape->ToString());
          }
        }
        if (output_dynamic_dimension == -1 &&
            output_dim_end - output_dim_start == 1) {
          // One-to-one group: unambiguous.
          output_dynamic_dimension = output_dim_start;
        }
        if (output_dynamic_dimension == -1 &&
            output_dim_end - output_dim_start > 1) {
          // One input dim split into several output dims: disambiguate via
          // (1) inferred_dimension, (2) the dynamic output dim, (3) the only
          // non-degenerate output dim, (4) a reverse reshape upstream.
          output_dynamic_dimension = reshape->inferred_dimension();
          if (output_dynamic_dimension == -1) {
            for (int64_t i = output_dim_start; i < output_dim_end; ++i) {
              if (reshape->shape().is_dynamic_dimension(i)) {
                output_dynamic_dimension = i;
              }
            }
          }
          if (output_dynamic_dimension == -1) {
            std::vector output_non_degenerated;
            for (int64_t i = output_dim_start; i < output_dim_end; ++i) {
              if (reshape->shape().dimensions(i) != 1) {
                output_non_degenerated.push_back(i);
              }
            }
            if (output_non_degenerated.size() == 1) {
              output_dynamic_dimension = output_non_degenerated[0];
            }
          }
          if (output_dynamic_dimension == -1 &&
              find_reverse_past_reshape(
                  hlo->mutable_operand(0), reshape_group_pair,
                  hlo->mutable_operand(0)->shape().dimensions(
                      input_dynamic_dimension))) {
            return absl::OkStatus();
          }
          if (output_dynamic_dimension == -1) {
            return InvalidArgument(
                "Reshape's input dynamic dimension is decomposed into "
                "multiple output dynamic dimensions, but the constraint is "
                "ambiguous and XLA can't infer the output dimension %s. ",
                hlo->ToString());
          }
        }
        CHECK_NE(output_dynamic_dimension, -1);
        const int64_t input_dim_size =
            operand->shape().dimensions(input_dynamic_dimension);
        const int64_t output_dim_size =
            reshape->shape().dimensions(output_dynamic_dimension);
        VLOG(2) << "input_dim_size: " << input_dim_size
                << " output_dim_size: " << output_dim_size;
        if (input_dim_size == output_dim_size) {
          // Same static bound: dynamic size carries over unchanged.
          dynamic_sizes[output_dynamic_dimension] = operand_dynamic_size;
        }
        if (input_dim_size > output_dim_size) {
          // Input dim split: output dynamic size = input size / split factor.
          TF_RET_CHECK(input_dim_size % output_dim_size == 0)
              << reshape->ToString();
          const int64_t divisor = input_dim_size / output_dim_size;
          HloInstruction* divisor_hlo =
              hlo->parent()->AddInstruction(HloInstruction::CreateConstant(
                  LiteralUtil::CreateR0(divisor)));
          HloInstruction* new_dynamic_size =
              hlo->parent()->AddInstruction(HloInstruction::CreateBinary(
                  operand_dynamic_size->shape(), HloOpcode::kDivide,
                  operand_dynamic_size, divisor_hlo));
          dynamic_sizes[output_dynamic_dimension] = new_dynamic_size;
        }
        if (input_dim_size < output_dim_size) {
          // Dims merged: combine the already-known output dynamic size (or
          // its static bound) with this operand's dynamic size.
          HloInstruction* output_dynamic_size =
              dynamic_sizes[output_dynamic_dimension];
          if (output_dynamic_size == nullptr) {
            output_dynamic_size =
                hlo->parent()->AddInstruction(HloInstruction::CreateConstant(
                    LiteralUtil::CreateR0(output_dim_size)));
          }
          HloInstruction* divisor_hlo = hlo->parent()->AddInstruction(
              HloInstruction::CreateConstant(LiteralUtil::CreateR0(
                  operand->shape().dimensions(input_dynamic_dimension))));
          HloInstruction* new_dynamic_size =
              hlo->parent()->AddInstruction(HloInstruction::CreateBinary(
                  output_dynamic_size->shape(), HloOpcode::kDivide,
                  output_dynamic_size, divisor_hlo));
          new_dynamic_size =
              hlo->parent()->AddInstruction(HloInstruction::CreateBinary(
                  output_dynamic_size->shape(), HloOpcode::kMultiply,
                  new_dynamic_size, operand_dynamic_size));
          dynamic_sizes[output_dynamic_dimension] = new_dynamic_size;
        }
        return absl::OkStatus();
      }));
  SetDynamicSizes(hlo, {}, dynamic_sizes);
  return absl::OkStatus();
}

// reduce-window: a dynamic input dimension propagates to every leaf of the
// (possibly variadic/tuple) output; non-trivial windows rescale the size via
// the windowed-output-size computation.
absl::Status DynamicDimensionInferenceVisitor::HandleReduceWindow(
    HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  // NOTE(review): ShapeTree element type stripped by extraction, as above.
  ShapeTree> dynamic_sizes(
      hlo->shape());
  TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
      hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
               int64_t operand_index, HloInstruction* dynamic_size) {
        // NOTE(review): `Cast(hlo)` template argument stripped — likely
        // Cast<HloReduceWindowInstruction>; verify upstream.
        auto* reduce_window = Cast(hlo);
        const WindowDimension& window_dim =
            reduce_window->window().dimensions(dimension);
        // Init-value operands (index >= input_count) carry no dimensions.
        if (operand_index >= reduce_window->input_count()) {
          return absl::OkStatus();
        }
        if (!window_util::IsTrivialWindowDimension(window_dim)) {
          // Window changes the extent: recompute the output size in HLO.
          DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(
              dynamic_size, window_dim.size(), window_dim.window_dilation(),
              window_dim.stride(), PaddingType::PADDING_VALID);
          dynamic_size = dynamic_window_dims.output_size;
        }
        // Record the size on every leaf of the output shape.
        ShapeUtil::ForEachSubshape(
            reduce_window->shape(),
            [&](const Shape& subshape, ShapeIndex reduce_window_result_index) {
              if (!ShapeUtil::IsLeafIndex(reduce_window->shape(),
                                          reduce_window_result_index)) {
                return;
              }
              auto* leaf_dynamic_sizes =
                  dynamic_sizes.mutable_element(reduce_window_result_index);
              leaf_dynamic_sizes->resize(subshape.rank(), nullptr);
              (*leaf_dynamic_sizes)[dimension] = dynamic_size;
            });
        return absl::OkStatus();
      }));
  dynamic_sizes.ForEachElement(
      [&](const ShapeIndex& shape_index,
          const absl::InlinedVector sizes) {
        if (sizes.empty()) {
          return;
        }
        SetDynamicSizes(hlo, shape_index, sizes);
      });
  return absl::OkStatus();
}

// Head of HandleSelectAndScatter — body continues below this chunk boundary.
absl::Status DynamicDimensionInferenceVisitor::HandleSelectAndScatter(
    HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  return
 ForEachOperandDynamicDimension(
      hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
               int64_t operand_index, HloInstruction* dynamic_size) {
        // Operand 1 is the source of scattered values; its dynamic dims do
        // not shape the output. Operand 0's dims pass straight through.
        if (operand_index == 1) {
          return absl::OkStatus();
        }
        SetDynamicSize(hlo, {}, dimension, dynamic_size);
        return absl::OkStatus();
      });
}

// Static slice: transforms the operand's dynamic size through the slice's
// start/limit/stride to get the output's dynamic size:
//   out = ceil((dynamic - start) / stride), built as HLO arithmetic.
absl::Status DynamicDimensionInferenceVisitor::HandleSlice(
    HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  return ForEachOperandDynamicDimension(
      hlo,
      // NOTE(review): the unnamed-parameter placeholders below lost their
      // `/*name*/` comments in extraction (`ShapeIndex ,` / `int64_t ,`);
      // restore from upstream.
      [&](HloInstruction* operand, ShapeIndex , int64_t dimension,
          int64_t ,
          HloInstruction* dynamic_size) -> absl::Status {
        int64_t start = hlo->slice_starts(dimension);
        int64_t limit = hlo->slice_limits(dimension);
        int64_t stride = hlo->slice_strides(dimension);
        int64_t size = CeilOfRatio(limit - start, stride);
        if (size == 1) {
          // A size-1 slice result is static regardless of the input size.
          TF_RET_CHECK(!hlo->shape().is_dynamic_dimension(dimension));
          return absl::OkStatus();
        }
        TF_RET_CHECK(hlo->shape().is_dynamic_dimension(dimension));
        if (start != 0) {
          // dynamic -= start
          dynamic_size = hlo->AddInstruction(HloInstruction::CreateBinary(
              dynamic_size->shape(), HloOpcode::kSubtract, dynamic_size,
              hlo->AddInstruction(HloInstruction::CreateConstant(
                  LiteralUtil::CreateR0(start)))));
        }
        if (stride != 1) {
          // dynamic = (dynamic + stride - 1) / stride  (ceil division)
          dynamic_size = hlo->AddInstruction(HloInstruction::CreateBinary(
              dynamic_size->shape(), HloOpcode::kAdd, dynamic_size,
              hlo->AddInstruction(HloInstruction::CreateConstant(
                  LiteralUtil::CreateR0(stride - 1)))));
          dynamic_size = hlo->AddInstruction(HloInstruction::CreateBinary(
              dynamic_size->shape(), HloOpcode::kDivide, dynamic_size,
              hlo->AddInstruction(HloInstruction::CreateConstant(
                  LiteralUtil::CreateR0(stride)))));
        }
        SetDynamicSize(hlo, {}, dimension, dynamic_size);
        return absl::OkStatus();
      });
}

// dynamic-slice: dynamic dims propagate only when the slice takes the full
// extent of that dimension; partial dynamic selection is unimplemented.
absl::Status DynamicDimensionInferenceVisitor::HandleDynamicSlice(
    HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  return ForEachOperandDynamicDimension(
      hlo,
      [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
          int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
        if (hlo->shape().dimensions(dimension) == 1) {
          // Slicing down to a single element: output dim is static.
          return absl::OkStatus();
        }
        if (hlo->shape().dimensions(dimension) !=
            hlo->operand(0)->shape().dimensions(dimension)) {
          return Unimplemented(
              "Dynamic dimension propagation on DynamicSlice where a partial "
              "dimension is selected %s",
              hlo->ToString());
        }
        TF_RET_CHECK(operand_index == 0);
        TF_RET_CHECK(index.empty());
        SetDynamicSize(hlo, {}, dimension, dynamic_size);
        return absl::OkStatus();
      });
}

// dynamic-update-slice: the output takes operand 0's dynamic dims; a
// smaller dynamic update region forces that dimension static.
absl::Status DynamicDimensionInferenceVisitor::HandleDynamicUpdateSlice(
    HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  // NOTE(review): InlinedVector template args stripped by extraction.
  absl::InlinedVector output_dynamic_sizes(
      hlo->shape().rank(), nullptr);
  TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
      hlo,
      [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
          int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
        TF_RET_CHECK(index.empty());
        if (hlo->shape().dimensions(dimension) !=
            hlo->operand(0)->shape().dimensions(dimension)) {
          return Unimplemented(
              "Dynamic dimension propagation on DynamicUpdateSlice where a "
              "partial dimension is selected %s",
              hlo->ToString());
        }
        if (operand_index == 1 &&
            hlo->operand(1)->shape().dimensions(dimension) <
                hlo->operand(0)->shape().dimensions(dimension)) {
          // The update (operand 1) covers only part of the dimension: the
          // result dimension is treated as static.
          hlo->mutable_shape()->set_dynamic_dimension(dimension, false);
          return absl::OkStatus();
        }
        output_dynamic_sizes[dimension] = dynamic_size;
        return absl::OkStatus();
      }));
  SetDynamicSizes(hlo, {}, output_dynamic_sizes);
  return absl::OkStatus();
}

// Reverse preserves shape; pass through.
absl::Status DynamicDimensionInferenceVisitor::HandleReverse(
    HloInstruction* hlo) {
  return PassThroughDynamicDimension(hlo);
}

// Gather: maps dynamic dims of the data operand (full-extent slices only)
// to offset dims of the output, and dynamic dims of the indices operand to
// the corresponding batch dims of the output.
absl::Status DynamicDimensionInferenceVisitor::HandleGather(
    HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  absl::InlinedVector output_dynamic_sizes(
      hlo->shape().rank(), nullptr);
  TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
      hlo,
      [&](HloInstruction* operand, ShapeIndex ,
          int64_t input_dynamic_dimension, int64_t operand_index,
          HloInstruction* dynamic_size) -> absl::Status {
        const GatherDimensionNumbers& gather_dims =
            hlo->gather_dimension_numbers();
        if (operand_index == 0) {
          if (hlo->gather_slice_sizes()[input_dynamic_dimension] == 1) {
            // Size-1 slice of the dynamic dim: output is unaffected.
            return absl::OkStatus();
          }
          if (hlo->gather_slice_sizes()[input_dynamic_dimension] ==
              operand->shape().dimensions(input_dynamic_dimension)) {
            // Full-extent slice: find the output offset dim that corresponds
            // to this operand dim, skipping collapsed slice dims.
            int64_t operand_dimension = 0;
            for (int64_t output_dimension : gather_dims.offset_dims()) {
              TF_RET_CHECK(output_dimension < hlo->shape().rank());
              while (operand_dimension < operand->shape().rank() &&
                     absl::c_linear_search(gather_dims.collapsed_slice_dims(),
                                           operand_dimension)) {
                ++operand_dimension;
              }
              TF_RET_CHECK(operand_dimension < operand->shape().rank());
              if (operand_dimension == input_dynamic_dimension) {
                output_dynamic_sizes[output_dimension] = dynamic_size;
                return absl::OkStatus();
              }
              ++operand_dimension;
            }
            return Internal("Invalid instruction: %s", hlo->ToString());
          }
          return Unimplemented(
              "Detects a dynamic dimension on the data input of gather, which "
              "is not supported: %s, %lld",
              hlo->ToString(), input_dynamic_dimension);
        }
        // operand_index == 1: dynamic dim is on the indices operand.
        int64_t indices_rank = hlo->operand(1)->shape().rank();
        if (gather_dims.index_vector_dim() == indices_rank) {
          // Implicit trailing index-vector dim.
          ++indices_rank;
        }
        int64_t output_rank = hlo->shape().rank();
        // Walk output batch dims in order, pairing them with indices dims
        // (skipping the index-vector dim).
        int64_t indices_dim = 0;
        for (int64_t output_dim = 0; output_dim < output_rank; ++output_dim) {
          if (!absl::c_linear_search(gather_dims.offset_dims(), output_dim)) {
            if (indices_dim == gather_dims.index_vector_dim()) {
              indices_dim++;
            }
            if (indices_dim++ == input_dynamic_dimension) {
              output_dynamic_sizes[output_dim] = dynamic_size;
              return absl::OkStatus();
            }
          }
        }
        CHECK(indices_dim ==
 indices_rank);
        return Unimplemented(
            "Detects a non-batch dynamic dimension of gather, "
            "which is not supported: %s",
            hlo->ToString());
      }));
  SetDynamicSizes(hlo, {}, output_dynamic_sizes);
  return absl::OkStatus();
}

// Conditional: threads dynamic sizes into each branch (widening the branch
// parameter tuple when the size values are not already passed in), runs
// inference inside each branch, then appends any dynamic sizes produced by
// branch roots as extra tuple outputs and rebuilds the conditional.
// NOTE(review): several container declarations below lost their template
// arguments in extraction (`std::vector`, `ShapeTree>`,
// `absl::flat_hash_map`) — restore from upstream before compiling.
absl::Status DynamicDimensionInferenceVisitor::HandleConditional(
    HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  std::vector new_branch_computations;
  std::vector new_operands;
  // Maps (shape index, dimension) -> output tuple index carrying its size.
  ShapeTree> dynamic_output_mapping(
      hlo->shape());
  bool need_rewrite = false;
  for (int64_t branch_index = 0; branch_index < hlo->branch_count();
       ++branch_index) {
    std::vector operands_to_add;
    absl::flat_hash_map
        dynamic_size_to_operand_id_index_map;
    // Branch b consumes conditional operand b + 1 (operand 0 is the
    // predicate/index).
    const int64_t operand_index = branch_index + 1;
    int operand_count =
        hlo->operand(operand_index)->shape().tuple_shapes_size();
    // First pass: find where each dynamic-size value lives in the branch
    // input tuple, appending new entries for sizes not already present.
    TF_RETURN_IF_ERROR(ForEachDynamicDimensionInOperand(
        hlo, operand_index,
        [&](HloInstruction*, ShapeIndex, int64_t, int64_t,
            HloInstruction* dynamic_size) -> absl::Status {
          TF_RET_CHECK(hlo->operand(operand_index)->shape().IsTuple())
              << "Only tuple typed inputs can have dynamic dimension. Please "
                 "file a bug against XLA team.";
          const HloInstruction* tuple_operand = hlo->operand(operand_index);
          for (int64_t i = 0; i < tuple_operand->operand_count(); ++i) {
            // Size value already an element of the input tuple: reuse it.
            if (dynamic_size == tuple_operand->operand(i)) {
              dynamic_size_to_operand_id_index_map[dynamic_size] = i;
              return absl::OkStatus();
            }
          }
          auto iter = dynamic_size_to_operand_id_index_map.find(dynamic_size);
          if (iter == dynamic_size_to_operand_id_index_map.end()) {
            operands_to_add.push_back(dynamic_size);
            dynamic_size_to_operand_id_index_map[dynamic_size] =
                operand_count++;
          }
          return absl::OkStatus();
        }));
    HloInstruction* original_input = hlo->mutable_operand(operand_index);
    HloComputation* branch_computation = hlo->branch_computation(branch_index);
    HloComputation* new_computation = branch_computation;
    CallInliner::InlinedInstructionMap inline_map;
    HloInstruction* new_operand = hlo->mutable_operand(operand_index);
    Shape new_param_shape =
        branch_computation->parameter_instruction(0)->shape();
    if (!operands_to_add.empty()) {
      // Widen the branch input tuple and the branch computation's parameter
      // to carry the extra dynamic-size values.
      TF_RET_CHECK(original_input->shape().IsTuple());
      need_rewrite = true;
      new_operand = TupleUtil::AppendSuffix(original_input, operands_to_add);
      for (HloInstruction* operand : operands_to_add) {
        ShapeUtil::AppendShapeToTuple(operand->shape(), &new_param_shape);
      }
      TF_ASSIGN_OR_RETURN(
          std::tie(new_computation, inline_map),
          WidenComputation(branch_computation, new_param_shape));
    }
    // Second pass: bind each dynamic dimension of the branch parameter to
    // the tuple element holding its size.
    DynamicParameterBinding dynamic_parameter_binding;
    TF_RETURN_IF_ERROR(ForEachDynamicDimensionInOperand(
        hlo, operand_index,
        [&](HloInstruction*, ShapeIndex index, int64_t dimension,
            int64_t operand_index, HloInstruction* dynamic_size) {
          DynamicParameterBinding::DynamicSizeParameter dynamic_parameter{
              0, {dynamic_size_to_operand_id_index_map[dynamic_size]}};
          DynamicParameterBinding::DynamicDimension dynamic_dimension{
              0, {index}, dimension};
          TF_RETURN_IF_ERROR(dynamic_parameter_binding.Bind(dynamic_parameter,
                                                            dynamic_dimension));
          return absl::OkStatus();
        }));
    VLOG(2) << "dynamic_parameter_binding for conditional branch"
            << dynamic_parameter_binding;
    // Keep the parent's instruction->size mappings valid across inlining.
    for (auto [old_inst, new_inst] : inline_map) {
      parent_->CopyMapping(
          old_inst,
          new_inst,
          &inline_map);
    }
    // Recursively infer dynamic dimensions inside the (possibly widened)
    // branch computation.
    TF_ASSIGN_OR_RETURN(
        bool changed,
        DynamicDimensionInferenceVisitor::Run(
            new_computation, dataflow_analysis_, dynamic_parameter_binding,
            parent_, custom_call_handler_, shape_check_mode_,
            assertion_generator_));
    if (changed) {
      MarkAsChanged();
    }
    new_branch_computations.push_back(new_computation);
    new_operands.push_back(new_operand);
  }
  // Assign an output tuple slot to every dynamic dimension produced by any
  // branch root.
  int tuple_count = hlo->shape().tuple_shapes_size();
  ShapeUtil::ForEachSubshape(
      hlo->shape(), [&](const Shape& subshape, const ShapeIndex& index) {
        if (!subshape.IsArray()) {
          return;
        }
        for (int64_t i = 0; i < subshape.rank(); ++i) {
          for (int64_t j = 0; j < new_branch_computations.size(); ++j) {
            HloInstruction* dynamic_size = parent_->GetDynamicSize(
                new_branch_computations[j]->root_instruction(), index, i);
            if (dynamic_size) {
              if (dynamic_output_mapping.element(index).contains(i)) {
                continue;
              }
              dynamic_output_mapping.mutable_element(index)->emplace(
                  i, tuple_count++);
            }
          }
        }
      });
  // Make every branch root emit a value for every assigned slot: the actual
  // dynamic size when the branch has one, else the static bound constant.
  for (int64_t branch_index = 0; branch_index < hlo->branch_count();
       ++branch_index) {
    std::vector hlos_to_add_in_root;
    ShapeUtil::ForEachSubshape(
        hlo->shape(), [&](const Shape& subshape, const ShapeIndex& index) {
          if (!subshape.IsArray()) {
            return;
          }
          for (int64_t i = 0; i < subshape.rank(); ++i) {
            if (dynamic_output_mapping.element(index).contains(i)) {
              HloInstruction* dynamic_size = parent_->GetDynamicSize(
                  new_branch_computations[branch_index]->root_instruction(),
                  index, i);
              if (dynamic_size) {
                hlos_to_add_in_root.push_back(dynamic_size);
              } else {
                HloInstruction* constant_size =
                    new_branch_computations[branch_index]->AddInstruction(
                        HloInstruction::CreateConstant(
                            LiteralUtil::CreateR0(
                                subshape.dimensions(i))));
                hlos_to_add_in_root.push_back(constant_size);
              }
            }
          }
        });
    VLOG(2) << "hlos_to_add_in_root:" << hlos_to_add_in_root.size();
    if (!hlos_to_add_in_root.empty()) {
      need_rewrite = true;
      HloInstruction* new_branch_root = TupleUtil::AppendSuffix(
          new_branch_computations[branch_index]->root_instruction(),
          hlos_to_add_in_root);
      new_branch_computations[branch_index]->set_root_instruction(
          new_branch_root,
          true);
    }
  }
  if (!need_rewrite) {
    return absl::OkStatus();
  }
  // Rebuild the conditional with the widened branches/operands, then strip
  // the appended size outputs back off for the original users.
  HloInstruction* new_conditional =
      hlo->parent()->AddInstruction(HloInstruction::CreateConditional(
          new_branch_computations[0]->root_instruction()->shape(),
          hlo->mutable_operand(0), new_branch_computations, new_operands));
  HloInstruction* new_conditional_extracted = TupleUtil::ExtractPrefix(
      new_conditional, hlo->shape().tuple_shapes_size());
  dynamic_output_mapping.ForEachElement(
      [&](const ShapeIndex& index,
          const absl::flat_hash_map& dim_to_output) {
        for (auto iter : dim_to_output) {
          int64_t dim = iter.first;
          int64_t output_index = iter.second;
          // Read each dynamic size back out of the new conditional's tuple.
          HloInstruction* dynamic_size = hlo->parent()->AddInstruction(
              HloInstruction::CreateGetTupleElement(
                  ShapeUtil::MakeScalarShape(S32), new_conditional,
                  output_index));
          SetDynamicSize(new_conditional, index, dim, dynamic_size,
                         false);
          SetDynamicSize(new_conditional_extracted, index, dim, dynamic_size,
                         false);
        }
      });
  TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(new_conditional_extracted));
  TF_RETURN_IF_ERROR(hlo->parent()->RemoveInstruction(hlo));
  // The replacements are fully processed; don't revisit them.
  SetVisited(*new_conditional);
  SetVisited(*new_conditional_extracted);
  MarkAsChanged();
  return absl::OkStatus();
}

// Map behaves like an n-ary elementwise op for dynamic dimensions.
absl::Status DynamicDimensionInferenceVisitor::HandleMap(HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  return HandleElementwiseNary(hlo);
}

// Head of HandleScatter — body continues below this chunk boundary.
absl::Status DynamicDimensionInferenceVisitor::HandleScatter(
    HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return
 absl::OkStatus();
  }
  return ForEachOperandDynamicDimension(
      hlo,
      [&](HloInstruction* operand, ShapeIndex dynamic_index, int64_t dimension,
          int64_t operand_index,
          HloInstruction* operand_dynamic_size) -> absl::Status {
        if (operand_index == 0) {
          // The scatter result inherits the base operand's dynamic dims.
          SetDynamicSize(hlo, {}, dimension, operand_dynamic_size);
          return absl::OkStatus();
        }
        const ScatterDimensionNumbers& scatter_dims =
            hlo->scatter_dimension_numbers();
        // Dynamic dims on the updates operand (index 2) are only accepted
        // when the update window dim matches the base operand's dim exactly,
        // with the same dynamic-size instruction.
        if (operand_index == 2 &&
            absl::c_linear_search(scatter_dims.update_window_dims(),
                                  dimension)) {
          // Map each update window dim to its dim in the base operand by
          // skipping inserted window dims.
          std::vector update_window_dims_in_operand;
          for (int64_t i = 0; i < hlo->operand(0)->shape().rank(); ++i) {
            if (absl::c_linear_search(scatter_dims.inserted_window_dims(), i)) {
              continue;
            }
            update_window_dims_in_operand.push_back(i);
          }
          for (int64_t i = 0; i < scatter_dims.update_window_dims_size(); ++i) {
            if (scatter_dims.update_window_dims(i) == dimension) {
              const Shape& operand_shape = hlo->operand(0)->shape();
              const Shape& update_shape = hlo->operand(2)->shape();
              int64_t dim_in_operand = update_window_dims_in_operand[i];
              if (operand_shape.dimensions(dim_in_operand) !=
                  update_shape.dimensions(dimension)) {
                return Unimplemented(
                    "Dynamic dimension of update window dims that are not the "
                    "same as corresponding operand dim is not supported: "
                    "%s : %d : %d : %d",
                    hlo->ToString(), i, update_shape.dimensions(dimension),
                    operand_shape.dimensions(dim_in_operand));
              }
              HloInstruction* base_dynamic_size = parent_->GetDynamicSize(
                  hlo->mutable_operand(0), {}, dim_in_operand);
              if (base_dynamic_size == nullptr ||
                  !operand_shape.is_dynamic_dimension(dim_in_operand)) {
                // Base dim is static: nothing further to check.
                return absl::OkStatus();
              }
              if (base_dynamic_size != operand_dynamic_size) {
                return Unimplemented(
                    "Dynamic dimension size of update window dims that are not "
                    "the same as corresponding operand dim is not supported: "
                    "%s.\n Dynamic dim size of base: %s, dynamic dim size of "
                    "update: %s",
                    hlo->ToString(), base_dynamic_size->ToString(),
                    operand_dynamic_size->ToString());
              }
            }
          }
        }
        return absl::OkStatus();
      });
}

// While: threads each dynamic size through the loop as an extra tuple
// element (via WhileUtil::MakeInstructionsLiveIn), runs inference on the
// body and condition with those bindings, extends the body root to forward
// the sizes, and finally reads them back out of the new while's result.
// NOTE(review): several declarations below lost template arguments in
// extraction (`ShapeTree>`, `std::vector`, `absl::flat_hash_map`) — restore
// from upstream before compiling.
absl::Status DynamicDimensionInferenceVisitor::HandleWhile(
    HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  Shape original_shape = hlo->shape();
  // Maps (shape index, dimension) -> tuple slot carrying its dynamic size.
  ShapeTree> dynamic_output_mapping(
      original_shape);
  std::vector operands_to_add;
  const int original_tuple_count = original_shape.tuple_shapes_size();
  int operand_count = original_tuple_count;
  DynamicParameterBinding binding_for_while;
  TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
      hlo,
      [&](HloInstruction* operand, ShapeIndex index, int64_t dim,
          int64_t operand_num, HloInstruction* dynamic_size) -> absl::Status {
        TF_RET_CHECK(operand_num == 0);
        operands_to_add.push_back(dynamic_size);
        dynamic_output_mapping.mutable_element(index)->emplace(dim,
                                                               operand_count);
        // Bind loop-carried tuple slot `operand_count` as the size of
        // dimension `dim` at `index` of the loop parameter.
        DynamicParameterBinding::DynamicDimension dynamic_dimension{
            0,
            index,
            dim,
        };
        DynamicParameterBinding::DynamicSizeParameter dynamic_size_param{
            0,
            {operand_count},
        };
        TF_RETURN_IF_ERROR(
            binding_for_while.Bind(dynamic_size_param, dynamic_dimension));
        ++operand_count;
        return absl::OkStatus();
      }));
  if (operands_to_add.empty()) {
    // No dynamic dimensions flow into the loop; nothing to rewrite.
    return absl::OkStatus();
  }
  HloInstruction* old_tuple_operand = hlo->mutable_operand(0);
  HloInstruction* old_body_root = hlo->while_body()->root_instruction();
  // Rebuild the while with the dynamic sizes appended as live-in values.
  TF_ASSIGN_OR_RETURN(WhileUtil::MakeInstructionsLiveInResult result,
                      WhileUtil::MakeInstructionsLiveIn(hlo, operands_to_add));
  TF_RET_CHECK(result.replacement_instr->opcode() == HloOpcode::kTuple);
  HloInstruction* new_tuple_operand =
      result.new_while_instr->mutable_operand(0);
  parent_->CopyMapping(old_tuple_operand,
                       new_tuple_operand);
  hlo = result.new_while_instr;
  SetVisited(*hlo);
  // Migrate recorded dynamic sizes from old to cloned instructions.
  for (auto [old_inst, new_inst] : result.while_body_instruction_map) {
    parent_->CopyMapping(
        old_inst,
        new_inst,
        &result.while_body_instruction_map);
  }
  parent_->CopyMapping(old_body_root,
                       hlo->while_body()->root_instruction(),
                       &result.while_body_instruction_map);
  for (auto [old_inst, new_inst] : result.while_condition_instruction_map) {
    parent_->CopyMapping(
        old_inst,
        new_inst,
        &result.while_condition_instruction_map);
  }
  // Run inference inside the rewritten body and condition with the new
  // parameter bindings.
  TF_RETURN_IF_ERROR(DynamicDimensionInferenceVisitor::Run(
                         hlo->while_body(), dataflow_analysis_,
                         binding_for_while, parent_, custom_call_handler_,
                         shape_check_mode_, assertion_generator_)
                         .status());
  TF_RETURN_IF_ERROR(DynamicDimensionInferenceVisitor::Run(
                         hlo->while_condition(), dataflow_analysis_,
                         binding_for_while, parent_, custom_call_handler_,
                         shape_check_mode_, assertion_generator_)
                         .status());
  // Build a new body root: original elements pass through via GTEs; the
  // appended slots carry each dimension's dynamic size out of the iteration.
  HloInstruction* body_root = hlo->while_body()->root_instruction();
  std::vector new_root_operands(body_root->operand_count(),
                                nullptr);
  for (int i = 0; i < original_tuple_count; ++i) {
    new_root_operands[i] =
        body_root->AddInstruction(HloInstruction::CreateGetTupleElement(
            body_root->shape().tuple_shapes(i), body_root, i));
  }
  TF_RETURN_IF_ERROR(dynamic_output_mapping.ForEachElementWithStatus(
      [&](const ShapeIndex& index,
          const absl::flat_hash_map& dim_to_size)
          -> absl::Status {
        for (auto [dimension, output_index] : dim_to_size) {
          TF_RET_CHECK(new_root_operands[output_index] == nullptr);
          HloInstruction* dynamic_size =
              parent_->GetDynamicSize(body_root, index, dimension);
          TF_RET_CHECK(dynamic_size != nullptr);
          new_root_operands[output_index] = dynamic_size;
        }
        return absl::OkStatus();
      }));
  for (auto operand : new_root_operands) {
    TF_RET_CHECK(operand != nullptr);
  }
  HloInstruction* new_body_root = hlo->while_body()->AddInstruction(
      HloInstruction::CreateTuple(new_root_operands));
  // Propagate the body root's dynamic sizes onto the new root and its GTEs.
  for (int i = 0; i < original_tuple_count; ++i) {
    TF_RETURN_IF_ERROR(ForEachDynamicDimension(
        body_root,
        [&](ShapeIndex index, int64_t dimension,
            HloInstruction* dynamic_size) -> absl::Status {
          SetDynamicSize(new_body_root, index, dimension, dynamic_size);
          if (index.empty() || index.front() != i) {
            return absl::OkStatus();
          }
          index.pop_front();
          SetDynamicSize(new_root_operands[i], index, dimension, dynamic_size);
          return absl::OkStatus();
        }));
  }
  hlo->while_body()->set_root_instruction(new_body_root);
  MarkAsChanged();
  // Finally, surface each loop-carried size as a GTE of the new while and
  // mark the corresponding result dimensions static (their sizes now travel
  // as explicit values).
  return dynamic_output_mapping.ForEachElementWithStatus(
      [&](const ShapeIndex& index,
          const absl::flat_hash_map& dim_to_size)
          -> absl::Status {
        for (auto [dimension, output_index] : dim_to_size) {
          HloInstruction* dynamic_size = hlo->AddInstruction(
              HloInstruction::CreateGetTupleElement(hlo, output_index));
          SetDynamicSize(result.replacement_instr, index, dimension,
                         dynamic_size);
          ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), index)
              ->set_dynamic_dimension(dimension, false);
          TF_RET_CHECK(!index.empty());
          HloInstruction* gte =
              result.replacement_instr->mutable_operand(index.front());
          TF_RET_CHECK(gte->opcode() == HloOpcode::kGetTupleElement);
          TF_RET_CHECK(gte->operand(0) == hlo);
          ShapeUtil::GetMutableSubshape(gte->mutable_shape(),
                                        ShapeIndexView(index).subspan(1))
              ->set_dynamic_dimension(dimension, false);
        }
        return absl::OkStatus();
      });
}

// Head of HandleParameter — body continues past this chunk (kept verbatim).
// Entry-computation parameters are made static via pad-to-static; inner
// parameters pick up sizes from the caller-provided bindings.
absl::Status DynamicDimensionInferenceVisitor::HandleParameter(
    HloInstruction* hlo) {
  if (hlo->parent()->IsEntryComputation()) {
    TF_RET_CHECK(param_bindings_.empty());
    return InsertPadToStaticOnInstruction(hlo);
  }
  return param_bindings_.ForEachBinding(
      [&](const DynamicParameterBinding::DynamicSizeParameter& dynamic_size,
          const DynamicParameterBinding::DynamicDimension& dynamic_dimension)
          -> absl::Status {
        if (dynamic_dimension.parameter_num == hlo->parameter_number()) {
          SetDynamicSize(
              hlo, dynamic_dimension.parameter_index,
              dynamic_dimension.dimension,
              TupleUtil::AddGetTupleElements(HloPosition{
                  hlo->parent()->parameter_instruction(
                      dynamic_size.parameter_num),
                  dynamic_size.parameter_index,
              }));
        }
        return absl::OkStatus();
});\n}\nabsl::Status DynamicDimensionInferenceVisitor::HandleInfeed(\n HloInstruction* hlo) {\n return InsertPadToStaticOnInstruction(hlo);\n}\nabsl::Status DynamicDimensionInferenceVisitor::ForEachDynamicDimension(\n HloInstruction* inst, const DynamicDimensionFn& fn) {\n auto iter = parent_->per_hlo_dynamic_dimensions_.find(inst);\n if (iter != parent_->per_hlo_dynamic_dimensions_.end()) {\n for (auto& dynamic_dimension : iter->second) {\n HloInstruction* dynamic_size = parent_->GetDynamicSize(\n dynamic_dimension.inst, dynamic_dimension.index,\n dynamic_dimension.dim);\n TF_RETURN_IF_ERROR(\n fn(dynamic_dimension.index, dynamic_dimension.dim, dynamic_size));\n }\n }\n return absl::OkStatus();\n}\nabsl::StatusOr DynamicDimensionInferenceVisitor::RequiresPadToStatic(\n HloInstruction* instr, ShapeIndex shape_index) {\n TF_RET_CHECK(ShapeUtil::IsLeafIndex(instr->shape(), shape_index))\n << instr->shape() << \" @ \" << shape_index;\n if (ShapeUtil::GetSubshape(instr->shape(), shape_index).is_static()) {\n return false;\n }\n auto uses =\n dataflow_analysis_.GetValueDefinedAt(instr, shape_index).GetUses();\n for (const auto& use : uses) {\n if (use.instruction->opcode() == HloOpcode::kAsyncStart ||\n use.instruction->opcode() == HloOpcode::kAsyncUpdate ||\n use.instruction->opcode() == HloOpcode::kAsyncDone ||\n use.instruction->opcode() == HloOpcode::kCall ||\n use.instruction->opcode() == HloOpcode::kTuple ||\n use.instruction->opcode() == HloOpcode::kGetTupleElement ||\n use.instruction->opcode() == HloOpcode::kConditional) {\n continue;\n }\n if (use.instruction->opcode() == HloOpcode::kWhile) {\n TF_RET_CHECK(use.operand_number == 0);\n HloInstruction* root = use.instruction->while_body()->root_instruction();\n if (parent_->HasDynamicDimension(root, use.operand_index)) {\n return true;\n }\n continue;\n }\n if (use.instruction->opcode() == HloOpcode::kSetDimensionSize) {\n TF_RET_CHECK(use.operand_number == 0);\n return true;\n }\n if (use.instruction->opcode() 
== HloOpcode::kGetDimensionSize) {\n return true;\n }\n if (use.instruction->opcode() != HloOpcode::kCustomCall ||\n use.instruction->custom_call_target() != \"PadToStatic\") {\n if (parent_->op_supports_dynamism_handler_ == nullptr) {\n return true;\n }\n if (parent_->op_supports_dynamism_handler_(use.instruction) ==\n OpDynamismSupport::kNoSupport) {\n return true;\n }\n }\n }\n return false;\n}\nabsl::Status DynamicDimensionInferenceVisitor::InsertPadToStaticOnInstruction(\n HloInstruction* inst) {\n if (inst->shape().is_static()) {\n return absl::OkStatus();\n }\n ShapeTree needs_pad(inst->shape(), false);\n bool any_needs_pad = false;\n TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(\n inst->shape(), [&](const Shape& subshape, const ShapeIndex& shape_index) {\n if (subshape.IsTuple()) {\n return absl::OkStatus();\n }\n TF_ASSIGN_OR_RETURN(bool do_pad,\n RequiresPadToStatic(inst, shape_index));\n if (do_pad) {\n *needs_pad.mutable_element(shape_index) = true;\n any_needs_pad = true;\n }\n return absl::OkStatus();\n }));\n if (!any_needs_pad) {\n return absl::OkStatus();\n }\n auto users = inst->users();\n ShapeTree gtes =\n TupleUtil::DisassembleTupleInstruction(inst);\n ShapeTree padded(inst->shape(), nullptr);\n TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapePostOrderWithStatus(\n inst->shape(),\n [&](const Shape& subshape,\n const ShapeIndex& shape_index) -> absl::Status {\n HloInstruction* element = gtes.element(shape_index);\n SetVisited(*gtes.element(shape_index));\n if (subshape.IsTuple()) {\n absl::InlinedVector children;\n ShapeIndex child_index = shape_index;\n for (int i = 0; i < subshape.tuple_shapes_size(); ++i) {\n child_index.push_back(i);\n children.push_back(padded.element(child_index));\n child_index.pop_back();\n }\n HloInstruction* tuple =\n element->AddInstruction(HloInstruction::CreateVariadic(\n subshape, HloOpcode::kTuple, children));\n TF_CHECK_OK(ForEachOperandDynamicDimension(\n tuple,\n [&](HloInstruction* operand, ShapeIndex 
index, int64_t dimension,\n int64_t operand_index, HloInstruction* dynamic_size) {\n index.push_front(operand_index);\n SetDynamicSize(tuple, index, dimension, dynamic_size);\n return absl::OkStatus();\n }));\n *padded.mutable_element(shape_index) = tuple;\n return absl::OkStatus();\n }\n if (needs_pad.element(shape_index)) {\n Shape data_output_shape =\n ShapeUtil::MakeStaticShape(element->shape()); \n Shape output_shape = ShapeUtil::MakeTupleShape({data_output_shape});\n for (int64_t i = 0; i < element->shape().rank(); ++i) {\n ShapeUtil::AppendShapeToTuple(ShapeUtil::MakeScalarShape(S32),\n &output_shape);\n }\n HloInstruction* pad_to_static = inst->parent()->AddInstruction(\n HloInstruction::CreateCustomCall(output_shape, {element},\n \"PadToStatic\"),\n absl::StrCat(element->name(), \".padded\"));\n SetVisited(*pad_to_static);\n HloInstruction* data_output = inst->parent()->AddInstruction(\n HloInstruction::CreateGetTupleElement(data_output_shape,\n pad_to_static, 0),\n absl::StrCat(element->name(), \".data\"));\n SetVisited(*data_output);\n for (int64_t i = 0; i < element->shape().rank(); ++i) {\n if (!element->shape().is_dynamic_dimension(i)) {\n continue;\n }\n HloInstruction* dynamic_size_output =\n inst->parent()->AddInstruction(\n HloInstruction::CreateGetTupleElement(\n output_shape.tuple_shapes(i + 1), pad_to_static, i + 1),\n absl::StrCat(element->name(), \".size\"));\n SetVisited(*dynamic_size_output);\n SetDynamicSize(data_output, {}, i, dynamic_size_output,\n false);\n }\n *padded.mutable_element(shape_index) = data_output;\n } else {\n *padded.mutable_element(shape_index) = element;\n }\n return absl::OkStatus();\n }));\n HloInstruction* result = padded.element({});\n for (auto user : users) {\n for (int64_t i : user->OperandIndices(inst)) {\n TF_RETURN_IF_ERROR(user->ReplaceOperandWith(i, result));\n }\n }\n if (inst->IsRoot()) {\n inst->parent()->set_root_instruction(result);\n }\n MarkAsChanged();\n return absl::OkStatus();\n}\nabsl::Status 
DynamicDimensionInferenceVisitor::InsertShapeCheck(\n HloInstruction* dim1, HloInstruction* dim2,\n bool support_implicit_broadcast) {\n switch (shape_check_mode_) {\n case DynamicDimensionInference::kIgnore:\n return absl::OkStatus();\n case DynamicDimensionInference::kCompileTime:\n return InvalidArgument(\n \"Fail to proof the equality of two dimensions at compile time: \"\n \"%s vs %s\",\n dim1->ToString(), dim2->ToString());\n case DynamicDimensionInference::kRuntime: {\n TF_ASSIGN_OR_RETURN(\n HloInstruction * assertion,\n MakeCompareHlo(Comparison::Direction::kEq, dim1, dim2));\n if (shape_assertion_ == nullptr) {\n shape_assertion_ = assertion;\n } else {\n TF_ASSIGN_OR_RETURN(\n shape_assertion_,\n MakeBinaryHlo(HloOpcode::kAnd, shape_assertion_, assertion));\n }\n return absl::OkStatus();\n }\n default:\n LOG(FATAL) << \"Unreachable\";\n }\n}\nabsl::Status DynamicDimensionInferenceVisitor::ForEachDynamicDimensionInOperand(\n HloInstruction* inst, int64_t operand_index, OperandDynamicDimensionFn fn) {\n auto iter =\n parent_->per_hlo_dynamic_dimensions_.find(inst->operand(operand_index));\n if (iter != parent_->per_hlo_dynamic_dimensions_.end()) {\n for (auto& dynamic_dimension : iter->second) {\n HloInstruction* dynamic_size = parent_->GetDynamicSize(\n dynamic_dimension.inst, dynamic_dimension.index,\n dynamic_dimension.dim);\n TF_RETURN_IF_ERROR(fn(dynamic_dimension.inst, dynamic_dimension.index,\n dynamic_dimension.dim, operand_index,\n dynamic_size));\n }\n }\n return absl::OkStatus();\n}\nabsl::Status DynamicDimensionInferenceVisitor::ForEachOperandDynamicDimension(\n HloInstruction* inst, OperandDynamicDimensionFn fn) {\n for (int64_t operand_index = 0; operand_index < inst->operand_count();\n ++operand_index) {\n TF_RETURN_IF_ERROR(\n ForEachDynamicDimensionInOperand(inst, operand_index, fn));\n }\n return absl::OkStatus();\n}\nvoid DynamicDimensionInference::SetDynamicSize(HloInstruction* inst,\n const ShapeIndex& index,\n int64_t dim,\n 
HloInstruction* size) {\n CHECK_NE(inst, nullptr);\n CHECK_NE(size, nullptr);\n VLOG(1) << \"Set dimension inst \" << inst->ToString() << \" index \"\n << index.ToString() << \"@\" << dim << \" to \" << size->ToShortString();\n const Shape& subshape = ShapeUtil::GetSubshape(inst->shape(), index);\n CHECK(!subshape.IsTuple()) << \"Can't set a tuple shape to dynamic dimension\";\n CHECK(dim < subshape.rank() && dim >= 0)\n << \"Asked to set invalid dynamic dimension. Shape: \"\n << subshape.ToString() << \", Dimension: \" << dim;\n DynamicDimension dynamic_dimension{inst, index, dim};\n auto [it, inserted] = dynamic_mapping_.try_emplace(dynamic_dimension, size);\n if (!inserted) {\n CHECK_EQ(size, it->second) << \"old: \" << it->second->ToShortString()\n << \", new: \" << size->ToShortString();\n }\n auto iter = per_hlo_dynamic_dimensions_.try_emplace(inst);\n iter.first->second.emplace(dynamic_dimension);\n}\nvoid DynamicDimensionInference::CopyMapping(\n HloInstruction* from, HloInstruction* to,\n const absl::flat_hash_map*\n dynamic_size_map) {\n auto iter = per_hlo_dynamic_dimensions_.find(from);\n if (iter != per_hlo_dynamic_dimensions_.end()) {\n for (auto& dynamic_dimension : iter->second) {\n HloInstruction* dynamic_size =\n GetDynamicSize(dynamic_dimension.inst, dynamic_dimension.index,\n dynamic_dimension.dim);\n if (dynamic_size_map != nullptr) {\n dynamic_size = dynamic_size_map->at(dynamic_size);\n }\n SetDynamicSize(to, dynamic_dimension.index, dynamic_dimension.dim,\n dynamic_size);\n }\n }\n}\nabsl::StatusOr DynamicDimensionInference::Run(\n HloModule* module, OpSupportsDynamismHandler op_supports_dynamism_handler,\n CustomCallInferenceHandler custom_call_handler,\n ShapeCheckMode shape_check_mode,\n const AssertionGenerator& assertion_generator,\n const absl::flat_hash_set& execution_threads) {\n DynamicDimensionInference inference(\n module, std::move(op_supports_dynamism_handler),\n std::move(custom_call_handler), shape_check_mode, 
assertion_generator,\n execution_threads);\n TF_RETURN_IF_ERROR(inference.AnalyzeDynamicDimensions());\n return std::move(inference);\n}\nstd::string DynamicDimensionInference::ToString() const {\n std::vector pieces;\n pieces.push_back(\"DynamicDimensionInference: \");\n for (const auto& mapping : dynamic_mapping_) {\n const DynamicDimension& dynamic_dimension = mapping.first;\n pieces.push_back(absl::StrFormat(\n \" -- instruction %s at %s has dim %lld as dynamic\"\n \" dimension, which is represented by instruction %s\",\n dynamic_dimension.inst->ToString(), dynamic_dimension.index.ToString(),\n dynamic_dimension.dim, mapping.second->ToString()));\n }\n return absl::StrJoin(pieces, \"\\n\");\n}\nDynamicDimensionInference::DynamicDimensionInference(\n HloModule* module, OpSupportsDynamismHandler op_supports_dynamism_handler,\n CustomCallInferenceHandler custom_call_handler,\n ShapeCheckMode shape_check_mode, AssertionGenerator assertion_generator,\n const absl::flat_hash_set& execution_threads)\n : module_(module),\n op_supports_dynamism_handler_(std::move(op_supports_dynamism_handler)),\n custom_call_handler_(std::move(custom_call_handler)),\n shape_check_mode_(shape_check_mode),\n assertion_generator_(assertion_generator),\n execution_threads_(execution_threads) {}\nabsl::Status DynamicDimensionInference::AnalyzeDynamicDimensions() {\n TF_ASSIGN_OR_RETURN(\n std::unique_ptr dataflow_analysis,\n HloDataflowAnalysis::Run(*module_, false,\n true,\n nullptr,\n nullptr, execution_threads_));\n for (HloComputation* computation : module_->MakeComputationPostOrder()) {\n if (!HloInstruction::IsThreadIncluded(computation->execution_thread(),\n execution_threads_)) {\n continue;\n }\n TF_ASSIGN_OR_RETURN(\n bool changed,\n DynamicDimensionInferenceVisitor::Run(\n computation, *dataflow_analysis, {}, this, custom_call_handler_,\n shape_check_mode_, assertion_generator_));\n changed_ |= changed;\n }\n return absl::OkStatus();\n}\nvoid 
DynamicDimensionInference::ReplaceAllDynamicDimensionUsesWith(\n HloInstruction* replace, HloInstruction* with) {\n CHECK(Shape::Equal().IgnoreLayout()(replace->shape(),\n ShapeUtil::MakeScalarShape(S32)));\n CHECK(Shape::Equal().IgnoreLayout()(with->shape(),\n ShapeUtil::MakeScalarShape(S32)));\n for (auto& kv : dynamic_mapping_) {\n if (kv.second == replace) {\n kv.second = with;\n }\n }\n}\nabsl::Status DynamicDimensionInference::ForwardDynamicSize(\n HloInstruction* inst, HloInstruction* new_inst, const ShapeIndex& index) {\n TF_RET_CHECK(ShapeUtil::Compatible(inst->shape(), new_inst->shape()));\n for (int64_t dim = 0; dim < inst->shape().rank(); ++dim) {\n DynamicDimension dynamic_dimension_new{new_inst, index, dim};\n DynamicDimension dynamic_dimension{inst, index, dim};\n auto iter = dynamic_mapping_.find(dynamic_dimension);\n if (iter != dynamic_mapping_.end()) {\n dynamic_mapping_.insert({dynamic_dimension_new, iter->second});\n auto iter = per_hlo_dynamic_dimensions_.try_emplace(new_inst);\n iter.first->second.emplace(dynamic_dimension_new);\n }\n }\n return absl::OkStatus();\n}\nbool DynamicDimensionInference::HasDynamicDimension(\n HloInstruction* inst, ShapeIndexView index) const {\n bool has_dynamic_dim = false;\n ShapeUtil::ForEachSubshape(inst->shape(), [&](const Shape& subshape,\n const ShapeIndex& subindex) {\n if (subshape.IsTuple()) {\n return;\n }\n if (ShapeIndexView(subindex).subspan(0, index.size()) != index) {\n return;\n }\n for (int64_t i = 0; i < subshape.dimensions_size(); ++i) {\n HloInstruction* operand_dynamic_size = GetDynamicSize(inst, subindex, i);\n if (operand_dynamic_size != nullptr) {\n has_dynamic_dim = true;\n }\n }\n });\n return has_dynamic_dim;\n}\nShape DynamicDimensionInference::GetDynamicShape(HloInstruction* inst) {\n Shape shape = inst->shape();\n ShapeUtil::ForEachMutableSubshape(\n &shape, [&](Shape* subshape, const ShapeIndex& index) {\n if (!subshape->IsArray()) {\n return;\n }\n for (int64_t dimension = 0; 
dimension < subshape->rank(); ++dimension) {\n if (GetDynamicSize(inst, index, dimension) != nullptr) {\n subshape->set_dynamic_dimension(dimension, true);\n }\n }\n });\n return shape;\n}\nHloInstruction* DynamicDimensionInference::GetDynamicSize(\n HloInstruction* inst, const ShapeIndex& index, int64_t dim) const {\n auto iter = dynamic_mapping_.find(DynamicDimension{inst, index, dim});\n if (iter != dynamic_mapping_.end()) {\n return iter->second;\n }\n return nullptr;\n}\nconst HloInstruction* DynamicDimensionInference::GetDynamicSize(\n const HloInstruction* inst, const ShapeIndex& index, int64_t dim) const {\n return GetDynamicSize(const_cast(inst), index, dim);\n}\nstd::vector DynamicDimensionInference::GetDynamicSizes(\n HloInstruction* inst, const ShapeIndex& index) const {\n CHECK(ShapeUtil::IndexIsValid(inst->shape(), index));\n const int64_t rank = ShapeUtil::GetSubshape(inst->shape(), index).rank();\n std::vector result(rank, nullptr);\n for (int64_t i = 0; i < rank; ++i) {\n result[i] = GetDynamicSize(inst, index, i);\n }\n return result;\n}\nbool DynamicDimensionInference::CanInfer(HloInstruction* hlo) {\n if (hlo->shape().is_static() && hlo->called_computations().empty() &&\n hlo->opcode() != HloOpcode::kCustomCall) {\n return false;\n }\n bool ok = true;\n for (int64_t operand_index = 0; operand_index < hlo->operand_count();\n ++operand_index) {\n ShapeUtil::ForEachSubshape(\n hlo->operand(operand_index)->shape(),\n [&](const Shape& subshape, const ShapeIndex& shape_index) {\n if (!subshape.IsArray()) {\n return;\n }\n for (int64_t dimension = 0; dimension < subshape.rank();\n ++dimension) {\n bool shape_is_dynamic = subshape.is_dynamic_dimension(dimension);\n bool dynamic_size_recorded =\n GetDynamicSize(hlo->operand(operand_index), shape_index,\n dimension) != nullptr;\n if (shape_is_dynamic && !dynamic_size_recorded) {\n VLOG(2) << \"cannot infer \" << hlo->ToShortString()\n << \" because operand \" << operand_index << \" (\"\n << 
hlo->operand(operand_index)->ToShortString() << \")\"\n << \" subshape \" << shape_index.ToString()\n << \" is missing dynamic size for dimension \" << dimension;\n ok = false;\n }\n CHECK(hlo->operand(operand_index)->opcode() ==\n HloOpcode::kSetDimensionSize ||\n hlo->operand(operand_index)->opcode() ==\n HloOpcode::kCustomCall ||\n !shape_is_dynamic || !dynamic_size_recorded);\n }\n });\n }\n return ok;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/dynamic_dimension_inference.h\"\n#include \"xla/hlo/builder/xla_builder.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/literal.h\"\n#include \"xla/service/hlo_runner.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/test.h\"\n#include \"xla/test_helpers.h\"\n#include \"xla/tests/filecheck.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/statusor.h\"\n#include \"tsl/platform/test_benchmark.h\"\nnamespace op = xla::testing::opcode_matchers;\nnamespace xla {\nnamespace {\nclass DynamicDimensionInferenceTest : public HloTestBase {\n protected:\n DynamicDimensionInferenceTest() : HloTestBase() {\n module_ = CreateNewVerifiedModule();\n }\n absl::Status RunInference(\n OpSupportsDynamismHandler op_supports_dynamism_handler = nullptr,\n DynamicDimensionInference::CustomCallInferenceHandler handler = nullptr,\n DynamicDimensionInference::ShapeCheckMode shape_check_mode =\n DynamicDimensionInference::ShapeCheckMode::kIgnore,\n const DynamicDimensionInference::AssertionGenerator& assertion_generator =\n nullptr) {\n TF_ASSIGN_OR_RETURN(DynamicDimensionInference inference,\n DynamicDimensionInference::Run(\n module_.get(), 
op_supports_dynamism_handler,\n handler, shape_check_mode, assertion_generator));\n inference_ = std::make_unique(inference);\n return absl::OkStatus();\n }\n HloComputation* GetAdd() {\n auto embedded_builder = HloComputation::Builder(\"add\");\n auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeShape(F32, {}), \"lhs\"));\n auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(\n 1, ShapeUtil::MakeShape(F32, {}), \"rhs\"));\n embedded_builder.AddInstruction(\n HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));\n return module_->AddEmbeddedComputation(embedded_builder.Build());\n }\n HloComputation* GetAddTuple() {\n auto embedded_builder = HloComputation::Builder(\"add\");\n auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeShape(F32, {}), \"lhs\"));\n auto lhs_1 =\n embedded_builder.AddInstruction(HloInstruction::CreateParameter(\n 1, ShapeUtil::MakeShape(F32, {}), \"lhs.1\"));\n auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(\n 2, ShapeUtil::MakeShape(F32, {}), \"rhs\"));\n auto rhs_1 =\n embedded_builder.AddInstruction(HloInstruction::CreateParameter(\n 3, ShapeUtil::MakeShape(F32, {}), \"rhs.1\"));\n auto add = embedded_builder.AddInstruction(\n HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));\n auto add_1 = embedded_builder.AddInstruction(HloInstruction::CreateBinary(\n lhs->shape(), HloOpcode::kAdd, lhs_1, rhs_1));\n embedded_builder.AddInstruction(HloInstruction::CreateTuple({add, add_1}));\n return module_->AddEmbeddedComputation(embedded_builder.Build());\n }\n HloComputation* GetGe() {\n auto embedded_builder = HloComputation::Builder(\"ge\");\n auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeShape(F32, {}), \"lhs\"));\n auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(\n 1, ShapeUtil::MakeShape(F32, 
{}), \"rhs\"));\n embedded_builder.AddInstruction(HloInstruction::CreateCompare(\n ShapeUtil::MakeShape(PRED, {}), lhs, rhs, ComparisonDirection::kGe));\n return module_->AddEmbeddedComputation(embedded_builder.Build());\n }\n std::unique_ptr module_;\n std::unique_ptr inference_;\n const Shape scalar_shape_ = ShapeUtil::MakeShape(S32, {});\n};\nTEST_F(DynamicDimensionInferenceTest, ParamTest) {\n auto builder = HloComputation::Builder(TestName());\n auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});\n auto dynamic_shape =\n ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, true, false});\n auto param = builder.AddInstruction(\n HloInstruction::CreateParameter(0, input_shape, \"param\"));\n auto param2 = builder.AddInstruction(\n HloInstruction::CreateParameter(1, scalar_shape_, \"param\"));\n auto result = builder.AddInstruction(\n HloInstruction::CreateSetDimensionSize(dynamic_shape, param, param2, 1));\n module_->AddEntryComputation(builder.Build());\n SCOPED_TRACE(module_->ToString());\n TF_ASSERT_OK(RunInference());\n EXPECT_EQ(inference_->GetDynamicSize(result, {}, 1), param2);\n EXPECT_EQ(inference_->GetDynamicSize(param, {}, 0), nullptr);\n EXPECT_EQ(inference_->GetDynamicSize(param2, {}, 0), nullptr);\n}\nTEST_F(DynamicDimensionInferenceTest, ElementwiseTest) {\n auto builder = HloComputation::Builder(TestName());\n auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});\n auto dynamic_shape =\n ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, true, false});\n auto data_param = builder.AddInstruction(\n HloInstruction::CreateParameter(0, input_shape, \"data_param\"));\n auto size_param = builder.AddInstruction(\n HloInstruction::CreateParameter(1, scalar_shape_, \"size_param\"));\n auto dynamic_param =\n builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n dynamic_shape, data_param, size_param, 1));\n auto* negate = builder.AddInstruction(HloInstruction::CreateUnary(\n dynamic_shape, HloOpcode::kNegate, dynamic_param));\n 
module_->AddEntryComputation(builder.Build());\n SCOPED_TRACE(module_->ToString());\n TF_ASSERT_OK(RunInference());\n EXPECT_EQ(inference_->GetDynamicSize(negate, {}, 1), size_param);\n}\nTEST_F(DynamicDimensionInferenceTest, ReduceTestI) {\n auto builder = HloComputation::Builder(TestName());\n auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});\n auto reduce_shape = ShapeUtil::MakeShape(F32, {2}, {true});\n auto dynamic_shape =\n ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, true, false});\n auto data_param = builder.AddInstruction(\n HloInstruction::CreateParameter(0, input_shape, \"data_param\"));\n auto size_param = builder.AddInstruction(\n HloInstruction::CreateParameter(1, scalar_shape_, \"size_param\"));\n auto dynamic_param =\n builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n dynamic_shape, data_param, size_param, 1));\n auto negate = builder.AddInstruction(HloInstruction::CreateUnary(\n dynamic_shape, HloOpcode::kNegate, dynamic_param));\n auto init = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0)));\n auto reduce = builder.AddInstruction(HloInstruction::CreateReduce(\n reduce_shape, negate, init, {0, 2}, GetAdd()));\n module_->AddEntryComputation(builder.Build());\n SCOPED_TRACE(module_->ToString());\n TF_ASSERT_OK(RunInference());\n EXPECT_EQ(inference_->GetDynamicSize(reduce, {}, 0), size_param);\n}\nTEST_F(DynamicDimensionInferenceTest, ReduceTestII) {\n auto builder = HloComputation::Builder(TestName());\n auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});\n auto reduce_shape = ShapeUtil::MakeShape(F32, {1, 2}, {false, true});\n auto dynamic_shape =\n ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, false, true});\n auto data_param = builder.AddInstruction(\n HloInstruction::CreateParameter(0, input_shape, \"data_param\"));\n auto size_param = builder.AddInstruction(\n HloInstruction::CreateParameter(1, scalar_shape_, \"size_param\"));\n auto dynamic_param =\n 
builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n dynamic_shape, data_param, size_param, 2));\n auto negate = builder.AddInstruction(HloInstruction::CreateUnary(\n dynamic_shape, HloOpcode::kNegate, dynamic_param));\n auto init = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0)));\n auto reduce = builder.AddInstruction(\n HloInstruction::CreateReduce(reduce_shape, negate, init, {1}, GetAdd()));\n module_->AddEntryComputation(builder.Build());\n SCOPED_TRACE(module_->ToString());\n TF_ASSERT_OK(RunInference());\n EXPECT_EQ(inference_->GetDynamicSize(reduce, {}, 1), size_param);\n EXPECT_EQ(inference_->GetDynamicSize(reduce, {}, 0), nullptr);\n}\nTEST_F(DynamicDimensionInferenceTest, VariadicReduce) {\n auto builder = HloComputation::Builder(TestName());\n auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});\n auto reduce_shape = ShapeUtil::MakeShape(F32, {1, 2}, {false, true});\n auto dynamic_shape =\n ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, false, true});\n auto data_param_1 = builder.AddInstruction(\n HloInstruction::CreateParameter(0, input_shape, \"data_param\"));\n auto data_param_2 = builder.AddInstruction(\n HloInstruction::CreateParameter(1, input_shape, \"data_param.2\"));\n auto size_param = builder.AddInstruction(\n HloInstruction::CreateParameter(2, scalar_shape_, \"size_param\"));\n auto data_param_dynamic_1 =\n builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n dynamic_shape, data_param_1, size_param, 2));\n auto data_param_dynamic_2 =\n builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n dynamic_shape, data_param_2, size_param, 2));\n auto dynamic_negate_1 = builder.AddInstruction(HloInstruction::CreateUnary(\n dynamic_shape, HloOpcode::kNegate, data_param_dynamic_1));\n auto dynamic_negate_2 = builder.AddInstruction(HloInstruction::CreateUnary(\n dynamic_shape, HloOpcode::kNegate, data_param_dynamic_2));\n auto init = builder.AddInstruction(\n 
HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0)));\n auto reduce = builder.AddInstruction(HloInstruction::CreateReduce(\n ShapeUtil::MakeTupleShape({reduce_shape, reduce_shape}),\n {dynamic_negate_1, dynamic_negate_2}, {init, init}, {1}, GetAddTuple()));\n module_->AddEntryComputation(builder.Build());\n SCOPED_TRACE(module_->ToString());\n TF_ASSERT_OK(RunInference());\n EXPECT_EQ(inference_->GetDynamicSize(reduce, {0}, 1), size_param);\n EXPECT_EQ(inference_->GetDynamicSize(reduce, {1}, 1), size_param);\n EXPECT_EQ(inference_->GetDynamicSize(reduce, {0}, 0), nullptr);\n EXPECT_EQ(inference_->GetDynamicSize(reduce, {1}, 0), nullptr);\n}\nTEST_F(DynamicDimensionInferenceTest, DotTest) {\n auto builder = HloComputation::Builder(TestName());\n constexpr int xdim = 3;\n constexpr int ydim = 2;\n constexpr int zdim = 1;\n auto xy_shape = ShapeUtil::MakeShape(F32, {xdim, ydim});\n auto yz_shape = ShapeUtil::MakeShape(F32, {ydim, zdim});\n auto xy_dynamic_shape = ShapeUtil::MakeShape(F32, {xdim, ydim}, {true, true});\n auto yz_dynamic_shape =\n ShapeUtil::MakeShape(F32, {ydim, zdim}, {true, false});\n auto xz_dynamic_shape =\n ShapeUtil::MakeShape(F32, {xdim, zdim}, {true, false});\n auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 0, xy_shape, \"A\"));\n auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 1, yz_shape, \"B\"));\n auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 2, scalar_shape_, \"size_param\"));\n a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n ShapeUtil::MakeShape(F32, xy_shape.dimensions(), {true, false}), a_param,\n size_param, 0));\n a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n xy_dynamic_shape, a_param, size_param, 1));\n b_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n yz_dynamic_shape, b_param, size_param, 0));\n DotDimensionNumbers dot_dnums;\n 
dot_dnums.add_lhs_contracting_dimensions(1);\n dot_dnums.add_rhs_contracting_dimensions(0);\n auto dot = builder.AddInstruction(\n HloInstruction::CreateDot(xz_dynamic_shape, a_param, b_param, dot_dnums,\n HloTestBase::DefaultPrecisionConfig(2)));\n module_->AddEntryComputation(builder.Build());\n SCOPED_TRACE(module_->ToString());\n TF_ASSERT_OK(RunInference());\n EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 0), size_param);\n EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 1), nullptr);\n}\nTEST_F(DynamicDimensionInferenceTest, DotTestBatch) {\n auto builder = HloComputation::Builder(TestName());\n auto lhs_shape = ShapeUtil::MakeShape(F32, {4, 128, 2, 8});\n auto rhs_shape = ShapeUtil::MakeShape(F32, {4, 128, 2, 8});\n auto output_shape =\n ShapeUtil::MakeShape(F32, {4, 2, 128, 128}, {true, false, false, false});\n auto lhs_shape_dynamic =\n ShapeUtil::MakeShape(F32, {4, 128, 2, 8}, {true, false, false, false});\n auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 0, lhs_shape, \"A\"));\n auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 1, rhs_shape, \"B\"));\n auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 2, scalar_shape_, \"size_param\"));\n a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n lhs_shape_dynamic, a_param, size_param, 0));\n DotDimensionNumbers dot_dnums;\n dot_dnums.add_lhs_contracting_dimensions(3);\n dot_dnums.add_rhs_contracting_dimensions(3);\n dot_dnums.add_lhs_batch_dimensions(0);\n dot_dnums.add_lhs_batch_dimensions(2);\n dot_dnums.add_rhs_batch_dimensions(0);\n dot_dnums.add_rhs_batch_dimensions(2);\n auto dot = builder.AddInstruction(\n HloInstruction::CreateDot(output_shape, a_param, b_param, dot_dnums,\n HloTestBase::DefaultPrecisionConfig(2)));\n module_->AddEntryComputation(builder.Build());\n SCOPED_TRACE(module_->ToString());\n TF_ASSERT_OK(RunInference());\n EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 0), size_param);\n 
EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 1), nullptr);\n EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 2), nullptr);\n EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 3), nullptr);\n}\nTEST_F(DynamicDimensionInferenceTest, DotTestMultiContracting) {\n auto builder = HloComputation::Builder(TestName());\n auto lhs_shape = ShapeUtil::MakeShape(F32, {2, 2, 8, 64});\n auto rhs_shape = ShapeUtil::MakeShape(F32, {2, 2, 512});\n auto output_shape = ShapeUtil::MakeShape(F32, {8, 64, 512});\n auto lhs_shape_dynamic =\n ShapeUtil::MakeShape(F32, {2, 2, 8, 64}, {true, true, false, false});\n auto rhs_shape_dynamic =\n ShapeUtil::MakeShape(F32, {2, 2, 512}, {true, true, false});\n auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 0, lhs_shape, \"A\"));\n auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 1, rhs_shape, \"B\"));\n auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 2, scalar_shape_, \"size_param\"));\n a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n ShapeUtil::MakeShape(F32, lhs_shape.dimensions(),\n {true, false, false, false}),\n a_param, size_param, 0));\n a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n lhs_shape_dynamic, a_param, size_param, 1));\n b_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n ShapeUtil::MakeShape(F32, rhs_shape.dimensions(), {true, false, false}),\n b_param, size_param, 0));\n b_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n rhs_shape_dynamic, b_param, size_param, 1));\n DotDimensionNumbers dot_dnums;\n dot_dnums.add_lhs_contracting_dimensions(0);\n dot_dnums.add_lhs_contracting_dimensions(1);\n dot_dnums.add_rhs_contracting_dimensions(0);\n dot_dnums.add_rhs_contracting_dimensions(1);\n auto dot = builder.AddInstruction(\n HloInstruction::CreateDot(output_shape, a_param, b_param, dot_dnums,\n HloTestBase::DefaultPrecisionConfig(2)));\n 
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());
  TF_ASSERT_OK(RunInference());
  // All dynamic dimensions were contracted away, so nothing in the output is
  // dynamic.
  EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 0), nullptr);
  EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 1), nullptr);
  EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 2), nullptr);
}

// Convolution with a dynamic input batch dimension: the batch dimension is
// remapped (input dim 0 -> output dim 1) through the convolution dimension
// numbers, and the dynamic size follows it.
TEST_F(DynamicDimensionInferenceTest, ConvolutionTest) {
  auto builder = HloComputation::Builder(TestName());
  constexpr int xdim = 3;
  constexpr int ydim = 2;
  constexpr int zdim = 1;
  auto xy_shape = ShapeUtil::MakeShape(F32, {xdim, ydim});
  auto yz_shape = ShapeUtil::MakeShape(F32, {ydim, zdim});
  auto xy_shape_dynamic = ShapeUtil::MakeShape(F32, {xdim, ydim}, {true, true});
  auto zx_shape_dynamic =
      ShapeUtil::MakeShape(F32, {zdim, xdim}, {false, true});
  auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
      0, xy_shape, "A"));
  auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(
      1, yz_shape, "B"));
  auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
      2, scalar_shape_, "size_param"));
  // Both input dimensions of A are dynamic; the kernel B stays static.
  a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      ShapeUtil::MakeShape(F32, xy_shape.dimensions(), {true, false}), a_param,
      size_param, 0));
  a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      xy_shape_dynamic, a_param, size_param, 1));
  // Zero spatial dimensions; batch and feature are swapped between input and
  // output.
  auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers(0);
  dnums.set_kernel_input_feature_dimension(0);
  dnums.set_kernel_output_feature_dimension(1);
  dnums.set_input_batch_dimension(0);
  dnums.set_output_batch_dimension(1);
  dnums.set_output_feature_dimension(0);
  Window window;
  auto* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      zx_shape_dynamic, a_param, b_param, 1,
      1, window, dnums,
      HloTestBase::DefaultPrecisionConfig(2)));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());
  TF_ASSERT_OK(RunInference());
  // The dynamic batch follows the batch dimension into output dim 1; the
  // (contracted) feature dimension leaves output dim 0 static.
  EXPECT_EQ(inference_->GetDynamicSize(conv, {}, 1), size_param);
  EXPECT_EQ(inference_->GetDynamicSize(conv, {}, 0), nullptr);
}

// Transpose with permutation {2, 1, 0}: each dynamic size moves with its
// dimension.
TEST_F(DynamicDimensionInferenceTest, TransposeTest) {
  auto builder = HloComputation::Builder(TestName());
  auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 3});
  auto output_shape = ShapeUtil::MakeShape(F32, {3, 2, 1}, {true, true, true});
  auto dynamic_shape = ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, true, true});
  auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
      0, input_shape, "A"));
  auto* size_param_1 = builder.AddInstruction(HloInstruction::CreateParameter(
      1, scalar_shape_, "size_param"));
  auto* size_param_2 = builder.AddInstruction(HloInstruction::CreateParameter(
      2, scalar_shape_, "size_param"));
  auto* size_param_3 = builder.AddInstruction(HloInstruction::CreateParameter(
      3, scalar_shape_, "size_param"));
  // Each input dimension gets a distinct dynamic size so the permutation can
  // be observed in the output.
  a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, false, false}), a_param,
      size_param_1, 0));
  a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, true, false}), a_param,
      size_param_2, 1));
  a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      dynamic_shape, a_param, size_param_3, 2));
  auto* transpose = builder.AddInstruction(
      HloInstruction::CreateTranspose(output_shape, a_param, {2, 1, 0}));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());
  TF_ASSERT_OK(RunInference());
  EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 0), size_param_3);
  EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 1), size_param_2);
  EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 2), size_param_1);
}

// Same as TransposeTest but with a non-monotonic permutation {2, 0, 1}.
TEST_F(DynamicDimensionInferenceTest, NonDescendingTransposeTest) {
  auto builder = HloComputation::Builder(TestName());
  auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 3});
  auto output_shape = ShapeUtil::MakeShape(F32, {3, 1, 2}, {true, true, true});
  auto dynamic_shape = ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, true, true});
  auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
      0, input_shape, "A"));
  auto* size_param_1 = builder.AddInstruction(HloInstruction::CreateParameter(
      1, scalar_shape_, "size_param"));
  auto* size_param_2 = builder.AddInstruction(HloInstruction::CreateParameter(
      2, scalar_shape_, "size_param"));
  auto* size_param_3 = builder.AddInstruction(HloInstruction::CreateParameter(
      3, scalar_shape_, "size_param"));
  a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, false, false}), a_param,
      size_param_1, 0));
  a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, true, false}), a_param,
      size_param_2, 1));
  a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      dynamic_shape, a_param, size_param_3, 2));
  auto* transpose = builder.AddInstruction(
      HloInstruction::CreateTranspose(output_shape, a_param, {2, 0, 1}));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());
  TF_ASSERT_OK(RunInference());
  EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 0), size_param_3);
  EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 1), size_param_1);
  EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 2), size_param_2);
}

// Reshape {2,3,4,5,6} -> {6,4,1,5,2,3} with dynamic input dims 2 and 3: the
// dynamic sizes track the reshaped dimensions they map onto.
TEST_F(DynamicDimensionInferenceTest, ReshapeTest) {
  auto builder = HloComputation::Builder(TestName());
  auto input_shape = ShapeUtil::MakeShape(F32, {2, 3, 4, 5, 6});
  auto output_shape = ShapeUtil::MakeShape(
      F32, {6, 4, 1, 5, 2, 3}, {false, true, false, true, false, false});
  auto dynamic_shape = ShapeUtil::MakeShape(F32, {2, 3, 4, 5, 6},
                                            {false, false, true, true, false});
  auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
      0, 
      input_shape, "A"));
  auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
      1, scalar_shape_, "size_param"));
  a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      ShapeUtil::MakeShape(F32, {2, 3, 4, 5, 6},
                           {false, false, true, false, false}),
      a_param, size_param, 2));
  a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      dynamic_shape, a_param, size_param, 3));
  auto* reshape = builder.AddInstruction(
      HloInstruction::CreateReshape(output_shape, a_param));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());
  TF_ASSERT_OK(RunInference());
  // Input dims 2 and 3 were dynamic; they land on output dims 1 and 3.
  EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 0), nullptr);
  EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 1), size_param);
  EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 2), nullptr);
  EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 3), size_param);
  EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 4), nullptr);
  EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 5), nullptr);
}

// Reshape with an explicit inferred dimension (third CreateReshape argument):
// the dynamic input dim 0 is still tracked into the output.
TEST_F(DynamicDimensionInferenceTest, ReshapeInferredDimensionTest) {
  auto builder = HloComputation::Builder(TestName());
  auto input_shape = ShapeUtil::MakeShape(F32, {4, 5});
  auto output_shape =
      ShapeUtil::MakeShape(F32, {1, 4, 5}, {true, false, false});
  auto dynamic_shape = ShapeUtil::MakeShape(F32, {4, 5}, {true, false});
  auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
      0, input_shape, "A"));
  auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
      1, scalar_shape_, "size_param"));
  a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      dynamic_shape, a_param, size_param, 0));
  auto* reshape = builder.AddInstruction(HloInstruction::CreateReshape(
      output_shape, a_param, 0));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());
  TF_ASSERT_OK(RunInference());
  EXPECT_NE(inference_->GetDynamicSize(reshape, {}, 0), nullptr);
}

// Reshape that collapses a dynamic major dimension ({32,10,4} -> {320,4}).
TEST_F(DynamicDimensionInferenceTest, ReshapeTestMajorDimension) {
  auto builder = HloComputation::Builder(TestName());
  auto input_shape = ShapeUtil::MakeShape(F32, {32, 10, 4});
  auto output_shape = ShapeUtil::MakeShape(F32, {320, 4}, {true, false});
  auto dynamic_shape =
      ShapeUtil::MakeShape(F32, {32, 10, 4}, {true, false, false});
  auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
      0, input_shape, "A"));
  auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
      1, scalar_shape_, "size_param"));
  a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      dynamic_shape, a_param, size_param, 0));
  auto* reshape = builder.AddInstruction(
      HloInstruction::CreateReshape(output_shape, a_param));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());
  // NOTE(review): the status is deliberately not asserted here — only the
  // presence of a dynamic size on the output is checked.
  absl::Status status = RunInference();
  EXPECT_NE(inference_->GetDynamicSize(reshape, {}, 0), nullptr);
}

// Reshaping a dynamic rank-1 shape into a scalar must not crash inference.
TEST_F(DynamicDimensionInferenceTest, ReshapeIntoScalar) {
  auto builder = HloComputation::Builder(TestName());
  auto input_shape = ShapeUtil::MakeShape(F32, {1});
  auto output_shape = ShapeUtil::MakeShape(F32, {});
  auto dynamic_shape = ShapeUtil::MakeShape(F32, {1}, {true});
  auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
      0, input_shape, "A"));
  auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
      1, scalar_shape_, "size_param"));
  a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      dynamic_shape, a_param, size_param, 0));
  builder.AddInstruction(HloInstruction::CreateReshape(output_shape, a_param));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());
  TF_CHECK_OK(RunInference());
}

// Gather with dynamic start indices: the dynamic batch dimension of the
// indices becomes dimension 0 of the gather output.
TEST_F(DynamicDimensionInferenceTest, GatherTest) {
  const std::string hlo_text = R"(
HloModule TensorFlowGatherV2
ENTRY main {
  operand = s32[20,10]{1,0} parameter(0)
  indices = s32[32,20] parameter(1)
  dynamic_size = s32[] parameter(2)
  indices_dynamic = s32[<=32,20] set-dimension-size(indices, dynamic_size), dimensions={0}
  ROOT gather = s32[<=32,20,10]{2,1,0} gather(%operand, %indices_dynamic),
      offset_dims={2},
      collapsed_slice_dims={0},
      start_index_map={0},
      index_vector_dim=2,
      slice_sizes={1,10}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_text));
  SCOPED_TRACE(module_->ToString());
  TF_ASSERT_OK(RunInference());
  EXPECT_EQ(inference_->GetDynamicSize(
                module_->entry_computation()->root_instruction(), {}, 0),
            module_->entry_computation()->parameter_instruction(2));
}

// Broadcast of a dynamic rank-1 operand into dimension 1 of a rank-3 output.
TEST_F(DynamicDimensionInferenceTest, BroadcastTest) {
  auto builder = HloComputation::Builder(TestName());
  auto input_shape = ShapeUtil::MakeShape(F32, {2});
  auto output_shape =
      ShapeUtil::MakeShape(F32, {3, 2, 4}, {false, true, false});
  auto dynamic_shape = ShapeUtil::MakeShape(F32, {2}, {true});
  auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
      0, input_shape, "A"));
  auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
      1, scalar_shape_, "size_param"));
  a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      dynamic_shape, a_param, size_param, 0));
  auto* broadcast = builder.AddInstruction(
      HloInstruction::CreateBroadcast(output_shape, a_param, {1}));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());
  TF_ASSERT_OK(RunInference());
  // Only the broadcast dimension that came from the operand is dynamic.
  EXPECT_EQ(inference_->GetDynamicSize(broadcast, {}, 0), nullptr);
  EXPECT_EQ(inference_->GetDynamicSize(broadcast, {}, 1), size_param);
  EXPECT_EQ(inference_->GetDynamicSize(broadcast, {}, 2), nullptr);
}

// While loop whose tuple-shaped state carries dynamic dimensions: inference
// rewrites the loop to thread the dynamic sizes through the state.
TEST_F(DynamicDimensionInferenceTest, WhileTest) {
  auto builder = HloComputation::Builder(TestName());
  auto input_shape = ShapeUtil::MakeShape(F32, {2, 4, 4});
  auto dynamic_shape =
      ShapeUtil::MakeShape(F32, {2, 
4, 4}, {true, false, false});\n auto tuple_shape = ShapeUtil::MakeTupleShape({input_shape, input_shape});\n auto dynamic_tuple_shape =\n ShapeUtil::MakeTupleShape({dynamic_shape, dynamic_shape});\n auto body_builder = HloComputation::Builder(\"body\");\n auto body_param = body_builder.AddInstruction(\n HloInstruction::CreateParameter(0, dynamic_tuple_shape, \"param\"));\n auto gte_0 = body_builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(dynamic_shape, body_param, 0));\n auto gte_1 = body_builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(dynamic_shape, body_param, 1));\n auto add = body_builder.AddInstruction(HloInstruction::CreateBinary(\n dynamic_shape, HloOpcode::kAdd, gte_0, gte_1));\n body_builder.AddInstruction(HloInstruction::CreateTuple({add, add}));\n HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());\n auto cond_builder = HloComputation::Builder(\"condition\");\n cond_builder.AddInstruction(\n HloInstruction::CreateParameter(0, dynamic_tuple_shape, \"param\"));\n cond_builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(false)));\n HloComputation* condition =\n module_->AddEmbeddedComputation(cond_builder.Build());\n auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 0, tuple_shape, \"A\"));\n auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 1, scalar_shape_, \"size_param\"));\n auto* a_0 = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(input_shape, a_param, 0));\n a_0 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n dynamic_shape, a_0, size_param, 0));\n auto* a_1 = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(input_shape, a_param, 0));\n a_1 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n dynamic_shape, a_1, size_param, 0));\n a_param = builder.AddInstruction(HloInstruction::CreateTuple({a_0, a_1}));\n 
builder.AddInstruction(HloInstruction::CreateWhile(dynamic_tuple_shape,\n condition, body, a_param));\n module_->AddEntryComputation(builder.Build());\n TF_ASSERT_OK(RunInference());\n HloInstruction* while_hlo = nullptr;\n for (HloInstruction* inst : module_->entry_computation()->instructions()) {\n if (inst->opcode() == HloOpcode::kWhile) {\n while_hlo = inst;\n }\n }\n ASSERT_NE(while_hlo, nullptr);\n EXPECT_EQ(while_hlo->shape().tuple_shapes_size(), 4);\n HloInstruction* add_inst = nullptr;\n for (HloInstruction* inst : while_hlo->while_body()->instructions()) {\n if (inst->opcode() == HloOpcode::kAdd) {\n add_inst = inst;\n }\n }\n EXPECT_NE(add_inst, nullptr);\n EXPECT_NE(inference_->GetDynamicSize(add_inst, {}, 0), nullptr);\n EXPECT_NE(inference_->GetDynamicSize(\n module_->entry_computation()->root_instruction(), {0}, 0),\n nullptr);\n EXPECT_NE(inference_->GetDynamicSize(\n module_->entry_computation()->root_instruction(), {1}, 0),\n nullptr);\n}\nTEST_F(DynamicDimensionInferenceTest, ConditionalInputTest) {\n auto builder = HloComputation::Builder(TestName());\n auto input_shape = ShapeUtil::MakeShape(F32, {2, 4, 4});\n auto dynamic_shape =\n ShapeUtil::MakeShape(F32, {2, 4, 4}, {true, false, false});\n auto output_shape = ShapeUtil::MakeShape(F32, {2, 2, 2});\n auto tuple_shape_1 = ShapeUtil::MakeTupleShape({input_shape});\n auto tuple_shape_2 = ShapeUtil::MakeTupleShape({input_shape, input_shape});\n auto tuple_shape_3 =\n ShapeUtil::MakeTupleShape({input_shape, input_shape, input_shape});\n auto tuple_shape_2_dynamic =\n ShapeUtil::MakeTupleShape({dynamic_shape, dynamic_shape});\n auto tuple_shape_3_dynamic =\n ShapeUtil::MakeTupleShape({input_shape, dynamic_shape, dynamic_shape});\n auto true_builder = HloComputation::Builder(\"true\");\n {\n auto true_param = true_builder.AddInstruction(\n HloInstruction::CreateParameter(0, tuple_shape_2_dynamic, \"param\"));\n auto gte_0 = true_builder.AddInstruction(\n 
HloInstruction::CreateGetTupleElement(dynamic_shape, true_param, 0));\n auto gte_1 = true_builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(dynamic_shape, true_param, 1));\n auto add = true_builder.AddInstruction(HloInstruction::CreateBinary(\n dynamic_shape, HloOpcode::kAdd, gte_0, gte_1));\n true_builder.AddInstruction(HloInstruction::CreateTuple({add}));\n }\n HloComputation* true_branch =\n module_->AddEmbeddedComputation(true_builder.Build());\n auto false_builder = HloComputation::Builder(\"false\");\n {\n auto false_param = false_builder.AddInstruction(\n HloInstruction::CreateParameter(0, tuple_shape_3_dynamic, \"param\"));\n auto gte_0 = false_builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(dynamic_shape, false_param, 1));\n auto gte_1 = false_builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(dynamic_shape, false_param, 2));\n auto add = false_builder.AddInstruction(HloInstruction::CreateBinary(\n dynamic_shape, HloOpcode::kAdd, gte_0, gte_1));\n false_builder.AddInstruction(HloInstruction::CreateTuple({add}));\n }\n HloComputation* false_branch =\n module_->AddEmbeddedComputation(false_builder.Build());\n auto* pred_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeScalarShape(PRED), \"pred\"));\n auto* tuple_2_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 1, tuple_shape_2, \"tuple_2_param\"));\n auto* tuple_3_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 2, tuple_shape_3, \"tuple_3_param\"));\n auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 3, scalar_shape_, \"size_param\"));\n auto* param_2_0 = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(input_shape, tuple_2_param, 0));\n param_2_0 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n dynamic_shape, param_2_0, size_param, 0));\n auto* param_2_1 = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(input_shape, 
tuple_2_param, 1));\n param_2_1 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n dynamic_shape, param_2_1, size_param, 0));\n tuple_2_param = builder.AddInstruction(\n HloInstruction::CreateTuple({param_2_0, param_2_1}));\n auto* param_3_0 = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(input_shape, tuple_3_param, 0));\n auto* param_3_1 = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(input_shape, tuple_3_param, 1));\n param_3_1 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n dynamic_shape, param_3_1, size_param, 0));\n auto* param_3_2 = builder.AddInstruction(\n HloInstruction::CreateGetTupleElement(input_shape, tuple_3_param, 2));\n param_3_2 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n dynamic_shape, param_3_1, size_param, 0));\n tuple_3_param = builder.AddInstruction(\n HloInstruction::CreateTuple({param_3_0, param_3_1, param_3_2}));\n builder.AddInstruction(HloInstruction::CreateConditional(\n tuple_shape_1, pred_param, tuple_2_param, true_branch, tuple_3_param,\n false_branch));\n module_->AddEntryComputation(builder.Build());\n TF_ASSERT_OK(RunInference());\n HloInstruction* conditional_hlo = nullptr;\n for (HloInstruction* inst : module_->entry_computation()->instructions()) {\n if (inst->opcode() == HloOpcode::kConditional) {\n conditional_hlo = inst;\n }\n }\n ASSERT_NE(conditional_hlo, nullptr);\n EXPECT_EQ(conditional_hlo->shape().tuple_shapes_size(), 2);\n HloInstruction* add_true_branch = nullptr;\n for (HloInstruction* inst :\n conditional_hlo->true_computation()->instructions()) {\n if (inst->opcode() == HloOpcode::kAdd) {\n add_true_branch = inst;\n }\n }\n EXPECT_NE(add_true_branch, nullptr);\n EXPECT_NE(inference_->GetDynamicSize(add_true_branch, {}, 0), nullptr);\n HloInstruction* add_false_branch = nullptr;\n for (HloInstruction* inst :\n conditional_hlo->false_computation()->instructions()) {\n if (inst->opcode() == HloOpcode::kAdd) {\n 
add_false_branch = inst;\n }\n }\n EXPECT_NE(add_false_branch, nullptr);\n EXPECT_NE(inference_->GetDynamicSize(add_false_branch, {}, 0), nullptr);\n EXPECT_NE(inference_->GetDynamicSize(conditional_hlo, {0}, 0), nullptr);\n}\nTEST_F(DynamicDimensionInferenceTest, ReduceWindowBatchTest) {\n auto builder = HloComputation::Builder(TestName());\n auto input_shape = ShapeUtil::MakeShape(F32, {2, 4, 4});\n auto output_shape =\n ShapeUtil::MakeShape(F32, {2, 2, 2}, {true, false, false});\n auto dynamic_shape =\n ShapeUtil::MakeShape(F32, {2, 4, 4}, {true, false, false});\n Window window;\n WindowDimension* batch_dim = window.add_dimensions();\n batch_dim->set_size(1);\n batch_dim->set_stride(1);\n batch_dim->set_padding_low(0);\n batch_dim->set_padding_high(0);\n batch_dim->set_window_dilation(1);\n batch_dim->set_base_dilation(1);\n for (int64_t i = 0; i < 2; ++i) {\n WindowDimension* dim = window.add_dimensions();\n dim->set_size(2);\n dim->set_stride(2);\n dim->set_padding_low(0);\n dim->set_padding_high(0);\n dim->set_window_dilation(1);\n dim->set_base_dilation(1);\n }\n auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 0, input_shape, \"A\"));\n auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 1, scalar_shape_, \"size_param\"));\n a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n dynamic_shape, a_param, size_param, 0));\n auto init = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0)));\n auto* reduce_window =\n builder.AddInstruction(HloInstruction::CreateReduceWindow(\n output_shape, a_param, init, window, GetAdd()));\n module_->AddEntryComputation(builder.Build());\n SCOPED_TRACE(module_->ToString());\n TF_ASSERT_OK(RunInference());\n EXPECT_EQ(inference_->GetDynamicSize(reduce_window, {}, 0), size_param);\n}\nTEST_F(DynamicDimensionInferenceTest, SelectAndScatterTest) {\n auto builder = HloComputation::Builder(TestName());\n auto input_shape = 
ShapeUtil::MakeShape(F32, {2, 4, 4});\n auto source_shape = ShapeUtil::MakeShape(F32, {2, 2, 2});\n auto input_shape_dynamic =\n ShapeUtil::MakeShape(F32, {2, 4, 4}, {true, false, false});\n auto source_shape_dynamic =\n ShapeUtil::MakeShape(F32, {2, 2, 2}, {true, false, false});\n Window window;\n WindowDimension* batch_dim = window.add_dimensions();\n batch_dim->set_size(1);\n batch_dim->set_stride(1);\n batch_dim->set_padding_low(0);\n batch_dim->set_padding_high(0);\n batch_dim->set_window_dilation(1);\n batch_dim->set_base_dilation(1);\n for (int64_t i = 0; i < 2; ++i) {\n WindowDimension* dim = window.add_dimensions();\n dim->set_size(2);\n dim->set_stride(2);\n dim->set_padding_low(0);\n dim->set_padding_high(0);\n dim->set_window_dilation(1);\n dim->set_base_dilation(1);\n }\n auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 0, input_shape, \"A\"));\n auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 1, scalar_shape_, \"size_param\"));\n auto* source = builder.AddInstruction(HloInstruction::CreateParameter(\n 2, source_shape, \"B\"));\n a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n input_shape_dynamic, a_param, size_param, 0));\n source = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n source_shape_dynamic, source, size_param, 0));\n auto init = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0)));\n auto* sns = builder.AddInstruction(HloInstruction::CreateSelectAndScatter(\n input_shape_dynamic, a_param, GetGe(), window, source, init, GetAdd()));\n module_->AddEntryComputation(builder.Build());\n SCOPED_TRACE(module_->ToString());\n TF_ASSERT_OK(RunInference());\n EXPECT_EQ(inference_->GetDynamicSize(sns, {}, 0), size_param);\n}\nTEST_F(DynamicDimensionInferenceTest, ConcatTest) {\n auto builder = HloComputation::Builder(TestName());\n auto data_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 0, 
ShapeUtil::MakeShape(F32, {5, 7}), \"data_param_1\"));\n auto data_param_2 = builder.AddInstruction(HloInstruction::CreateParameter(\n 1, ShapeUtil::MakeShape(F32, {5, 8}), \"data_param_2\"));\n auto size_param = builder.AddInstruction(\n HloInstruction::CreateParameter(2, scalar_shape_, \"size_param\"));\n data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n ShapeUtil::MakeShape(F32, {5, 7}, {true, false}), data_param, size_param,\n 0));\n data_param_2 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n ShapeUtil::MakeShape(F32, {5, 8}, {true, false}), data_param_2,\n size_param, 0));\n auto* concat = builder.AddInstruction(HloInstruction::CreateConcatenate(\n ShapeUtil::MakeShape(F32, {5, 15}, {true, false}),\n {data_param, data_param_2}, 1));\n module_->AddEntryComputation(builder.Build());\n TF_ASSERT_OK(RunInference());\n EXPECT_EQ(inference_->GetDynamicSize(concat, {}, 0), size_param);\n}\nTEST_F(DynamicDimensionInferenceTest, SliceTest) {\n auto builder = HloComputation::Builder(TestName());\n auto dynamic_shape = ShapeUtil::MakeShape(F32, {5, 7}, {false, true});\n auto data_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeShape(F32, {5, 7}), \"data_param\"));\n auto size_param = builder.AddInstruction(\n HloInstruction::CreateParameter(1, scalar_shape_, \"size_param\"));\n data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n dynamic_shape, data_param, size_param, 1));\n auto* slice = builder.AddInstruction(HloInstruction::CreateSlice(\n dynamic_shape, data_param,\n {0, 0},\n {5, 7}, {1, 1}));\n module_->AddEntryComputation(builder.Build());\n TF_ASSERT_OK(RunInference());\n EXPECT_EQ(inference_->GetDynamicSize(slice, {}, 1), size_param);\n}\nTEST_F(DynamicDimensionInferenceTest, DynamicSliceTest) {\n auto builder = HloComputation::Builder(TestName());\n auto data_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeShape(F32, 
{5, 7}), \"data_param\"));\n auto size_param = builder.AddInstruction(\n HloInstruction::CreateParameter(1, scalar_shape_, \"size_param\"));\n std::vector params;\n for (int i = 0; i < 2; ++i) {\n params.push_back(builder.AddInstruction(HloInstruction::CreateParameter(\n i + 2, ShapeUtil::MakeShape(S32, {}), \"slice_indices\")));\n }\n data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n ShapeUtil::MakeShape(F32, {5, 7}, {true, false}), data_param, size_param,\n 0));\n auto* slice = builder.AddInstruction(HloInstruction::CreateDynamicSlice(\n ShapeUtil::MakeShape(F32, {5, 1}, {true, false}), data_param, params,\n {5, 1}));\n module_->AddEntryComputation(builder.Build());\n TF_ASSERT_OK(RunInference());\n EXPECT_EQ(inference_->GetDynamicSize(slice, {}, 0), size_param);\n}\nTEST_F(DynamicDimensionInferenceTest, SortTest) {\n auto builder = HloComputation::Builder(TestName());\n auto dynamic_shape = ShapeUtil::MakeShape(F32, {5, 7}, {true, false});\n auto data_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeShape(F32, {5, 7}), \"data_param\"));\n auto size_param = builder.AddInstruction(\n HloInstruction::CreateParameter(1, scalar_shape_, \"size_param\"));\n auto compare_builder = HloComputation::Builder(\"condition\");\n compare_builder.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeShape(F32, {}), \"param1\"));\n compare_builder.AddInstruction(HloInstruction::CreateParameter(\n 1, ShapeUtil::MakeShape(F32, {}), \"param2\"));\n compare_builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(false)));\n HloComputation* compare =\n module_->AddEmbeddedComputation(compare_builder.Build());\n data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n dynamic_shape, data_param, size_param, 0));\n auto* sort = builder.AddInstruction(\n HloInstruction::CreateSort(dynamic_shape, 1, {data_param}, compare,\n false));\n 
module_->AddEntryComputation(builder.Build());\n TF_ASSERT_OK(RunInference());\n EXPECT_EQ(inference_->GetDynamicSize(sort, {}, 0), size_param);\n}\nTEST_F(DynamicDimensionInferenceTest, MultiValueSortTest) {\n auto builder = HloComputation::Builder(TestName());\n auto shape = ShapeUtil::MakeShape(F32, {5, 7});\n auto dynamic_shape = ShapeUtil::MakeShape(F32, {5, 7}, {true, false});\n auto data_param = builder.AddInstruction(\n HloInstruction::CreateParameter(0, shape, \"data_param\"));\n auto size_param = builder.AddInstruction(\n HloInstruction::CreateParameter(1, scalar_shape_, \"size_param\"));\n auto compare_builder = HloComputation::Builder(\"condition\");\n compare_builder.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeShape(F32, {}), \"param1\"));\n compare_builder.AddInstruction(HloInstruction::CreateParameter(\n 1, ShapeUtil::MakeShape(F32, {}), \"param2\"));\n compare_builder.AddInstruction(HloInstruction::CreateParameter(\n 2, ShapeUtil::MakeShape(F32, {}), \"param3\"));\n compare_builder.AddInstruction(HloInstruction::CreateParameter(\n 3, ShapeUtil::MakeShape(F32, {}), \"param4\"));\n compare_builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(false)));\n HloComputation* compare =\n module_->AddEmbeddedComputation(compare_builder.Build());\n data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n dynamic_shape, data_param, size_param, 0));\n auto* sort = builder.AddInstruction(HloInstruction::CreateSort(\n ShapeUtil::MakeTupleShape({dynamic_shape, dynamic_shape}), 1,\n {data_param, data_param}, compare,\n false));\n module_->AddEntryComputation(builder.Build());\n TF_ASSERT_OK(RunInference());\n EXPECT_EQ(inference_->GetDynamicSize(sort, {0}, 0), size_param);\n EXPECT_EQ(inference_->GetDynamicSize(sort, {1}, 0), size_param);\n}\nTEST_F(DynamicDimensionInferenceTest, DynamicSliceSingleElementTest) {\n auto builder = HloComputation::Builder(TestName());\n auto data_param = 
builder.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeShape(F32, {5, 7}), \"data_param\"));\n auto* size_param = builder.AddInstruction(\n HloInstruction::CreateParameter(1, scalar_shape_, \"size_param\"));\n std::vector params;\n for (int i = 0; i < 2; ++i) {\n params.push_back(builder.AddInstruction(HloInstruction::CreateParameter(\n i + 2, ShapeUtil::MakeShape(S32, {}), \"slice_indices\")));\n }\n data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n ShapeUtil::MakeShape(F32, {5, 7}, {true, false}), data_param, size_param,\n 0));\n auto* slice = builder.AddInstruction(HloInstruction::CreateDynamicSlice(\n ShapeUtil::MakeShape(F32, {1, 1}), data_param, params,\n {1, 1}));\n module_->AddEntryComputation(builder.Build());\n TF_ASSERT_OK(RunInference());\n EXPECT_EQ(inference_->GetDynamicSize(slice, {}, 0), nullptr);\n}\nTEST_F(DynamicDimensionInferenceTest, InfersCustomOp) {\n auto builder = HloComputation::Builder(TestName());\n auto data_param = builder.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeShape(F32, {5, 7}), \"data_param\"));\n auto* size_param = builder.AddInstruction(\n HloInstruction::CreateParameter(1, scalar_shape_, \"size_param\"));\n data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n ShapeUtil::MakeShape(F32, {5, 7}, {true, false}), data_param, size_param,\n 0));\n builder.AddInstruction(HloInstruction::CreateCustomCall(\n ShapeUtil::MakeShape(F32, {1, 1}), {data_param}, \"MyCustomOp\", \"\"));\n module_->AddEntryComputation(builder.Build());\n bool handler_called = false;\n auto handler = [&](HloInstruction* hlo,\n DynamicDimensionInference* inference) {\n CHECK(inference != nullptr);\n CHECK(Cast(hlo) != nullptr);\n handler_called = true;\n return absl::OkStatus();\n };\n TF_ASSERT_OK(RunInference(nullptr, handler));\n EXPECT_TRUE(handler_called);\n}\nTEST_F(DynamicDimensionInferenceTest, DynamicReshapeOp) {\n auto builder = 
HloComputation::Builder(TestName());\n auto input = builder.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeShape(F32, {9}), \"data_input\"));\n auto six = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(6)));\n auto dynamic_input =\n builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n ShapeUtil::MakeShape(F32, {9}, {true}), input, six, 0));\n auto dynamic_size = builder.AddInstruction(HloInstruction::CreateParameter(\n 1, ShapeUtil::MakeShape(S32, {}), \"size_param\"));\n auto three = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(3)));\n auto dynamic_reshape =\n builder.AddInstruction(HloInstruction::CreateDynamicReshape(\n ShapeUtil::MakeShape(F32, {3, 3}, {false, true}), dynamic_input,\n {three, dynamic_size}));\n module_->AddEntryComputation(builder.Build());\n TF_ASSERT_OK(RunInference());\n EXPECT_EQ(inference_->GetDynamicSize(dynamic_reshape, {}, 0), nullptr);\n EXPECT_EQ(inference_->GetDynamicSize(dynamic_reshape, {}, 1), dynamic_size);\n}\nTEST_F(DynamicDimensionInferenceTest, ReshapeOpWithMultipleDynamicDimensions) {\n auto builder = HloComputation::Builder(TestName());\n auto input = builder.AddInstruction(HloInstruction::CreateParameter(\n 0, ShapeUtil::MakeShape(F32, {9, 2}), \"data_input\"));\n auto six = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(6)));\n input = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n ShapeUtil::MakeShape(F32, {9, 2}, {true, false}), input, six, 0));\n auto one = builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(1)));\n input = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(\n ShapeUtil::MakeShape(F32, {9, 2}, {true, true}), input, one, 1));\n auto dynamic_reshape = builder.AddInstruction(HloInstruction::CreateReshape(\n ShapeUtil::MakeShape(F32, {9, 1, 2}, {true, false, true}), input));\n 
module_->AddEntryComputation(builder.Build());\n TF_ASSERT_OK(RunInference());\n EXPECT_EQ(inference_->GetDynamicSize(dynamic_reshape, {}, 0), six);\n EXPECT_EQ(inference_->GetDynamicSize(dynamic_reshape, {}, 1), nullptr);\n EXPECT_EQ(inference_->GetDynamicSize(dynamic_reshape, {}, 2), one);\n}\nTEST_F(DynamicDimensionInferenceTest, HandleMapInDynamicDimensionInference) {\n const char* module_str = R\"(\nHloModule test_module\n%scatter-combiner.285 (p0.286: c128[], p1.287: c128[]) -> c128[] {\n %p0.286 = c128[] parameter(0)\n %p1.287 = c128[] parameter(1)\n ROOT %add.288 = c128[] add(c128[] %p0.286, c128[] %p1.287)\n}\n %while_body {\n %reshape.8 = s32[] parameter(4)\n %reshape.7 = c128[1]{0} parameter(3)\n %reduce = pred[] parameter(2)\n %concatenate = s32[1]{0} parameter(1)\n %slice.4 = s32[1]{0} slice(s32[1]{0} %concatenate), slice={[0 : 1]}\n %broadcast.7 = pred[1]{0} broadcast(pred[] %reduce), dimensions={}\n %param.1 = (s32[],c128[<=1]{0},s32[1]{0},c128[1]{0}) parameter(0)\n %get-tuple-element.2 = c128[<=1]{0} get-tuple-element((s32[],c128[<=1]{0},s32[1]{0},c128[1]{0}) %param.1), index=1\n %dynamic-slice.2 = c128[1]{0} dynamic-slice(c128[<=1]{0} %get-tuple-element.2,s32[] %reshape.8), dynamic_slice_sizes={1}\n %map = c128[1]{0} map(c128[1]{0} %dynamic-slice.2,c128[1]{0} %reshape.7), dimensions={0}, to_apply=%scatter-combiner.285\n %select = c128[1]{0} select(pred[1]{0} %broadcast.7,c128[1]{0} %map,c128[1]{0} %dynamic-slice.2)\n %reshape.9 = s32[] reshape(s32[1]{0} %slice.4)\n %dynamic-update-slice = c128[<=1]{0} dynamic-update-slice(c128[<=1]{0} %get-tuple-element.2,c128[1]{0} %select,s32[] %reshape.9)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnUnverifiedModule(module_str));\n TF_ASSERT_OK(RunInference());\n}\nTEST_F(DynamicDimensionInferenceTest, RuntimeShapeCheck) {\n const char* hlo = R\"(\nHloModule module\nENTRY computation {\n a = f32[20,20] parameter(0)\n a_size_1 = s32[] parameter(1)\n a_size_2 = s32[] parameter(2)\n a_dynamic_1 = 
f32[<=20,20] set-dimension-size(a, a_size_1), dimensions={0}\n a_dynamic_2 = f32[<=20,<=20] set-dimension-size(a_dynamic_1, a_size_2), dimensions={1}\n b = f32[20,20] parameter(3)\n b_size_1 = s32[] parameter(4)\n b_size_2 = s32[] parameter(5)\n b_dynamic_1 = f32[<=20,20] set-dimension-size(b, b_size_1), dimensions={0}\n b_dynamic_2 = f32[<=20,<=20] set-dimension-size(b_dynamic_1, b_size_2), dimensions={1}\n ROOT f = add(a_dynamic_2, b_dynamic_2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo));\n TF_ASSERT_OK(RunInference(\n nullptr,\n nullptr, DynamicDimensionInference::ShapeCheckMode::kRuntime,\n [&](HloInstruction* constraint) {\n constraint->parent()->AddInstruction(HloInstruction::CreateCustomCall(\n ShapeUtil::MakeTokenShape(), {constraint},\n \"__xla__assert\",\n std::string{}, API_VERSION_STATUS_RETURNING));\n }));\n absl::StatusOr filecheck_result = RunFileCheck(module_->ToString({}),\n R\"(\n )\");\n TF_ASSERT_OK(filecheck_result.status());\n EXPECT_TRUE(*filecheck_result);\n}\nTEST_F(DynamicDimensionInferenceTest, NestedControlFlow) {\n const char* hlo = R\"(\nHloModule tfcompile.377, entry_computation_layout={(s32[], f32[250]{0}, pred[], pred[], s32[], pred[], s32[], pred[])->(f32[3]{0})}\ncond_2_Sum-reduction.17 {\n x.18 = f32[] parameter(0)\n y.19 = f32[] parameter(1)\n ROOT add.20 = f32[] add(x.18, y.19)\n}\ncond_2_cond_true_214__.21 {\n arg_tuple.22 = () parameter(0)\n constant.23 = s32[] constant(1)\n reshape.24 = s32[] reshape(constant.23)\n ROOT tuple.25 = (s32[]) tuple(constant.23)\n}\ncond_2_cond_false_215__.26 {\n arg_tuple.27 = () parameter(0)\n constant.28 = s32[] constant(0)\n reshape.29 = s32[] reshape(constant.28)\n ROOT tuple.30 = (s32[]) tuple(constant.28)\n}\ncond_2_true_195__.31 {\n arg_tuple.32 = (s32[], f32[250]{0}) parameter(0)\n get-tuple-element.33 = s32[] get-tuple-element(arg_tuple.32), index=0\n constant.35 = s32[] constant(20)\n minimum.36 = s32[] minimum(get-tuple-element.33, constant.35)\n 
reshape.37 = s32[1]{0} reshape(minimum.36)\n concatenate.38 = s32[1]{0} concatenate(reshape.37), dimensions={0}\n slice.48 = s32[1]{0} slice(concatenate.38), slice={[0:1]}\n reshape.49 = s32[] reshape(reshape.37)\n constant.43 = s32[] constant(0)\n compare.50 = pred[] compare(minimum.36, constant.43), direction=LT\n constant.44 = s32[] constant(250)\n add.51 = s32[] add(constant.44, minimum.36)\n select.52 = s32[] select(compare.50, add.51, minimum.36)\n constant.45 = s32[1]{0} constant({0})\n slice.46 = s32[1]{0} slice(constant.45), slice={[0:1]}\n reshape.47 = s32[] reshape(slice.46)\n subtract.53 = s32[] subtract(select.52, reshape.47)\n maximum.54 = s32[] maximum(subtract.53, constant.43)\n convert.55 = s32[] convert(maximum.54)\n get-tuple-element.34 = f32[250]{0} get-tuple-element(arg_tuple.32), index=1\n constant.39 = f32[] constant(0)\n pad.40 = f32[500]{0} pad(get-tuple-element.34, constant.39), padding=0_250\n constant.41 = s32[] constant(500)\n set-dimension-size.42 = f32[500]{0} set-dimension-size(pad.40, constant.41), dimensions={0}\n dynamic-slice.56 = f32[250]{0} dynamic-slice(set-dimension-size.42, reshape.47), dynamic_slice_sizes={250}\n reshape.57 = f32[250]{0} reshape(dynamic-slice.56)\n set-dimension-size.58 = f32[<=250]{0} set-dimension-size(dynamic-slice.56, maximum.54), dimensions={0}\n constant.59 = f32[] constant(1)\n broadcast.60 = f32[250]{0} broadcast(constant.59), dimensions={}\n compare.61 = pred[<=250]{0} compare(set-dimension-size.58, broadcast.60), direction=GE\n convert.62 = f32[<=250]{0} convert(compare.61)\n convert.63 = f32[<=250]{0} convert(convert.62)\n constant.64 = f32[] constant(0)\n convert.65 = f32[] convert(constant.64)\n reduce.66 = f32[] reduce(convert.62, constant.64), dimensions={0}, to_apply=cond_2_Sum-reduction.17\n convert.67 = f32[] convert(reduce.66)\n reshape.73 = f32[] reshape(reduce.66)\n constant.68 = f32[] constant(6)\n compare.69 = pred[] compare(reduce.66, constant.68), direction=GE\n tuple.70 = () 
tuple()\n conditional.71 = (s32[]) conditional(compare.69, tuple.70, tuple.70), true_computation=cond_2_cond_true_214__.21, false_computation=cond_2_cond_false_215__.26\n get-tuple-element.72 = s32[] get-tuple-element(conditional.71), index=0\n reshape.74 = s32[] reshape(get-tuple-element.72)\n ROOT tuple.75 = (f32[], s32[]) tuple(reduce.66, get-tuple-element.72)\n} \ncond_2_false_196__.76 {\n arg_tuple.77 = (s32[], f32[250]{0}) parameter(0)\n constant.80 = f32[] constant(0)\n reshape.82 = f32[] reshape(constant.80)\n constant.81 = s32[] constant(0)\n reshape.83 = s32[] reshape(constant.81)\n ROOT tuple.84 = (f32[], s32[]) tuple(constant.80, constant.81)\n} \ncond_true_10__.85 {\n arg_tuple.86 = (pred[], pred[], pred[]) parameter(0)\n get-tuple-element.87 = pred[] get-tuple-element(arg_tuple.86), index=0\n reshape.90 = pred[] reshape(get-tuple-element.87)\n ROOT tuple.91 = (pred[]) tuple(get-tuple-element.87)\n}\ncond_cond_true_16__.92 {\n arg_tuple.93 = (pred[], pred[]) parameter(0)\n get-tuple-element.94 = pred[] get-tuple-element(arg_tuple.93), index=0\n reshape.96 = pred[] reshape(get-tuple-element.94)\n ROOT tuple.97 = (pred[]) tuple(get-tuple-element.94)\n}\ncond_cond_false_17__.98 {\n arg_tuple.99 = (pred[], pred[]) parameter(0)\n get-tuple-element.101 = pred[] get-tuple-element(arg_tuple.99), index=1\n reshape.102 = pred[] reshape(get-tuple-element.101)\n ROOT tuple.103 = (pred[]) tuple(get-tuple-element.101)\n}\ncond_false_11__.104 {\n arg_tuple.105 = (pred[], pred[], pred[]) parameter(0)\n get-tuple-element.107 = pred[] get-tuple-element(arg_tuple.105), index=1\n get-tuple-element.108 = pred[] get-tuple-element(arg_tuple.105), index=2\n tuple.109 = (pred[], pred[]) tuple(get-tuple-element.107, get-tuple-element.108)\n conditional.110 = (pred[]) conditional(get-tuple-element.107, tuple.109, tuple.109), true_computation=cond_cond_true_16__.92, false_computation=cond_cond_false_17__.98\n get-tuple-element.111 = pred[] get-tuple-element(conditional.110), 
index=0\n reshape.112 = pred[] reshape(get-tuple-element.111)\n ROOT tuple.113 = (pred[]) tuple(get-tuple-element.111)\n} \ncond_1_map_while_cond_true_82__.114 {\n arg_tuple.115 = (f32[]) parameter(0)\n constant.117 = f32[] constant(0)\n reshape.118 = f32[] reshape(constant.117)\n ROOT tuple.119 = (f32[]) tuple(constant.117)\n}\ncond_1_map_while_cond_cond_true_91__.120 {\n constant.123 = f32[] constant(0.1)\n arg_tuple.121 = (f32[]) parameter(0)\n get-tuple-element.122 = f32[] get-tuple-element(arg_tuple.121), index=0\n multiply.124 = f32[] multiply(constant.123, get-tuple-element.122)\n constant.125 = f32[] constant(0)\n add.126 = f32[] add(multiply.124, constant.125)\n constant.127 = f32[] constant(0.9)\n divide.128 = f32[] divide(add.126, constant.127)\n reshape.129 = f32[] reshape(divide.128)\n ROOT tuple.130 = (f32[]) tuple(divide.128)\n} \ncond_1_map_while_cond_cond_cond_true_106__.131 {\n constant.134 = f32[] constant(0.8)\n arg_tuple.132 = (f32[]) parameter(0)\n get-tuple-element.133 = f32[] get-tuple-element(arg_tuple.132), index=0\n multiply.135 = f32[] multiply(constant.134, get-tuple-element.133)\n constant.136 = f32[] constant(-0.711)\n add.137 = f32[] add(multiply.135, constant.136)\n constant.138 = f32[] constant(0.09)\n divide.139 = f32[] divide(add.137, constant.138)\n reshape.140 = f32[] reshape(divide.139)\n ROOT tuple.141 = (f32[]) tuple(divide.139)\n} \ncond_1_map_while_cond_cond_cond_cond_true_121__.142 {\n constant.145 = f32[] constant(0.2)\n arg_tuple.143 = (f32[]) parameter(0)\n get-tuple-element.144 = f32[] get-tuple-element(arg_tuple.143), index=0\n multiply.146 = f32[] multiply(constant.145, get-tuple-element.144)\n constant.147 = f32[] constant(-0.18)\n add.148 = f32[] add(multiply.146, constant.147)\n constant.149 = f32[] constant(0.02)\n divide.150 = f32[] divide(add.148, constant.149)\n reshape.151 = f32[] reshape(divide.150)\n ROOT tuple.152 = (f32[]) tuple(divide.150)\n} \ncond_1_map_while_cond_cond_cond_cond_cond_true_136__.153 
{\n constant.156 = f32[] constant(0.1)\n arg_tuple.154 = (f32[]) parameter(0)\n get-tuple-element.155 = f32[] get-tuple-element(arg_tuple.154), index=0\n multiply.157 = f32[] multiply(constant.156, get-tuple-element.155)\n constant.158 = f32[] constant(108.788)\n add.159 = f32[] add(multiply.157, constant.158)\n constant.160 = f32[] constant(98.99)\n divide.161 = f32[] divide(add.159, constant.160)\n reshape.162 = f32[] reshape(divide.161)\n ROOT tuple.163 = (f32[]) tuple(divide.161)\n} \ncond_1_map_while_cond_cond_cond_cond_cond_false_137__.164 {\n arg_tuple.165 = (f32[]) parameter(0)\n constant.167 = f32[] constant(1.2)\n reshape.168 = f32[] reshape(constant.167)\n ROOT tuple.169 = (f32[]) tuple(constant.167)\n}\ncond_1_map_while_cond_cond_cond_cond_false_122__.170 {\n arg_tuple.171 = (f32[]) parameter(0)\n get-tuple-element.172 = f32[] get-tuple-element(arg_tuple.171), index=0\n constant.173 = f32[] constant(100)\n compare.174 = pred[] compare(get-tuple-element.172, constant.173), direction=LE\n tuple.175 = (f32[]) tuple(get-tuple-element.172)\n conditional.176 = (f32[]) conditional(compare.174, tuple.175, tuple.175), true_computation=cond_1_map_while_cond_cond_cond_cond_cond_true_136__.153, false_computation=cond_1_map_while_cond_cond_cond_cond_cond_false_137__.164\n get-tuple-element.177 = f32[] get-tuple-element(conditional.176), index=0\n reshape.178 = f32[] reshape(get-tuple-element.177)\n ROOT tuple.179 = (f32[]) tuple(get-tuple-element.177)\n} \ncond_1_map_while_cond_cond_cond_false_107__.180 {\n arg_tuple.181 = (f32[]) parameter(0)\n get-tuple-element.182 = f32[] get-tuple-element(arg_tuple.181), index=0\n constant.183 = f32[] constant(1.01)\n compare.184 = pred[] compare(get-tuple-element.182, constant.183), direction=LE\n tuple.185 = (f32[]) tuple(get-tuple-element.182)\n conditional.186 = (f32[]) conditional(compare.184, tuple.185, tuple.185), true_computation=cond_1_map_while_cond_cond_cond_cond_true_121__.142, 
false_computation=cond_1_map_while_cond_cond_cond_cond_false_122__.170\n get-tuple-element.187 = f32[] get-tuple-element(conditional.186), index=0\n reshape.188 = f32[] reshape(get-tuple-element.187)\n ROOT tuple.189 = (f32[]) tuple(get-tuple-element.187)\n} \ncond_1_map_while_cond_cond_false_92__.190 {\n arg_tuple.191 = (f32[]) parameter(0)\n get-tuple-element.192 = f32[] get-tuple-element(arg_tuple.191), index=0\n constant.193 = f32[] constant(0.99)\n compare.194 = pred[] compare(get-tuple-element.192, constant.193), direction=LE\n tuple.195 = (f32[]) tuple(get-tuple-element.192)\n conditional.196 = (f32[]) conditional(compare.194, tuple.195, tuple.195), true_computation=cond_1_map_while_cond_cond_cond_true_106__.131, false_computation=cond_1_map_while_cond_cond_cond_false_107__.180\n get-tuple-element.197 = f32[] get-tuple-element(conditional.196), index=0\n reshape.198 = f32[] reshape(get-tuple-element.197)\n ROOT tuple.199 = (f32[]) tuple(get-tuple-element.197)\n} \ncond_1_map_while_cond_false_83__.200 {\n arg_tuple.201 = (f32[]) parameter(0)\n get-tuple-element.202 = f32[] get-tuple-element(arg_tuple.201), index=0\n constant.203 = f32[] constant(0.9)\n compare.204 = pred[] compare(get-tuple-element.202, constant.203), direction=LE\n tuple.205 = (f32[]) tuple(get-tuple-element.202)\n conditional.206 = (f32[]) conditional(compare.204, tuple.205, tuple.205), true_computation=cond_1_map_while_cond_cond_true_91__.120, false_computation=cond_1_map_while_cond_cond_false_92__.190\n get-tuple-element.207 = f32[] get-tuple-element(conditional.206), index=0\n reshape.208 = f32[] reshape(get-tuple-element.207)\n ROOT tuple.209 = (f32[]) tuple(get-tuple-element.207)\n} \ncond_1_map_while_body_59__.210 {\n arg_tuple.211 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], (f32[<=250]{0}, s32[])) parameter(0)\n get-tuple-element.212 = s32[] get-tuple-element(arg_tuple.211), index=0\n constant.218 = s32[] constant(1)\n add.219 = s32[] add(get-tuple-element.212, 
constant.218)\n reshape.239 = s32[] reshape(add.219)\n get-tuple-element.213 = s32[] get-tuple-element(arg_tuple.211), index=1\n reshape.240 = s32[] reshape(get-tuple-element.213)\n get-tuple-element.214 = s32[] get-tuple-element(arg_tuple.211), index=2\n constant.220 = s32[] constant(1)\n add.221 = s32[] add(get-tuple-element.214, constant.220)\n reshape.241 = s32[] reshape(add.221)\n get-tuple-element.216 = s32[] get-tuple-element(arg_tuple.211), index=4\n reshape.242 = s32[] reshape(get-tuple-element.216)\n get-tuple-element.215 = (f32[<=250]{0}, s32[]) get-tuple-element(arg_tuple.211), index=3\n get-tuple-element.235 = f32[<=250]{0} get-tuple-element(get-tuple-element.215), index=0\n get-tuple-element.217 = (f32[<=250]{0}, s32[]) get-tuple-element(arg_tuple.211), index=5\n get-tuple-element.223 = f32[<=250]{0} get-tuple-element(get-tuple-element.217), index=0\n dynamic-slice.224 = f32[1]{0} dynamic-slice(get-tuple-element.223, get-tuple-element.214), dynamic_slice_sizes={1}\n reshape.225 = f32[] reshape(dynamic-slice.224)\n constant.226 = f32[] constant(0)\n compare.227 = pred[] compare(reshape.225, constant.226), direction=LE\n tuple.228 = (f32[]) tuple(reshape.225)\n conditional.229 = (f32[]) conditional(compare.227, tuple.228, tuple.228), true_computation=cond_1_map_while_cond_true_82__.114, false_computation=cond_1_map_while_cond_false_83__.200\n get-tuple-element.230 = f32[] get-tuple-element(conditional.229), index=0\n reshape.233 = f32[1]{0} reshape(get-tuple-element.230)\n dynamic-update-slice.236 = f32[<=250]{0} dynamic-update-slice(get-tuple-element.235, reshape.233, get-tuple-element.214)\n get-tuple-element.237 = s32[] get-tuple-element(get-tuple-element.215), index=1\n tuple.238 = (f32[<=250]{0}, s32[]) tuple(dynamic-update-slice.236, get-tuple-element.237)\n ROOT tuple.243 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], (f32[<=250]{0}, s32[])) tuple(add.219, get-tuple-element.213, add.221, tuple.238, get-tuple-element.216, 
get-tuple-element.217)\n} \ncond_wrapper.257 {\n inputs.258 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], (f32[<=250]{0}, s32[])) parameter(0)\n get-tuple-element.0 = s32[] get-tuple-element(inputs.258), index=0\n get-tuple-element.1 = s32[] get-tuple-element(inputs.258), index=1\n compare.0 = pred[] compare(get-tuple-element.0, get-tuple-element.1), direction=LT\n get-tuple-element.2 = s32[] get-tuple-element(inputs.258), index=2\n get-tuple-element.3 = s32[] get-tuple-element(inputs.258), index=4\n compare.1 = pred[] compare(get-tuple-element.2, get-tuple-element.3), direction=LT\n and.0 = pred[] and(compare.0, compare.1)\n tuple.0 = (pred[]) tuple(and.0)\n ROOT get-tuple-element.260 = pred[] get-tuple-element(tuple.0), index=0\n reshape.0 = pred[] reshape(and.0)\n} \ncond_1_Sum-reduction.261 {\n x.262 = f32[] parameter(0)\n y.263 = f32[] parameter(1)\n ROOT add.264 = f32[] add(x.262, y.263)\n}\ncond_1_true_36__.265 {\n arg_tuple.266 = (s32[], f32[250]{0}) parameter(0)\n get-tuple-element.267 = s32[] get-tuple-element(arg_tuple.266), index=0\n reshape.269 = s32[1]{0} reshape(get-tuple-element.267)\n concatenate.270 = s32[1]{0} concatenate(reshape.269), dimensions={0}\n slice.280 = s32[1]{0} slice(concatenate.270), slice={[0:1]}\n reshape.281 = s32[] reshape(reshape.269)\n constant.275 = s32[] constant(0)\n compare.282 = pred[] compare(get-tuple-element.267, constant.275), direction=LT\n constant.276 = s32[] constant(250)\n add.283 = s32[] add(constant.276, get-tuple-element.267)\n select.284 = s32[] select(compare.282, add.283, get-tuple-element.267)\n constant.277 = s32[1]{0} constant({0})\n slice.278 = s32[1]{0} slice(constant.277), slice={[0:1]}\n reshape.279 = s32[] reshape(slice.278)\n subtract.285 = s32[] subtract(select.284, reshape.279)\n maximum.286 = s32[] maximum(subtract.285, constant.275)\n convert.287 = s32[] convert(maximum.286)\n get-tuple-element.268 = f32[250]{0} get-tuple-element(arg_tuple.266), index=1\n constant.271 = f32[] 
constant(0)\n pad.272 = f32[500]{0} pad(get-tuple-element.268, constant.271), padding=0_250\n constant.273 = s32[] constant(500)\n set-dimension-size.274 = f32[500]{0} set-dimension-size(pad.272, constant.273), dimensions={0}\n dynamic-slice.288 = f32[250]{0} dynamic-slice(set-dimension-size.274, reshape.279), dynamic_slice_sizes={250}\n reshape.289 = f32[250]{0} reshape(dynamic-slice.288)\n set-dimension-size.290 = f32[<=250]{0} set-dimension-size(dynamic-slice.288, maximum.286), dimensions={0}\n get-dimension-size.291 = s32[] get-dimension-size(set-dimension-size.290), dimensions={0}\n convert.292 = s32[] convert(get-dimension-size.291)\n broadcast.293 = s32[1]{0} broadcast(get-dimension-size.291), dimensions={}\n concatenate.294 = s32[1]{0} concatenate(broadcast.293), dimensions={0}\n slice.295 = s32[1]{0} slice(concatenate.294), slice={[0:1]}\n reshape.296 = s32[] reshape(broadcast.293)\n constant.309 = s32[] constant(0)\n constant.310 = s32[] constant(0)\n constant.312 = f32[] constant(0)\n broadcast.313 = f32[250]{0} broadcast(constant.312), dimensions={}\n constant.302 = s32[] constant(0)\n broadcast.303 = s32[250]{0} broadcast(constant.302), dimensions={}\n set-dimension-size.304 = s32[<=250]{0} set-dimension-size(broadcast.303, get-dimension-size.291), dimensions={0}\n get-dimension-size.311 = s32[] get-dimension-size(set-dimension-size.304), dimensions={0}\n set-dimension-size.314 = f32[<=250]{0} set-dimension-size(broadcast.313, get-dimension-size.311), dimensions={0}\n constant.315 = s32[] constant(0)\n tuple.316 = (f32[<=250]{0}, s32[]) tuple(set-dimension-size.314, constant.315)\n constant.305 = s32[] constant(250)\n tuple.306 = (f32[<=250]{0}, s32[]) tuple(set-dimension-size.290, constant.305)\n tuple.317 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], (f32[<=250]{0}, s32[])) tuple(constant.309, get-dimension-size.291, constant.310, tuple.316, get-dimension-size.291, tuple.306)\n while.318 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], 
(f32[<=250]{0}, s32[])) while(tuple.317), condition=cond_wrapper.257, body=cond_1_map_while_body_59__.210\n get-tuple-element.319 = s32[] get-tuple-element(while.318), index=0\n get-tuple-element.320 = s32[] get-tuple-element(while.318), index=1\n get-tuple-element.321 = s32[] get-tuple-element(while.318), index=2\n get-tuple-element.322 = (f32[<=250]{0}, s32[]) get-tuple-element(while.318), index=3\n get-tuple-element.323 = s32[] get-tuple-element(while.318), index=4\n get-tuple-element.324 = (f32[<=250]{0}, s32[]) get-tuple-element(while.318), index=5\n tuple.325 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], (f32[<=250]{0}, s32[])) tuple(get-tuple-element.319, get-tuple-element.320, get-tuple-element.321, get-tuple-element.322, get-tuple-element.323, get-tuple-element.324)\n get-tuple-element.329 = (f32[<=250]{0}, s32[]) get-tuple-element(tuple.325), index=3\n get-tuple-element.332 = f32[<=250]{0} get-tuple-element(get-tuple-element.329), index=0\n convert.333 = f32[<=250]{0} convert(get-tuple-element.332)\n constant.334 = f32[] constant(0)\n convert.335 = f32[] convert(constant.334)\n reduce.336 = f32[] reduce(get-tuple-element.332, constant.334), dimensions={0}, to_apply=cond_1_Sum-reduction.261\n convert.337 = f32[] convert(reduce.336)\n reshape.338 = f32[] reshape(reduce.336)\n ROOT tuple.339 = (f32[]) tuple(reduce.336)\n} \ncond_1_false_37__.340 {\n arg_tuple.341 = (s32[], f32[250]{0}) parameter(0)\n constant.344 = f32[] constant(0)\n reshape.345 = f32[] reshape(constant.344)\n ROOT tuple.346 = (f32[]) tuple(constant.344)\n}\nENTRY tfcompile.377 {\n arg6.7 = s32[] parameter(6), parameter_replication={false}\n arg0.1 = s32[] parameter(0), parameter_replication={false}\n reshape.9 = s32[] reshape(arg0.1)\n arg1.2 = f32[250]{0} parameter(1), parameter_replication={false}\n reshape.10 = f32[250]{0} reshape(arg1.2)\n arg2.3 = pred[] parameter(2), parameter_replication={false}\n reshape.11 = pred[] reshape(arg2.3)\n arg3.4 = pred[] parameter(3), 
parameter_replication={false}\n reshape.12 = pred[] reshape(arg3.4)\n arg4.5 = s32[] parameter(4), parameter_replication={false}\n reshape.13 = s32[] reshape(arg4.5)\n arg5.6 = pred[] parameter(5), parameter_replication={false}\n reshape.14 = pred[] reshape(arg5.6)\n arg7.8 = pred[] parameter(7), parameter_replication={false}\n reshape.16 = pred[] reshape(arg7.8)\n tuple.1 = (s32[], f32[250]{0}) tuple(arg0.1, arg1.2)\n conditional.0 = (f32[], s32[]) conditional(arg2.3, tuple.1, tuple.1), true_computation=cond_2_true_195__.31, false_computation=cond_2_false_196__.76\n get-tuple-element.4 = f32[] get-tuple-element(conditional.0), index=0\n reshape.1 = f32[1]{0} reshape(get-tuple-element.4)\n get-tuple-element.5 = s32[] get-tuple-element(conditional.0), index=1\n convert.0 = f32[] convert(get-tuple-element.5)\n reshape.2 = f32[1]{0} reshape(convert.0)\n tuple.2 = (pred[], pred[], pred[]) tuple(arg3.4, arg5.6, arg7.8)\n conditional.1 = (pred[]) conditional(arg3.4, tuple.2, tuple.2), true_computation=cond_true_10__.85, false_computation=cond_false_11__.104\n get-tuple-element.6 = pred[] get-tuple-element(conditional.1), index=0\n tuple.3 = (s32[], f32[250]{0}) tuple(arg4.5, arg1.2)\n conditional.2 = (f32[]) conditional(get-tuple-element.6, tuple.3, tuple.3), true_computation=cond_1_true_36__.265, false_computation=cond_1_false_37__.340\n get-tuple-element.7 = f32[] get-tuple-element(conditional.2), index=0\n reshape.3 = f32[1]{0} reshape(get-tuple-element.7)\n concatenate.0 = f32[3]{0} concatenate(reshape.1, reshape.2, reshape.3), dimensions={0}\n tuple.4 = (f32[3]{0}) tuple(concatenate.0)\n get-tuple-element.374 = f32[3]{0} get-tuple-element(tuple.4), index=0\n reshape.375 = f32[3]{0} reshape(get-tuple-element.374)\n ROOT tuple.376 = (f32[3]{0}) tuple(get-tuple-element.374)\n reshape.4 = f32[3]{0} reshape(concatenate.0)\n} \n)\";\n TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo));\n TF_ASSERT_OK(RunInference());\n}\n} \n} "},"Code 
Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_dimension_inference.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_dimension_inference_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1167,"cells":{"ID":{"kind":"string","value":"79bd5411-3f18-46a2-ad61-ae5728c586c1"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"call_inliner"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/call_inliner.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/call_inliner_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/call_inliner.h\"\n#include \n#include \n#include \n#include \n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/match.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/dfs_hlo_visitor_with_default.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/ir/hlo_sharding_metadata.h\"\n#include \"xla/service/call_graph.h\"\n#include \"xla/service/hlo_dce.h\"\n#include \"xla/service/hlo_domain_isolator.h\"\n#include \"xla/service/spmd/shardy/constants.h\"\n#include \"xla/status_macros.h\"\n#include \"xla/util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nclass SubcomputationInsertionVisitor : public DfsHloVisitorWithDefault {\n public:\n explicit SubcomputationInsertionVisitor(HloInstruction* call)\n : 
call_(call), outer_(call->parent()) {\n CHECK_EQ(HloOpcode::kCall, call_->opcode());\n }\n absl::Status DefaultAction(HloInstruction* hlo) override {\n std::vector new_operands;\n for (HloInstruction* operand : hlo->operands()) {\n TF_ASSIGN_OR_RETURN(HloInstruction * new_operand, Resolve(operand));\n new_operands.push_back(new_operand);\n }\n VLOG(1) << \"Cloning HLO and adding to caller: \" << hlo->ToString();\n auto new_hlo = hlo->CloneWithNewOperands(hlo->shape(), new_operands);\n HloInstruction* new_hlo_pointer =\n outer_->AddInstruction(std::move(new_hlo));\n TF_RETURN_IF_ERROR(NoteMapping(hlo, new_hlo_pointer));\n for (HloInstruction* control_predecessor : hlo->control_predecessors()) {\n TF_ASSIGN_OR_RETURN(HloInstruction * new_control_predecessor,\n Resolve(control_predecessor));\n TF_RETURN_IF_ERROR(\n new_control_predecessor->AddControlDependencyTo(new_hlo_pointer));\n }\n return absl::OkStatus();\n }\n absl::Status HandleParameter(HloInstruction* parameter) override {\n TF_RETURN_IF_ERROR(NoteMapping(\n parameter, call_->mutable_operand(parameter->parameter_number())));\n return absl::OkStatus();\n }\n absl::Status FinishVisit(HloInstruction* root) override {\n TF_ASSIGN_OR_RETURN(HloInstruction * new_root, Resolve(root));\n VLOG(1) << \"Replacing all uses of \" << call_->ToString()\n << \" with new root \" << new_root->ToString();\n return outer_->ReplaceInstruction(call_, new_root);\n }\n CallInliner::InlinedInstructionMap ConsumeInstructionMap() {\n return std::move(subcomputation_hlo_to_new_hlo_);\n }\n private:\n absl::StatusOr Resolve(HloInstruction* subcomputation_hlo) {\n auto it = subcomputation_hlo_to_new_hlo_.find(subcomputation_hlo);\n if (it == subcomputation_hlo_to_new_hlo_.end()) {\n return NotFound(\n \"Could not find mapping from subcomputation HLO %s to a cloned HLO.\",\n subcomputation_hlo->ToString());\n }\n return it->second;\n }\n absl::Status NoteMapping(HloInstruction* subcomputation_hlo,\n HloInstruction* new_hlo) {\n auto 
result = subcomputation_hlo_to_new_hlo_.insert(\n std::make_pair(subcomputation_hlo, new_hlo));\n TF_RET_CHECK(result.second)\n << \"A mapping for the subcomputation HLO is already present.\";\n return absl::OkStatus();\n }\n HloInstruction* call_;\n HloComputation* outer_;\n CallInliner::InlinedInstructionMap subcomputation_hlo_to_new_hlo_;\n};\nbool InlineUnderShardy(HloInstruction* instruction) {\n return !(instruction->GetModule()->config().use_shardy_partitioner() &&\n (absl::StrContains(instruction->to_apply()->name(), \"shmap_body\") ||\n absl::StartsWith(instruction->to_apply()->name(),\n sdy::kManualComputationBodyFuncName.str())));\n}\n} \n absl::StatusOr\nCallInliner::Inline(HloInstruction* call) {\n TF_RET_CHECK(call->opcode() == HloOpcode::kCall)\n << \"Instruction was not a call op: \" << call->opcode();\n if (call->is_composite()) {\n FrontendAttributes frontend_attributes = call->frontend_attributes();\n frontend_attributes.mutable_map()->erase(\"composite.name\");\n frontend_attributes.mutable_map()->erase(\"composite.attributes\");\n frontend_attributes.mutable_map()->erase(\"composite.version\");\n call->set_frontend_attributes(frontend_attributes);\n }\n const auto& callees = call->called_computations();\n TF_RET_CHECK(callees.size() == 1);\n HloComputation* callee = callees[0];\n if (call->has_frontend_attributes()) {\n const FrontendAttributes& call_attributes = call->frontend_attributes();\n std::string has_fuse =\n call_attributes.map().contains(\"MUST_FUSE\") ? \"MUST_FUSE\"\n : call_attributes.map().contains(\"MAXIMAL_FUSE\") ? 
\"MAXIMAL_FUSE\"\n : \"\";\n if (!has_fuse.empty()) {\n for (auto instruction : callee->instructions()) {\n if (instruction->IsFusible()) {\n FrontendAttributes frontend_attributes =\n instruction->frontend_attributes();\n frontend_attributes.mutable_map()->insert(\n {has_fuse, call_attributes.map().at(has_fuse)});\n instruction->set_frontend_attributes(frontend_attributes);\n }\n }\n }\n }\n SubcomputationInsertionVisitor visitor(call);\n TF_RETURN_IF_ERROR(callee->Accept(&visitor));\n return visitor.ConsumeInstructionMap();\n}\nbool CallInliner::IsInlineableCallOp(HloInstruction* instruction) const {\n return instruction->opcode() == HloOpcode::kCall &&\n !instruction->has_backend_config() &&\n !instruction->parent()->IsAsyncComputation() &&\n InlineUnderShardy(instruction);\n}\nabsl::StatusOr CallInliner::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n std::unique_ptr call_graph = CallGraph::Build(module);\n bool did_mutate = false;\n TF_RETURN_IF_ERROR(call_graph->VisitNodes([&](const CallGraphNode& node)\n -> absl::Status {\n if (!HloInstruction::IsThreadIncluded(\n node.computation()->execution_thread(), execution_threads)) {\n return absl::OkStatus();\n }\n VLOG(1) << \"Visiting node: \" << node.ToString();\n for (HloInstruction* instruction :\n node.computation()->MakeInstructionPostOrder()) {\n if (IsInlineableCallOp(instruction)) {\n const auto& callees = instruction->called_computations();\n TF_RET_CHECK(callees.size() == 1);\n if (!single_call_site_ || call_graph->GetNode(instruction->to_apply())\n .caller_callsites()\n .size() == 1) {\n TF_ASSIGN_OR_RETURN(CallInliner::InlinedInstructionMap inline_map,\n Inline(instruction));\n if (update_domain_) {\n HloDomainIsolator isolator(\n []() { return ShardingDomainCreator{}; });\n for (const auto& [call_inst, inlined_inst] : inline_map) {\n TF_RETURN_IF_ERROR(isolator.UpdateDomains(inlined_inst).status());\n }\n }\n did_mutate = true;\n }\n }\n }\n return absl::OkStatus();\n 
}));\n if (did_mutate) {\n TF_RETURN_IF_ERROR(HloDCE().Run(module, execution_threads).status());\n }\n return did_mutate;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/call_inliner.h\"\n#include \n#include \n#include \"absl/log/log.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/test.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace op = xla::testing::opcode_matchers;\nnamespace xla {\nnamespace {\nusing CallInlinerTest = HloTestBase;\nTEST_F(CallInlinerTest, ControlDependenciesAreCarriedToCaller) {\n HloComputation::Builder inner(TestName() + \".inner\");\n HloInstruction* zero = inner.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(24.0f)));\n HloInstruction* one = inner.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(42.0f)));\n TF_ASSERT_OK(zero->AddControlDependencyTo(one));\n auto module = CreateNewVerifiedModule();\n HloComputation* inner_computation =\n module->AddEmbeddedComputation(inner.Build());\n HloComputation::Builder outer(TestName() + \".outer\");\n Shape r0f32 = ShapeUtil::MakeShape(F32, {});\n outer.AddInstruction(\n HloInstruction::CreateCall(r0f32, {}, inner_computation));\n auto computation = module->AddEntryComputation(outer.Build());\n CallInliner call_inliner;\n TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));\n ASSERT_TRUE(mutated);\n EXPECT_THAT(computation->root_instruction(), op::Constant());\n EXPECT_EQ(computation->root_instruction()->literal().GetFirstElement(),\n 42);\n ASSERT_EQ(1, 
computation->root_instruction()->control_predecessors().size());\n auto prior = computation->root_instruction()->control_predecessors()[0];\n EXPECT_THAT(prior, op::Constant());\n EXPECT_EQ(prior->literal().GetFirstElement(), 24);\n}\nTEST_F(CallInlinerTest, CallsWithinWhileBodiesAreInlined) {\n const Shape pred = ShapeUtil::MakeShape(PRED, {});\n auto module = CreateNewVerifiedModule();\n HloComputation::Builder just_false(TestName() + \".false\");\n just_false.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(false)));\n HloComputation* false_computation =\n module->AddEmbeddedComputation(just_false.Build());\n HloComputation::Builder call_false_builder(TestName() + \".call_false\");\n call_false_builder.AddInstruction(\n HloInstruction::CreateParameter(0, pred, \"param\"));\n call_false_builder.AddInstruction(\n HloInstruction::CreateCall(pred, {}, false_computation));\n HloComputation* call_false =\n module->AddEmbeddedComputation(call_false_builder.Build());\n HloComputation::Builder outer(TestName() + \".outer\");\n HloInstruction* init_value = outer.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(false)));\n outer.AddInstruction(\n HloInstruction::CreateWhile(pred, call_false, call_false, init_value));\n auto computation = module->AddEntryComputation(outer.Build());\n CallInliner call_inliner;\n TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));\n ASSERT_TRUE(mutated);\n EXPECT_THAT(\n computation->root_instruction()->while_condition()->root_instruction(),\n op::Constant());\n EXPECT_THAT(computation->root_instruction()->while_body()->root_instruction(),\n op::Constant());\n}\nTEST_F(CallInlinerTest, InlineWithoutRunningPass) {\n const Shape pred = ShapeUtil::MakeShape(PRED, {});\n auto module = CreateNewVerifiedModule();\n HloComputation::Builder just_false(TestName() + \".false\");\n auto* true_constant = just_false.AddInstruction(\n 
HloInstruction::CreateConstant(LiteralUtil::CreateR1({true})));\n auto* false_constant = just_false.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(false)));\n TF_ASSERT_OK(false_constant->AddControlDependencyTo(true_constant));\n HloComputation* false_computation =\n module->AddEmbeddedComputation(just_false.Build());\n HloComputation::Builder call_false_builder(TestName() + \".call_false\");\n HloInstruction* call = call_false_builder.AddInstruction(\n HloInstruction::CreateCall(pred, {}, false_computation));\n auto computation = module->AddEntryComputation(call_false_builder.Build());\n TF_ASSERT_OK(CallInliner::Inline(call).status());\n EXPECT_THAT(computation->root_instruction(), op::Constant());\n EXPECT_THAT(computation->root_instruction()->control_successors(),\n ElementsAre(op::Constant()));\n}\nTEST_F(CallInlinerTest, InlineWithEmptyComputation) {\n const Shape pred = ShapeUtil::MakeShape(PRED, {});\n auto module = CreateNewVerifiedModule();\n Shape r0s32 = ShapeUtil::MakeShape(S32, {});\n HloComputation::Builder empty(TestName() + \".empty\");\n empty.AddInstruction(HloInstruction::CreateParameter(0, r0s32, \"A\"));\n empty.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(0)));\n HloComputation* empty_computation =\n module->AddEmbeddedComputation(empty.Build());\n HloComputation::Builder empty2(TestName() + \".empty\");\n empty2.AddInstruction(HloInstruction::CreateParameter(0, r0s32, \"A\"));\n empty2.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(0)));\n HloComputation* empty2_computation =\n module->AddEmbeddedComputation(empty2.Build());\n HloComputation::Builder entry(\"entry\");\n auto zero = entry.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(0)));\n entry.AddInstruction(\n HloInstruction::CreateCall(r0s32, {zero}, empty_computation));\n HloInstruction* call1 = entry.AddInstruction(\n HloInstruction::CreateCall(r0s32, {zero}, empty2_computation));\n 
entry.AddInstruction(\n HloInstruction::CreateCall(r0s32, {call1}, empty_computation));\n auto computation = module->AddEntryComputation(entry.Build());\n CallInliner call_inliner;\n TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));\n ASSERT_TRUE(mutated);\n EXPECT_THAT(computation->root_instruction(), op::Constant());\n}\nTEST_F(CallInlinerTest, CallToOutfeedComputationIsInlined) {\n const Shape f32 = ShapeUtil::MakeShape(F32, {});\n auto module = CreateNewVerifiedModule();\n HloComputation::Builder outfeeder(TestName() + \".outfeeder\");\n auto value = outfeeder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(42.0)));\n auto token = outfeeder.AddInstruction(HloInstruction::CreateToken());\n outfeeder.AddInstruction(\n HloInstruction::CreateOutfeed(f32, value, token, \"\"));\n auto outfeed_computation = module->AddEmbeddedComputation(outfeeder.Build());\n HloComputation::Builder outer(TestName() + \".outer\");\n outer.AddInstruction(HloInstruction::CreateCall(\n outfeed_computation->root_instruction()->shape(), {},\n outfeed_computation));\n module->AddEntryComputation(outer.Build());\n CallInliner call_inliner;\n TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));\n ASSERT_TRUE(mutated);\n}\nTEST_F(CallInlinerTest, InlineSingleUseCalleesOnly) {\n const absl::string_view hlo_string = R\"(\n HloModule inline_module\n a {\n ROOT tuple = () tuple()\n }\n b {\n ROOT tuple.1 = () tuple()\n }\n ENTRY inline {\n a = () call(), to_apply=a\n b = () call(), to_apply=a\n c = () call(), to_apply=b\n ROOT tuple = ((), (), ()) tuple(a, b, c)\n })\";\n auto module = ParseAndReturnVerifiedModule(hlo_string).value();\n CallInliner call_inliner(true);\n TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));\n ASSERT_TRUE(mutated);\n ASSERT_EQ(module->entry_computation()->instruction_count(), 4);\n auto inst = module->entry_computation()->instructions().begin();\n EXPECT_THAT(*inst, op::Call());\n 
++inst;\n EXPECT_THAT(*inst, op::Call());\n ++inst;\n EXPECT_THAT(*inst, op::Tuple());\n ++inst;\n EXPECT_THAT(*inst, op::Tuple());\n}\nTEST_F(CallInlinerTest, InliningPerformedInsideSpecifiedThreadsOnly) {\n const std::string hlo_string = R\"(\nHloModule inline_specified_threads_only\n%secondary_inner () -> u32[] {\n ROOT %co.2 = u32[] constant(2)\n}, execution_thread=\"secondary_thread\"\n%secondary_outer () -> u32[] {\n %co.1 = u32[] constant(1)\n %call.1 = u32[] call(), to_apply=%secondary_inner\n ROOT %add.1 = add(%co.1, %call.1)\n}, execution_thread=\"secondary_thread\"\n%main_inner () -> u32[] {\n %co.0 = u32[] constant(0)\n %async-start = ((), u32[], u32[]) call-start(), async_execution_thread=\"secondary_thread\", to_apply=secondary_outer\n %async-done = u32[] call-done(((), u32[], u32[]) %async-start)\n ROOT %add.2 = add(%co.0, %async-done)\n}\nENTRY %main_outer (p0: u32[]) -> u32[] {\n %p.0 = u32[] parameter(0)\n %call.0 = u32[] call(), to_apply=%main_inner\n ROOT %add.3 = add(%p.0, %call.0)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnUnverifiedModule(hlo_string));\n auto module_clone = module->Clone(\"\");\n {\n VLOG(1) << \"Module BEFORE CallInliner\\n\" << module->ToString();\n CallInliner call_inliner;\n TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));\n VLOG(1) << \"Module AFTER CallInliner\\n\" << module->ToString();\n EXPECT_TRUE(mutated);\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::Add(op::Parameter(0),\n op::Add(op::Constant(LiteralUtil::CreateR0(0)),\n op::AsyncDone())));\n EXPECT_THAT(module->entry_computation()\n ->root_instruction()\n ->operand(1)\n ->operand(1)\n ->async_wrapped_instruction()\n ->called_computations()\n .at(0)\n ->root_instruction(),\n op::Add(op::Constant(LiteralUtil::CreateR0(1)),\n op::Constant(LiteralUtil::CreateR0(2))));\n }\n VLOG(1) << \"Restricting CallInliner to the secondary thread.\";\n {\n CallInliner call_inliner;\n TF_ASSERT_OK_AND_ASSIGN(\n 
bool mutated,\n call_inliner.Run(module_clone.get(), {\"secondary_thread\"}));\n VLOG(1) << \"Module AFTER CallInliner\\n\" << module_clone->ToString();\n EXPECT_TRUE(mutated);\n EXPECT_THAT(module_clone->entry_computation()->root_instruction(),\n op::Add(op::Parameter(0), op::Call()));\n EXPECT_THAT(module_clone->entry_computation()\n ->root_instruction()\n ->operand(1)\n ->called_computations()\n .at(0)\n ->root_instruction(),\n op::Add(op::Constant(LiteralUtil::CreateR0(0)),\n op::AsyncDone()));\n EXPECT_THAT(module_clone->entry_computation()\n ->root_instruction()\n ->operand(1)\n ->called_computations()\n .at(0)\n ->root_instruction()\n ->operand(1)\n ->async_wrapped_instruction()\n ->called_computations()\n .at(0)\n ->root_instruction(),\n op::Add(op::Constant(LiteralUtil::CreateR0(1)),\n op::Constant(LiteralUtil::CreateR0(2))));\n }\n}\nTEST_F(CallInlinerTest, InlineCompositeCall) {\n const absl::string_view hlo_string = R\"(\n HloModule composite\n %add (lhs: f32[]) -> f32[] {\n %lhs = f32[] parameter(0)\n %rhs = f32[] constant(2)\n ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)\n }\n ENTRY %main () -> f32[] {\n %lhs = f32[] constant(42)\n ROOT %call = f32[] call(f32[] %lhs), to_apply=%add, is_composite=true, frontend_attributes={composite.attributes={n = 1 : i32, tensor = dense<1> : tensor},composite.name=\"foo.bar\",composite.version=\"1\"}\n })\";\n auto module = ParseAndReturnVerifiedModule(hlo_string).value();\n CallInliner call_inliner(true);\n TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));\n ASSERT_TRUE(mutated);\n ASSERT_EQ(module->entry_computation()->instruction_count(), 3);\n auto inst = module->entry_computation()->instructions().begin();\n EXPECT_THAT(*inst, op::Constant());\n ++inst;\n EXPECT_THAT(*inst, op::Constant());\n ++inst;\n EXPECT_THAT(*inst, op::Add());\n EXPECT_TRUE((*inst)->frontend_attributes().map().empty());\n}\nTEST_F(CallInlinerTest, UseShardyMhloToHloShmapBodyNotInlined) {\n const char* const hloString 
= R\"(\n HloModule jit_f, entry_computation_layout={(f32[8,8]{1,0})->f32[8,8]{1,0}}\n %prefix_shmap_body_suffix.4 (Arg_0.5: f32[1,8]) -> f32[1,8] {\n %Arg_0.5 = f32[1,8]{1,0} parameter(0)\n ROOT %add.6 = f32[1,8]{1,0} add(f32[1,8]{1,0} %Arg_0.5, f32[1,8]{1,0} %Arg_0.5), metadata={source_file=\"-\" source_line=11}\n }\n ENTRY %main.10 (Arg_0.1: f32[8,8]) -> f32[8,8] {\n %Arg_0.1 = f32[8,8]{1,0} parameter(0)\n %custom-call.2 = f32[8,8]{1,0} custom-call(f32[8,8]{1,0} %Arg_0.1), custom_call_target=\"Sharding\", sharding={devices=[8,1]<=[8]}, metadata={source_file=\"-\" source_line=3}\n %custom-call.3 = f32[1,8]{1,0} custom-call(f32[8,8]{1,0} %custom-call.2), custom_call_target=\"SPMDFullToShardShape\", sharding={manual}, metadata={source_file=\"-\" source_line=4}\n %call.7 = f32[1,8]{1,0} call(f32[1,8]{1,0} %custom-call.3), to_apply=%prefix_shmap_body_suffix.4\n %custom-call.8 = f32[1,8]{1,0} custom-call(f32[1,8]{1,0} %call.7), custom_call_target=\"Sharding\", sharding={manual}, metadata={source_file=\"-\" source_line=6}\n ROOT %custom-call.9 = f32[8,8]{1,0} custom-call(f32[1,8]{1,0} %custom-call.8), custom_call_target=\"SPMDShardToFullShape\", sharding={devices=[8,1]<=[8]}, metadata={source_file=\"-\" source_line=7}\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hloString));\n module->mutable_config().set_use_shardy_partitioner(true);\n TF_ASSERT_OK_AND_ASSIGN(bool changed, CallInliner().Run(module.get()));\n VLOG(1) << module->ToString();\n EXPECT_FALSE(changed);\n HloInstruction* call = FindInstruction(module.get(), xla::HloOpcode::kCall);\n EXPECT_NE(call, nullptr);\n EXPECT_TRUE(call->has_to_apply());\n EXPECT_EQ(call->to_apply()->name(), \"prefix_shmap_body_suffix.4\");\n}\nTEST_F(CallInlinerTest, UseShardManualComputationBodyNotInlined) {\n const char* const hloString = R\"(\n HloModule jit_f, entry_computation_layout={(f32[8,8]{1,0})->f32[8,8]{1,0}}\n %xla.sdy.manual_computation_body.4 (Arg_0.5: f32[1,8]) -> f32[1,8] {\n %Arg_0.5 = 
f32[1,8]{1,0} parameter(0)\n ROOT %add.6 = f32[1,8]{1,0} add(f32[1,8]{1,0} %Arg_0.5, f32[1,8]{1,0} %Arg_0.5), metadata={source_file=\"-\" source_line=11}\n }\n ENTRY %main.10 (Arg_0.1: f32[8,8]) -> f32[8,8] {\n %Arg_0.1 = f32[8,8]{1,0} parameter(0)\n %custom-call.3 = f32[1,8]{1,0} custom-call(f32[8,8]{1,0} %Arg_0.1), custom_call_target=\"SPMDFullToShardShape\", sharding={manual}, metadata={source_file=\"-\" source_line=4}\n %call.7 = f32[1,8]{1,0} call(f32[1,8]{1,0} %custom-call.3), to_apply=%xla.sdy.manual_computation_body.4\n ROOT %custom-call.9 = f32[8,8]{1,0} custom-call(f32[1,8]{1,0} %call.7), custom_call_target=\"SPMDShardToFullShape\", sharding={devices=[8,1]<=[8]}, metadata={source_file=\"-\" source_line=7}\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hloString));\n module->mutable_config().set_use_shardy_partitioner(true);\n TF_ASSERT_OK_AND_ASSIGN(bool changed, CallInliner().Run(module.get()));\n VLOG(1) << module->ToString();\n EXPECT_FALSE(changed);\n HloInstruction* call = FindInstruction(module.get(), xla::HloOpcode::kCall);\n EXPECT_NE(call, nullptr);\n EXPECT_TRUE(call->has_to_apply());\n EXPECT_EQ(call->to_apply()->name(), \"xla.sdy.manual_computation_body.4\");\n}\nTEST_F(CallInlinerTest, UseShardManualComputationBodyInlined) {\n const char* const hloString = R\"(\n HloModule jit_f, entry_computation_layout={(f32[8,8]{1,0})->f32[8,8]{1,0}}\n %prefix_xla.sdy.manual_computation_body.4 (Arg_0.5: f32[1,8]) -> f32[1,8] {\n %Arg_0.5 = f32[1,8]{1,0} parameter(0)\n ROOT %add.6 = f32[1,8]{1,0} add(f32[1,8]{1,0} %Arg_0.5, f32[1,8]{1,0} %Arg_0.5), metadata={source_file=\"-\" source_line=11}\n }\n ENTRY %main.10 (Arg_0.1: f32[8,8]) -> f32[8,8] {\n %Arg_0.1 = f32[8,8]{1,0} parameter(0)\n %custom-call.3 = f32[1,8]{1,0} custom-call(f32[8,8]{1,0} %Arg_0.1), custom_call_target=\"SPMDFullToShardShape\", sharding={manual}, metadata={source_file=\"-\" source_line=4}\n %call.7 = f32[1,8]{1,0} call(f32[1,8]{1,0} %custom-call.3), 
to_apply=%prefix_xla.sdy.manual_computation_body.4\n ROOT %custom-call.9 = f32[8,8]{1,0} custom-call(f32[1,8]{1,0} %call.7), custom_call_target=\"SPMDShardToFullShape\", sharding={devices=[8,1]<=[8]}, metadata={source_file=\"-\" source_line=7}\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hloString));\n module->mutable_config().set_use_shardy_partitioner(true);\n TF_ASSERT_OK_AND_ASSIGN(bool changed, CallInliner().Run(module.get()));\n VLOG(1) << module->ToString();\n EXPECT_TRUE(changed);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/call_inliner.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/call_inliner_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1168,"cells":{"ID":{"kind":"string","value":"164bcdca-2f59-413e-98d0-a4c8cf57007b"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"reshape_decomposer"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/reshape_decomposer.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/reshape_decomposer_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/reshape_decomposer.h\"\n#include \"absl/status/status.h\"\n#include \"xla/hlo/ir/dfs_hlo_visitor_with_default.h\"\n#include \"xla/service/hlo_creation_utils.h\"\nnamespace xla {\nnamespace {\nclass ReshapeDecomposerVisitor : public DfsHloRewriteVisitor {\n public:\n absl::Status HandleReshape(HloInstruction* reshape) override {\n HloInstruction* operand = reshape->mutable_operand(0);\n auto s = reshape->shape();\n auto s0 = operand->shape();\n if (ShapeUtil::ReshapeIsBitcast(s, s0)) {\n 
auto b = MakeBitcastHlo(operand, s, &operand->metadata());\n return ReplaceInstruction(reshape, b);\n } else if (auto output_aligned_input_shape =\n ShapeUtil::AlignLayouts(s, s0)) {\n Shape new_input_shape = *output_aligned_input_shape;\n HloInstruction* copied_operand = MakeCopyHlo(operand, new_input_shape);\n VLOG(3) << \"Decomposing reshape into reshape-bitcast and a physical \"\n \"transpose on the operand: \"\n << copied_operand->ToString();\n auto b = MakeBitcastHlo(copied_operand, s, &copied_operand->metadata());\n TF_RETURN_IF_ERROR(ReplaceInstruction(reshape, b));\n DCHECK(ShapeUtil::ReshapeIsBitcast(b->shape(), b->operand(0)->shape()));\n } else if (auto input_aligned_output_shape =\n ShapeUtil::AlignLayouts(s0, s)) {\n Shape new_output_shape = *input_aligned_output_shape;\n auto b = MakeBitcastHlo(operand, new_output_shape, &operand->metadata());\n DCHECK(ShapeUtil::ReshapeIsBitcast(b->shape(), b->operand(0)->shape()));\n HloInstruction* copied_result = MakeCopyHlo(b, s);\n VLOG(3) << \"Decomposing reshape into reshape-bitcast and a physical \"\n \"transposition on the result: \"\n << copied_result->ToString();\n TF_RETURN_IF_ERROR(ReplaceInstruction(reshape, copied_result));\n } else {\n VLOG(3) << \"Both input and output of reshape are not alignable, create \"\n \"two physical transposes\";\n auto s0_normalized = ShapeUtil::MakeShapeWithDescendingLayout(\n s0.element_type(), s0.dimensions());\n auto c1 = MakeCopyHlo(reshape->mutable_operand(0), s0_normalized);\n auto s_normalized = ShapeUtil::MakeShapeWithDescendingLayout(\n s.element_type(), s.dimensions());\n auto b = MakeBitcastHlo(c1, s_normalized, &c1->metadata());\n DCHECK(ShapeUtil::ReshapeIsBitcast(b->shape(), b->operand(0)->shape()));\n auto c2 = MakeCopyHlo(b, s);\n TF_RETURN_IF_ERROR(ReplaceInstruction(reshape, c2));\n }\n return absl::OkStatus();\n }\n};\n} \nabsl::StatusOr ReshapeDecomposer::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n return 
ReshapeDecomposerVisitor{}.RunOnModule(module, execution_threads);\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/reshape_decomposer.h\"\n#include \n#include \n#include \"xla/service/hlo_parser.h\"\n#include \"xla/test.h\"\n#include \"xla/test_helpers.h\"\n#include \"xla/tests/filecheck.h\"\n#include \"xla/tests/hlo_test_base.h\"\nnamespace xla {\nnamespace {\nclass ReshapeDecomposerTest : public HloTestBase {\n public:\n void CheckReshapeDecomposer(const char* hlo,\n std::optional expected) {\n RunAndFilecheckHloRewrite(\n hlo, ReshapeDecomposer{}, expected,\n [&](HloModule* module) {\n EXPECT_TRUE(absl::c_all_of(\n module->entry_computation()->instructions(),\n [&](const HloInstruction* instr) {\n return instr->opcode() != HloOpcode::kReshape ||\n ShapeUtil::ReshapeIsBitcast(instr->operand(0)->shape(),\n instr->shape());\n }));\n });\n }\n};\nTEST_F(ReshapeDecomposerTest, IsBitcast) {\n const char* hlo = R\"(\nHloModule Module\nENTRY main {\n p = f32[8]{0} parameter(0)\n ROOT r = f32[4,2]{1,0} reshape(p)\n}\n)\";\n CheckReshapeDecomposer(hlo, R\"(\n )\");\n}\nTEST_F(ReshapeDecomposerTest, AlignableOutput) {\n const char* hlo = R\"(\nHloModule Module\nENTRY main {\n p = f32[8,3]{1,0} parameter(0)\n ROOT r = f32[4,2,3]{0,1,2} reshape(p)\n}\n)\";\n CheckReshapeDecomposer(hlo, R\"(\n)\");\n}\nTEST_F(ReshapeDecomposerTest, AlignableInput) {\n const char* hlo = R\"(\nHloModule Module\nENTRY main {\n p = f32[4,2,3]{0,1,2} parameter(0)\n ROOT r = f32[8,3]{1,0} reshape(p)\n}\n)\";\n CheckReshapeDecomposer(hlo, R\"(\n)\");\n}\nTEST_F(ReshapeDecomposerTest, NotAlignable) {\n const char* hlo = R\"(\nHloModule Module\nENTRY main {\n p = f32[4,2,3,8]{0,2,1,3} parameter(0)\n ROOT r = f32[8,3,2,4]{0,2,1,3} reshape(p)\n}\n)\";\n CheckReshapeDecomposer(hlo, R\"(\n)\");\n}\n} \n} "},"Code 
Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reshape_decomposer.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reshape_decomposer_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1169,"cells":{"ID":{"kind":"string","value":"872adbcd-9a42-46b0-9278-91c217a8afec"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"indexed_array_analysis"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/indexed_array_analysis.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/indexed_array_analysis_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/indexed_array_analysis.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/container/inlined_vector.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_join.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/span.h\"\n#include \"xla/hlo/evaluator/hlo_evaluator.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/literal.h\"\n#include \"xla/map_util.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nusing Analysis = IndexedArrayAnalysis;\nusing UnknownArray = Analysis::UnknownArray;\nusing ConstantArray = 
Analysis::ConstantArray;\nusing ReshapedArray = Analysis::ReshapedArray;\nusing ScalarIndexedArray = Analysis::ScalarIndexedArray;\nusing absl::StrJoin;\n} \nstd::string IndexedArrayAnalysis::ToString(Array* root, bool print_constants) {\n switch (root->kind()) {\n case Array::kUnknown: {\n auto* unknown_tensor = root->as();\n return absl::StrCat(\"%\", unknown_tensor->instruction().name());\n }\n case Array::kConstant: {\n if (print_constants) {\n std::string contents = root->as()->literal()->ToString();\n return absl::StrCat(\"(constant \", ShapeUtil::HumanString(root->shape()),\n \" \", contents, \")\");\n }\n return absl::StrCat(\"(constant \", ShapeUtil::HumanString(root->shape()),\n \")\");\n }\n case Array::kReshaped: {\n ReshapedArray* reshaped_array = root->as();\n return absl::StrCat(\n \"(reshape \", ToString(reshaped_array->operand(), print_constants),\n \" to \", ShapeUtil::HumanString(reshaped_array->shape()), \")\");\n }\n case Array::kScalarIndexedConstant:\n case Array::kScalarIndexed: {\n auto* indexed_array = root->as();\n std::string name = root->kind() == Array::kScalarIndexedConstant\n ? 
\"scalar-indexed-const\"\n : \"scalar-indexed\";\n return absl::StrCat(\n \"(\", name, \" \", ToString(indexed_array->source(), print_constants),\n \" \", ToString(indexed_array->indices(), print_constants), \" \",\n indexed_array->source_dim(), \"->[\",\n StrJoin(indexed_array->output_dims(), \",\"), \"])\");\n }\n }\n}\nabsl::StatusOr IndexedArrayAnalysis::GetArrayFor(\n const HloInstruction* instr) {\n auto it = cache_.find(instr);\n if (it != cache_.end()) {\n return it->second;\n }\n TF_RETURN_IF_ERROR(TraverseAndPopulateCache(instr));\n return FindOrDie(cache_, instr);\n}\nabsl::Status IndexedArrayAnalysis::TraverseAndPopulateCache(\n const HloInstruction* root) {\n absl::InlinedVector stack;\n enum DfsState { kDiscovered, kVisited };\n absl::flat_hash_map dfs_state_map;\n stack.push_back(root);\n InsertOrDie(&dfs_state_map, root, kDiscovered);\n do {\n const HloInstruction* instr = stack.back();\n if (cache_.contains(instr)) {\n stack.pop_back();\n continue;\n }\n switch (FindOrDie(dfs_state_map, instr)) {\n case kDiscovered: {\n for (const HloInstruction* operand : instr->operands()) {\n if (!cache_.contains(operand)) {\n stack.push_back(operand);\n CHECK(!dfs_state_map.contains(operand) ||\n dfs_state_map[operand] == kDiscovered);\n dfs_state_map[operand] = kDiscovered;\n }\n }\n dfs_state_map[instr] = kVisited;\n break;\n }\n case kVisited:\n stack.pop_back();\n TF_ASSIGN_OR_RETURN(Array * array, ComputeArrayFor(instr));\n InsertOrDie(&cache_, instr, array);\n break;\n }\n } while (!stack.empty());\n return absl::OkStatus();\n}\nabsl::StatusOr IndexedArrayAnalysis::ComputeArrayFor(\n const HloInstruction* instr) {\n Array* computed_array;\n if (instr->IsElementwise() && instr->operand_count() == 1) {\n TF_ASSIGN_OR_RETURN(\n computed_array,\n ComputeArrayForElementwiseUnaryOp(\n instr->opcode(), FindOrDie(cache_, instr->operand(0))));\n } else if (instr->IsElementwise() && instr->operand_count() == 2) {\n TF_ASSIGN_OR_RETURN(\n computed_array,\n 
ComputeArrayForElementwiseBinaryOp(\n instr->opcode(), FindOrDie(cache_, instr->operand(0)),\n FindOrDie(cache_, instr->operand(1))));\n } else if (instr->opcode() == HloOpcode::kConstant) {\n TF_ASSIGN_OR_RETURN(computed_array,\n ComputeArrayForConstant(instr->literal()));\n } else if (instr->opcode() == HloOpcode::kGather) {\n TF_ASSIGN_OR_RETURN(\n computed_array,\n ComputeArrayForGather(instr->shape(), instr->gather_dimension_numbers(),\n instr->gather_slice_sizes(),\n FindOrDie(cache_, instr->operand(0)),\n FindOrDie(cache_, instr->operand(1))));\n } else if (instr->opcode() == HloOpcode::kReshape) {\n TF_ASSIGN_OR_RETURN(\n computed_array,\n ComputeArrayForReshape(instr->shape(),\n FindOrDie(cache_, instr->operand(0))));\n } else if (instr->opcode() == HloOpcode::kDot) {\n TF_ASSIGN_OR_RETURN(\n computed_array,\n ComputeArrayForDot(instr->shape(), instr->dot_dimension_numbers(),\n instr->precision_config(),\n FindOrDie(cache_, instr->operand(0)),\n FindOrDie(cache_, instr->operand(1))));\n } else {\n computed_array = nullptr;\n }\n if (!computed_array) {\n computed_array = Construct(instr);\n }\n return computed_array;\n}\nabsl::StatusOr IndexedArrayAnalysis::ComputeArrayForConstant(\n const Literal& literal) {\n return Construct(&literal);\n}\nabsl::StatusOr IndexedArrayAnalysis::FoldGatherOfGather(\n ScalarIndexedArray* source, Array* indices, int64_t source_dim,\n absl::Span output_dims, Shape shape) {\n Array* a = source->source();\n Array* x = source->indices();\n Array* y = indices;\n enum class IndexComponent { Ungathered, GatheredFirst, GatheredSecond };\n std::vector simulated_index(a->shape().dimensions_size(),\n IndexComponent::Ungathered);\n EraseAt(&simulated_index, source->source_dim());\n for (int64_t gather_dim : source->output_dims()) {\n simulated_index.insert(simulated_index.begin() + gather_dim,\n IndexComponent::GatheredFirst);\n }\n EraseAt(&simulated_index, source_dim);\n for (int64_t output_dim : output_dims) {\n 
simulated_index.insert(simulated_index.begin() + output_dim,\n IndexComponent::GatheredSecond);\n }\n int64_t source_dim_for_index_array =\n FindIndex(source->output_dims(), source_dim);\n CHECK_NE(source_dim_for_index_array, source->output_dims().size());\n std::vector output_dims_for_index_array;\n int64_t gathered_index_components_seen = 0;\n for (IndexComponent simulation_dim : simulated_index) {\n if (simulation_dim == IndexComponent::GatheredSecond) {\n output_dims_for_index_array.push_back(gathered_index_components_seen);\n }\n if (simulation_dim != IndexComponent::Ungathered) {\n gathered_index_components_seen++;\n }\n }\n std::vector dim_sizes_for_composed_index;\n std::vector output_dims_for_new_gather;\n for (int64_t i = 0, e = simulated_index.size(); i < e; i++) {\n if (simulated_index[i] != IndexComponent::Ungathered) {\n dim_sizes_for_composed_index.push_back(shape.dimensions(i));\n output_dims_for_new_gather.push_back(i);\n }\n }\n Array* inner_indices = ConstructScalarIndexedArray(\n x, y, source_dim_for_index_array, output_dims_for_index_array,\n ShapeUtil::MakeShape(x->shape().element_type(),\n dim_sizes_for_composed_index));\n return ConstructScalarIndexedArray(a, inner_indices, source->source_dim(),\n output_dims_for_new_gather,\n std::move(shape));\n}\nabsl::StatusOr IndexedArrayAnalysis::ComputeArrayForGather(\n const Shape& shape, const GatherDimensionNumbers& dim_numbers,\n absl::Span slice_sizes, Array* source, Array* indices) {\n if (dim_numbers.index_vector_dim() != indices->shape().dimensions_size()) {\n VLOG(3) << \"ComputeArrayForGather: indices are not scalar\";\n return nullptr;\n }\n CHECK_EQ(dim_numbers.start_index_map_size(), 1);\n if (dim_numbers.collapsed_slice_dims_size() != 1 ||\n dim_numbers.collapsed_slice_dims(0) != dim_numbers.start_index_map(0)) {\n VLOG(3) << \"ComputeArrayForGather: gather operations must elide \"\n \"start_index_map[0] and \"\n \"start_index_map[0] only\";\n return nullptr;\n }\n for (int64_t i = 0, e 
= source->shape().dimensions_size(); i < e; i++) {\n if (i != dim_numbers.collapsed_slice_dims(0) &&\n source->shape().dimensions(i) != slice_sizes[i]) {\n VLOG(3) << \"ComputeArrayForGather: slice_sizes[\" << i\n << \"] != source->shape().dimensions(\" << i << \") -- \"\n << source->shape().dimensions(i) << \" vs. \" << slice_sizes[i]\n << \" with dim_numbers.collapsed_slice_dims(0) = \"\n << dim_numbers.collapsed_slice_dims(0);\n return nullptr;\n }\n }\n int64_t source_dim = dim_numbers.start_index_map(0);\n std::vector output_dims;\n for (int64_t i = 0, e = shape.dimensions_size(); i < e; i++) {\n if (!absl::c_binary_search(dim_numbers.offset_dims(), i)) {\n output_dims.push_back(i);\n }\n }\n if (auto* indexed = dynamic_cast(source)) {\n if (absl::c_linear_search(indexed->output_dims(), source_dim)) {\n return FoldGatherOfGather(indexed, indices, source_dim, output_dims,\n shape);\n }\n } else if (auto* constant = dynamic_cast(source)) {\n return Construct(constant, indices, source_dim,\n output_dims, shape);\n }\n return Construct(source, indices, source_dim, output_dims,\n shape);\n}\nnamespace {\nint64_t FindSuffixWithProduct(absl::Span values,\n int64_t product) {\n DCHECK(absl::c_all_of(values, [](int64_t value) { return value > 0; }));\n int64_t current_product = 1;\n int64_t i;\n for (i = values.size() - 1; i >= 0 && product > current_product; --i) {\n current_product *= values[i];\n }\n if (product == current_product) {\n return i + 1;\n }\n return -1;\n}\nstruct ReshapePassthroughDimPair {\n int64_t result_dim;\n int64_t operand_dim;\n};\nstd::vector ComputeReshapePassthroughDimPairs(\n absl::Span operand_shape,\n absl::Span result_shape) {\n std::vector result;\n int64_t result_subarray_size = 1;\n for (int64_t result_dim = result_shape.size() - 1; result_dim >= 0;\n --result_dim) {\n int64_t candidate_operand_dim =\n FindSuffixWithProduct(operand_shape, result_subarray_size);\n CHECK_NE(candidate_operand_dim, 0)\n << \"result_dim = \" << 
result_dim\n << \", result_subarray_size = \" << result_subarray_size\n << \", result_shape = [\" << StrJoin(result_shape, \",\") << \"]\"\n << \", operand_shape = [\" << StrJoin(operand_shape, \",\") << \"]\";\n if (candidate_operand_dim != -1 &&\n result_shape[result_dim] == operand_shape[candidate_operand_dim - 1]) {\n result.push_back({result_dim,\n candidate_operand_dim - 1});\n }\n result_subarray_size *= result_shape[result_dim];\n }\n absl::c_reverse(result);\n if (VLOG_IS_ON(3)) {\n std::vector result_strings;\n absl::c_transform(result, std::back_inserter(result_strings),\n [](ReshapePassthroughDimPair value) {\n return absl::StrCat(value.result_dim, \"->\",\n value.operand_dim);\n });\n VLOG(3) << \"For a reshape from [\" << StrJoin(operand_shape, \",\") << \"] to [\"\n << StrJoin(result_shape, \",\") << \"] passthrough indices are [\"\n << StrJoin(result_strings, \",\")\n << \"] (legend: `result`->`operand`)\";\n }\n DCHECK(absl::c_is_sorted(\n result, [](ReshapePassthroughDimPair lhs, ReshapePassthroughDimPair rhs) {\n return lhs.result_dim < rhs.result_dim;\n }));\n DCHECK(absl::c_is_sorted(\n result, [](ReshapePassthroughDimPair lhs, ReshapePassthroughDimPair rhs) {\n return lhs.operand_dim < rhs.operand_dim;\n }));\n return result;\n}\nbool IsReshapePassthroughOperandDim(\n absl::Span passthrough_dims, int64_t dim) {\n return absl::c_any_of(passthrough_dims,\n [&](ReshapePassthroughDimPair passthrough_dim_pair) {\n return passthrough_dim_pair.operand_dim == dim;\n });\n}\nint64_t MapPassthroughOperandDimToResultDim(\n absl::Span passthrough_dims,\n int64_t operand_dim) {\n auto it = absl::c_find_if(\n passthrough_dims, [&](ReshapePassthroughDimPair passthrough_dim_pair) {\n return passthrough_dim_pair.operand_dim == operand_dim;\n });\n CHECK(it != passthrough_dims.end());\n return it->result_dim;\n}\nint64_t FindSourcePositionForPassthroughResultDim(\n absl::Span operand_shape,\n absl::Span result_shape, int64_t source_passthrough_dim) {\n VLOG(3) 
<< \"FindSourcePositionForPassthroughResultDim([\"\n << StrJoin(operand_shape, \",\") << \"], [\" << StrJoin(result_shape, \",\")\n << \"], \" << source_passthrough_dim << \")\";\n int64_t indexed_source_subarray_size =\n std::accumulate(operand_shape.begin() + source_passthrough_dim + 1,\n operand_shape.end(), 1LL, std::multiplies());\n return FindSuffixWithProduct(result_shape, indexed_source_subarray_size);\n}\nShape StripDegenerateDimensions(const Shape& shape) {\n DimensionVector new_dims;\n absl::c_copy_if(shape.dimensions(), std::back_inserter(new_dims),\n [](int64_t dim) { return dim != 1; });\n return ShapeUtil::MakeShape(shape.element_type(), new_dims);\n}\n}; \nabsl::StatusOr\nIndexedArrayAnalysis::ReshapeToRemoveDegenerateDims(\n ScalarIndexedArray* operand) {\n const Shape& shape = operand->shape();\n if (!ShapeUtil::HasDegenerateDimensions(shape)) {\n return operand;\n }\n const Shape& source_shape = operand->source()->shape();\n DimensionVector new_source_shape_dims;\n for (int64_t i = 0, e = source_shape.dimensions_size(); i < e; i++) {\n if (i == operand->source_dim() || source_shape.dimensions(i) != 1) {\n new_source_shape_dims.push_back(source_shape.dimensions(i));\n }\n }\n Shape new_source_shape =\n ShapeUtil::MakeShape(shape.element_type(), new_source_shape_dims);\n Shape new_indices_shape =\n StripDegenerateDimensions(operand->indices()->shape());\n TF_ASSIGN_OR_RETURN(\n Array* const new_source,\n ComputeArrayForReshape(new_source_shape, operand->source()));\n TF_ASSIGN_OR_RETURN(\n Array* const new_indices,\n ComputeArrayForReshape(new_indices_shape, operand->indices()));\n DimensionVector new_output_dims;\n int64_t degenerate_dims_seen = 0;\n for (int64_t i = 0, e = shape.dimensions_size(); i < e; i++) {\n if (shape.dimensions(i) == 1) {\n degenerate_dims_seen++;\n } else if (absl::c_linear_search(operand->output_dims(), i)) {\n new_output_dims.push_back(i - degenerate_dims_seen);\n }\n }\n int64_t degenerate_dims_before_source_dim =\n 
std::count(source_shape.dimensions().begin(),\n source_shape.dimensions().begin() + operand->source_dim(), 1);\n int64_t new_source_dim =\n operand->source_dim() - degenerate_dims_before_source_dim;\n return ConstructScalarIndexedArray(\n new_source, new_indices, new_source_dim,\n InlinedVectorToVector(new_output_dims),\n StripDegenerateDimensions(operand->shape()));\n}\nabsl::StatusOr\nIndexedArrayAnalysis::ReshapeToAddDegenerateDims(\n ScalarIndexedArray* operand, absl::Span degenerate_dims) {\n if (degenerate_dims.empty()) {\n return operand;\n }\n CHECK(!ShapeUtil::HasDegenerateDimensions(operand->shape()));\n DimensionVector new_output_dims = [&]() {\n absl::InlinedVector output_dims_bitvector(\n operand->shape().dimensions_size());\n for (int64_t output_dim : operand->output_dims()) {\n output_dims_bitvector[output_dim] = true;\n }\n for (int64_t degenerate_dim : degenerate_dims) {\n InsertAt(&output_dims_bitvector, degenerate_dim, false);\n }\n DimensionVector result;\n result.reserve(operand->output_dims().size());\n for (int64_t i = 0, e = output_dims_bitvector.size(); i < e; i++) {\n if (output_dims_bitvector[i]) {\n result.push_back(i);\n }\n }\n return result;\n }();\n DimensionVector new_result_shape_dims;\n absl::c_copy(operand->shape().dimensions(),\n std::back_inserter(new_result_shape_dims));\n for (int64_t degenerate_dim : degenerate_dims) {\n InsertAt(&new_result_shape_dims, degenerate_dim, 1);\n }\n DimensionVector new_source_shape_dims = new_result_shape_dims;\n for (int64_t output_dim : new_output_dims) {\n EraseAt(&new_source_shape_dims, output_dim);\n }\n int64_t new_source_dim = [&]() {\n for (int i = 0, e = new_source_shape_dims.size(); i < e; i++) {\n int64_t non_degenerate_dims_seen = 0;\n if (non_degenerate_dims_seen == operand->source_dim()) {\n return i;\n }\n if (new_source_shape_dims[new_source_dim] != 1) {\n non_degenerate_dims_seen++;\n }\n }\n LOG(FATAL) << \"Did not find source dim in \" << ToString(operand);\n }();\n int64_t 
source_dim_size =\n operand->source()->shape().dimensions(operand->source_dim());\n InsertAt(&new_source_shape_dims, new_source_dim,\n source_dim_size);\n Shape new_source_shape = ShapeUtil::MakeShape(operand->shape().element_type(),\n new_source_shape_dims);\n Shape new_result_shape = ShapeUtil::MakeShape(operand->shape().element_type(),\n new_result_shape_dims);\n TF_ASSIGN_OR_RETURN(\n Array* const new_source,\n ComputeArrayForReshape(new_source_shape, operand->source()));\n return ConstructScalarIndexedArray(\n new_source, operand->indices(), new_source_dim,\n InlinedVectorToVector(new_output_dims), new_result_shape);\n}\nabsl::StatusOr IndexedArrayAnalysis::FoldReshapeOfGather(\n const Shape& shape, ScalarIndexedConstantArray* operand) {\n VLOG(3) << \"FoldReshapeOfGather(\" << ToString(operand) << \")\";\n TF_ASSIGN_OR_RETURN(ScalarIndexedArray* const operand_without_degenerate_dims,\n ReshapeToRemoveDegenerateDims(operand));\n Shape output_shape_without_degenerate_dims = StripDegenerateDimensions(shape);\n TF_ASSIGN_OR_RETURN(\n ScalarIndexedArray* const folded_reshape_without_degenerate_dims,\n FoldReshapeOfGatherNoDegenerateDims(\n output_shape_without_degenerate_dims,\n operand_without_degenerate_dims->as()));\n if (folded_reshape_without_degenerate_dims == nullptr) {\n return nullptr;\n }\n DimensionVector degenerate_result_dims;\n for (int64_t i = 0, e = shape.dimensions_size(); i < e; i++) {\n if (shape.dimensions(i) == 1) {\n degenerate_result_dims.push_back(i);\n }\n }\n return ReshapeToAddDegenerateDims(folded_reshape_without_degenerate_dims,\n degenerate_result_dims);\n}\nabsl::StatusOr\nIndexedArrayAnalysis::FoldReshapeOfGatherNoDegenerateDims(\n const Shape& shape, ScalarIndexedConstantArray* scalar_indexed) {\n VLOG(3) << \"FoldReshapeOfGatherNoDegenerateDims(\" << ToString(scalar_indexed)\n << \")\";\n CHECK(!ShapeUtil::HasDegenerateDimensions(shape));\n CHECK(!ShapeUtil::HasDegenerateDimensions(scalar_indexed->shape()));\n std::vector 
reshape_passthrough_dims =\n ComputeReshapePassthroughDimPairs(\n scalar_indexed->shape().dimensions(),\n shape.dimensions());\n auto is_reshape_passthrough_operand_dim = [&](int64_t operand_dim) {\n return IsReshapePassthroughOperandDim(reshape_passthrough_dims,\n operand_dim);\n };\n if (!absl::c_all_of(scalar_indexed->output_dims(),\n is_reshape_passthrough_operand_dim)) {\n VLOG(3) << \"Not all output dims are passthrough dims \"\n << ToString(scalar_indexed);\n return nullptr;\n }\n std::vector new_scalar_indexed_source_shape(\n shape.dimensions().begin(), shape.dimensions().end());\n for (int64_t i = scalar_indexed->output_dims().size() - 1; i >= 0; i--) {\n int64_t output_dim = scalar_indexed->output_dims()[i];\n int64_t output_dim_after_reshape = MapPassthroughOperandDimToResultDim(\n reshape_passthrough_dims, output_dim);\n EraseAt(&new_scalar_indexed_source_shape, output_dim_after_reshape);\n }\n const Shape& scalar_indexed_source_shape = scalar_indexed->source()->shape();\n int64_t source_dim_for_new_scalar_indexed_node =\n FindSourcePositionForPassthroughResultDim(\n scalar_indexed_source_shape.dimensions(),\n new_scalar_indexed_source_shape,\n scalar_indexed->source_dim());\n if (source_dim_for_new_scalar_indexed_node == -1) {\n VLOG(3) << \"Could not compute the source dim for the new scalar indexed \"\n \"node: scalar_indexed_source_shape = [\"\n << StrJoin(scalar_indexed_source_shape.dimensions(), \",\")\n << \"] and new_scalar_indexed_source_shape = [\"\n << StrJoin(new_scalar_indexed_source_shape, \",\") << \"]\";\n return nullptr;\n }\n InsertAt(\n &new_scalar_indexed_source_shape, source_dim_for_new_scalar_indexed_node,\n scalar_indexed_source_shape.dimensions(scalar_indexed->source_dim()));\n CHECK_EQ(absl::c_accumulate(new_scalar_indexed_source_shape, 1LL,\n std::multiplies()),\n ShapeUtil::ElementsIn(scalar_indexed_source_shape));\n CHECK(IsReshapePassthroughOperandDim(\n ComputeReshapePassthroughDimPairs(\n 
scalar_indexed_source_shape.dimensions(),\n new_scalar_indexed_source_shape),\n scalar_indexed->source_dim()));\n auto map_passthrough_operand_dim_to_result_dim = [&](int64_t result_dim) {\n return MapPassthroughOperandDimToResultDim(reshape_passthrough_dims,\n result_dim);\n };\n std::vector output_dims_for_new_scalar_indexed_node;\n absl::c_transform(scalar_indexed->output_dims(),\n std::back_inserter(output_dims_for_new_scalar_indexed_node),\n map_passthrough_operand_dim_to_result_dim);\n TF_ASSIGN_OR_RETURN(const Literal* new_scalar_indexed_source_literal,\n TakeOwnership(scalar_indexed->literal().Reshape(\n new_scalar_indexed_source_shape)));\n TF_ASSIGN_OR_RETURN(\n Array * new_scalar_indexed_source,\n ComputeArrayForConstant(*new_scalar_indexed_source_literal));\n return ConstructScalarIndexedArray(\n new_scalar_indexed_source, scalar_indexed->indices(),\n source_dim_for_new_scalar_indexed_node,\n output_dims_for_new_scalar_indexed_node, shape);\n}\nabsl::StatusOr IndexedArrayAnalysis::ComputeArrayForReshape(\n const Shape& shape, Array* operand) {\n if (ShapeUtil::Compatible(operand->shape(), shape)) {\n return operand;\n }\n if (auto* scalar_indexed =\n dynamic_cast(operand)) {\n TF_ASSIGN_OR_RETURN(Analysis::Array * reshape_folded_into_gather,\n FoldReshapeOfGather(shape, scalar_indexed));\n if (reshape_folded_into_gather) {\n return reshape_folded_into_gather;\n }\n }\n if (auto* constant_array = dynamic_cast(operand)) {\n TF_ASSIGN_OR_RETURN(\n Literal* const new_literal,\n TakeOwnership(constant_array->literal()->Reshape(shape.dimensions())));\n return Construct(new_literal);\n }\n return Construct(operand, shape);\n}\nabsl::StatusOr\nIndexedArrayAnalysis::ComputeArrayForElementwiseBinaryOp(HloOpcode opcode,\n Array* lhs,\n Array* rhs) {\n ScalarIndexedConstantArray* lhs_scalar_indexed_const =\n dynamic_cast(lhs);\n ScalarIndexedConstantArray* rhs_scalar_indexed_const =\n dynamic_cast(rhs);\n bool lhs_is_indexed;\n if (lhs_scalar_indexed_const && 
!rhs_scalar_indexed_const) {\n lhs_is_indexed = true;\n } else if (rhs_scalar_indexed_const && !lhs_scalar_indexed_const) {\n lhs_is_indexed = false;\n } else {\n return nullptr;\n }\n ScalarIndexedConstantArray* scalar_indexed_const =\n lhs_is_indexed ? lhs_scalar_indexed_const : rhs_scalar_indexed_const;\n UnknownArray* candidate_broadcast_array =\n dynamic_cast(lhs_is_indexed ? rhs : lhs);\n if (!candidate_broadcast_array ||\n candidate_broadcast_array->instruction().opcode() !=\n HloOpcode::kBroadcast) {\n return nullptr;\n }\n const HloInstruction* broadcast_instr =\n &candidate_broadcast_array->instruction();\n const HloInstruction* broadcast_const_operand = broadcast_instr->operand(0);\n if (broadcast_const_operand->opcode() != HloOpcode::kConstant) {\n return nullptr;\n }\n absl::Span broadcast_dims = broadcast_instr->dimensions();\n auto is_broadcasted_dim = [&](int64_t output_dim) {\n return absl::c_find(broadcast_dims, output_dim) == broadcast_dims.end();\n };\n if (!absl::c_all_of(scalar_indexed_const->output_dims(),\n is_broadcasted_dim)) {\n return nullptr;\n }\n enum class IndexComponent { Broadcasted, NotBroadcasted };\n std::vector simulated_index(\n broadcast_instr->shape().dimensions_size(), IndexComponent::Broadcasted);\n for (int64_t broadcast_dim : broadcast_dims) {\n simulated_index[broadcast_dim] = IndexComponent::NotBroadcasted;\n }\n absl::Span output_dims = scalar_indexed_const->output_dims();\n for (int64_t i = output_dims.size() - 1; i >= 0; --i) {\n CHECK(simulated_index[output_dims[i]] == IndexComponent::Broadcasted);\n EraseAt(&simulated_index, output_dims[i]);\n }\n InsertAt(&simulated_index, scalar_indexed_const->source_dim(),\n IndexComponent::Broadcasted);\n std::vector new_inner_broadcast_dims;\n for (int64_t i = 0; i < simulated_index.size(); i++) {\n if (simulated_index[i] == IndexComponent::NotBroadcasted) {\n new_inner_broadcast_dims.push_back(i);\n }\n }\n TF_ASSIGN_OR_RETURN(\n Literal inner_broadcast_result,\n 
broadcast_const_operand->literal().Broadcast(\n scalar_indexed_const->source()->shape(), new_inner_broadcast_dims));\n const Literal* literal_for_new_source;\n if (lhs_is_indexed) {\n TF_ASSIGN_OR_RETURN(\n literal_for_new_source,\n TakeOwnership(HloEvaluator{}.EvaluateElementwiseBinaryOp(\n opcode, scalar_indexed_const->literal(), inner_broadcast_result)));\n } else {\n TF_ASSIGN_OR_RETURN(\n literal_for_new_source,\n TakeOwnership(HloEvaluator{}.EvaluateElementwiseBinaryOp(\n opcode, inner_broadcast_result, scalar_indexed_const->literal())));\n }\n ConstantArray* new_source = Construct(literal_for_new_source);\n return Construct(\n new_source, scalar_indexed_const->indices(),\n scalar_indexed_const->source_dim(),\n std::vector(scalar_indexed_const->output_dims().begin(),\n scalar_indexed_const->output_dims().end()),\n scalar_indexed_const->shape());\n}\nabsl::StatusOr\nIndexedArrayAnalysis::ComputeArrayForElementwiseUnaryOp(HloOpcode opcode,\n Array* operand) {\n auto* scalar_indexed_const =\n dynamic_cast(operand);\n if (scalar_indexed_const == nullptr) {\n return nullptr;\n }\n TF_ASSIGN_OR_RETURN(Literal * literal_for_new_source,\n TakeOwnership(HloEvaluator{}.EvaluateElementwiseUnaryOp(\n opcode, scalar_indexed_const->literal())));\n ConstantArray* new_source = Construct(literal_for_new_source);\n return Construct(\n new_source, scalar_indexed_const->indices(),\n scalar_indexed_const->source_dim(),\n SpanToVector(scalar_indexed_const->output_dims()),\n scalar_indexed_const->shape());\n}\nnamespace {\nstd::optional GetOnlyNonContractingNonBatchDim(\n int64_t rank, absl::Span contracting_dims,\n absl::Span batch_dims) {\n std::optional result;\n for (int64_t dim = 0; dim < rank; dim++) {\n if (!absl::c_linear_search(contracting_dims, dim) &&\n !absl::c_linear_search(batch_dims, dim)) {\n if (result.has_value()) {\n return std::nullopt;\n }\n result = dim;\n }\n }\n return result;\n}\nbool CanFoldDotIntoIndexedArray(\n absl::string_view tag, 
Analysis::ScalarIndexedConstantArray* indexed_array,\n absl::Span contracting_dims,\n absl::Span batch_dims) {\n std::optional non_contracting_non_batch_dim =\n GetOnlyNonContractingNonBatchDim(indexed_array->shape().rank(),\n contracting_dims, batch_dims);\n if (!non_contracting_non_batch_dim.has_value()) {\n VLOG(3) << tag << \": multiple or no non-contracting non-batch dimensions\";\n return false;\n }\n if (indexed_array->output_dims().size() != 1 ||\n indexed_array->output_dims()[0] != *non_contracting_non_batch_dim) {\n VLOG(3) << tag << \": output dims != the lhs non-contracting non-batch dim\";\n return false;\n }\n int64_t indexed_array_rank = indexed_array->shape().rank();\n if (indexed_array->source_dim() < (indexed_array_rank - 2)) {\n VLOG(3) << tag\n << \": source dim is not in the low two dims, won't be able to form \"\n \"a matmul\";\n return false;\n }\n return true;\n}\n} \nabsl::StatusOr\nIndexedArrayAnalysis::ComputeArrayForDotWithIndexedLhs(\n const Shape& shape, const DotDimensionNumbers& dim_numbers,\n const PrecisionConfig& precision_config, ScalarIndexedConstantArray* lhs,\n ConstantArray* rhs) {\n VLOG(3) << \"ComputeArrayForDotWithIndexedLhs(\" << ToString(lhs) << \" \"\n << ToString(rhs);\n if (!CanFoldDotIntoIndexedArray(\n \"ComputeArrayForDotWithIndexedLhs\", lhs, \n dim_numbers.lhs_contracting_dimensions(),\n dim_numbers.lhs_batch_dimensions())) {\n return nullptr;\n }\n int64_t lhs_rank = lhs->shape().rank();\n DotDimensionNumbers new_dim_numbers = dim_numbers;\n new_dim_numbers.set_lhs_contracting_dimensions(\n 0, lhs->source_dim() == (lhs_rank - 1) ? 
(lhs_rank - 2) : (lhs_rank - 1));\n TF_ASSIGN_OR_RETURN(\n Literal * literal_for_new_source,\n TakeOwnership(HloEvaluator{}.EvaluateDotOp(\n new_dim_numbers, precision_config, lhs->literal(), *rhs->literal())));\n int64_t new_source_dim = dim_numbers.lhs_batch_dimensions_size() +\n dim_numbers.rhs_batch_dimensions_size();\n ConstantArray* new_source = Construct(literal_for_new_source);\n return Construct(\n new_source, lhs->indices(), new_source_dim,\n SpanToVector(lhs->output_dims()), shape);\n}\nabsl::StatusOr\nIndexedArrayAnalysis::ComputeArrayForDotWithIndexedRhs(\n const Shape& shape, const DotDimensionNumbers& dim_numbers,\n const PrecisionConfig& precision_config, ConstantArray* lhs,\n ScalarIndexedConstantArray* rhs) {\n VLOG(3) << \"ComputeArrayForDotWithIndexedRhs(\" << ToString(lhs) << \" \"\n << ToString(rhs);\n if (!CanFoldDotIntoIndexedArray(\n \"ComputeArrayForDotWithIndexedRhs\", rhs, \n dim_numbers.rhs_contracting_dimensions(),\n dim_numbers.rhs_batch_dimensions())) {\n return nullptr;\n }\n int64_t rhs_rank = rhs->shape().rank();\n DotDimensionNumbers new_dim_numbers = dim_numbers;\n new_dim_numbers.set_rhs_contracting_dimensions(\n 0, rhs->source_dim() == (rhs_rank - 1) ? 
(rhs_rank - 2) : (rhs_rank - 1));\n TF_ASSIGN_OR_RETURN(\n Literal * literal_for_new_source,\n TakeOwnership(HloEvaluator{}.EvaluateDotOp(\n new_dim_numbers, precision_config, *lhs->literal(), rhs->literal())));\n int64_t new_source_dim = dim_numbers.lhs_batch_dimensions_size() +\n dim_numbers.rhs_batch_dimensions_size() + 1;\n ConstantArray* new_source = Construct(literal_for_new_source);\n return Construct(\n new_source, rhs->indices(), new_source_dim,\n SpanToVector(rhs->output_dims()), shape);\n}\nabsl::StatusOr IndexedArrayAnalysis::ComputeArrayForDot(\n const Shape& shape, const DotDimensionNumbers& dim_numbers,\n const PrecisionConfig& precision_config, Array* lhs, Array* rhs) {\n VLOG(3) << \"ComputeArrayForDot(\" << ToString(lhs) << \" \" << ToString(rhs);\n if (auto* lhs_indexed_array =\n dynamic_cast(lhs)) {\n if (auto* rhs_constant = dynamic_cast(rhs)) {\n return ComputeArrayForDotWithIndexedLhs(shape, dim_numbers,\n precision_config,\n lhs_indexed_array, rhs_constant);\n }\n }\n if (auto* rhs_indexed_array =\n dynamic_cast(rhs)) {\n if (auto* lhs_constant = dynamic_cast(lhs)) {\n return ComputeArrayForDotWithIndexedRhs(shape, dim_numbers,\n precision_config, lhs_constant,\n rhs_indexed_array);\n }\n }\n return nullptr;\n}\nabsl::StatusOr IndexedArrayAnalysisPrinterPass::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n if (!VLOG_IS_ON(2)) {\n return false;\n }\n IndexedArrayAnalysis analysis;\n for (auto* computation :\n module->MakeNonfusionComputations(execution_threads)) {\n for (auto* instr : computation->instructions()) {\n TF_ASSIGN_OR_RETURN(Analysis::Array * t, analysis.GetArrayFor(instr));\n if (!dynamic_cast(t) && !dynamic_cast(t)) {\n VLOG(2) << instr->ToString() << \" -> \" << analysis.ToString(t);\n }\n }\n }\n return false;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/indexed_array_analysis.h\"\n#include \n#include \"absl/log/log.h\"\n#include 
\"absl/strings/ascii.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nclass IndexedArrayAnalysisTest : public HloTestBase {\n protected:\n void AssertArrayForRootExpressionIs(const std::string& hlo_text,\n const std::string& root_expression) {\n AssertArrayForRootExpressionIsImpl(hlo_text, root_expression,\n false);\n }\n void AssertArrayWithConstantsForRootExpressionIs(\n const std::string& hlo_text, const std::string& root_expression) {\n AssertArrayForRootExpressionIsImpl(hlo_text, root_expression,\n true);\n }\n private:\n std::string CanonicalizeWhitespace(const std::string& text) {\n std::string result;\n for (char c : text) {\n if (!absl::ascii_isspace(c)) {\n result.push_back(c);\n } else if (!result.empty() && result.back() != ' ') {\n result.push_back(' ');\n }\n }\n while (!result.empty() && result.back() == ' ') {\n result.pop_back();\n }\n return result;\n }\n void AssertArrayForRootExpressionIsImpl(const std::string& hlo_text,\n const std::string& root_expression,\n bool print_constants) {\n IndexedArrayAnalysis indexed_tensor_analysis;\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr m,\n ParseAndReturnVerifiedModule(hlo_text));\n TF_ASSERT_OK_AND_ASSIGN(IndexedArrayAnalysis::Array* const array_result,\n indexed_tensor_analysis.GetArrayFor(\n m->entry_computation()->root_instruction()));\n std::string string_result = CanonicalizeWhitespace(\n indexed_tensor_analysis.ToString(array_result, print_constants));\n LOG(INFO) << string_result;\n ASSERT_EQ(string_result, CanonicalizeWhitespace(root_expression));\n }\n};\nTEST_F(IndexedArrayAnalysisTest, SimpleOneToOneGather) {\n std::string hlo_text = R\"(\nHloModule SimpleGather\nENTRY main {\n operand = s32[3,3] parameter(0)\n indices = s32[5] parameter(1)\n ROOT gather = s32[5,3] gather(operand, indices),\n offset_dims={1},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=1,\n 
slice_sizes={1,3}\n}\n)\";\n AssertArrayForRootExpressionIs(hlo_text,\n \"(scalar-indexed %operand %indices 0->[0])\");\n}\nTEST_F(IndexedArrayAnalysisTest, SimpleOneToOneConstantGather) {\n std::string hlo_text = R\"(\nHloModule SimpleGather\nENTRY main {\n operand = s32[3,3] constant({{1,2,3},{1,2,3},{1,2,3}})\n indices = s32[5] parameter(0)\n ROOT gather = s32[5,3] gather(operand, indices),\n offset_dims={1},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=1,\n slice_sizes={1,3}\n}\n)\";\n AssertArrayForRootExpressionIs(\n hlo_text, \"(scalar-indexed-const (constant s32[3,3]) %indices 0->[0])\");\n}\nTEST_F(IndexedArrayAnalysisTest, GatherIsNotScalarIndexed0) {\n std::string hlo_text = R\"(\nHloModule SimpleGather\nENTRY main {\n operand = s32[3,3] constant({{1,2,3},{1,2,3},{1,2,3}})\n indices = s32[5,2] parameter(0)\n ROOT gather = s32[5] gather(operand, indices),\n offset_dims={},\n collapsed_slice_dims={0,1},\n start_index_map={0,1},\n index_vector_dim=1,\n slice_sizes={1,1}\n}\n)\";\n AssertArrayForRootExpressionIs(hlo_text, \"%gather\");\n}\nTEST_F(IndexedArrayAnalysisTest, GatherIsNotScalarIndexed1) {\n std::string hlo_text = R\"(\nHloModule SimpleGather\nENTRY main {\n operand = s32[3,3,1] parameter(0)\n indices = s32[5] parameter(1)\n ROOT gather = s32[5,3] gather(operand, indices),\n offset_dims={1},\n collapsed_slice_dims={0,2},\n start_index_map={0},\n index_vector_dim=1,\n slice_sizes={1,3,1}\n}\n)\";\n AssertArrayForRootExpressionIs(hlo_text, \"%gather\");\n}\nTEST_F(IndexedArrayAnalysisTest, GatherIsNotScalarIndexed2) {\n std::string hlo_text = R\"(\nHloModule SimpleGather\nENTRY main {\n operand = s32[3,3,1] parameter(0)\n indices = s32[5] parameter(1)\n ROOT gather = s32[5,2,3] gather(operand, indices),\n offset_dims={1,2},\n collapsed_slice_dims={2},\n start_index_map={0},\n index_vector_dim=1,\n slice_sizes={2,3,1}\n}\n)\";\n AssertArrayForRootExpressionIs(hlo_text, \"%gather\");\n}\nTEST_F(IndexedArrayAnalysisTest, 
GatherIsNotScalarIndexed3) {\n std::string hlo_text = R\"(\nHloModule SimpleGather\nENTRY main {\n operand = s32[3,3] parameter(0)\n indices = s32[5] parameter(1)\n ROOT gather = s32[5,2] gather(operand, indices),\n offset_dims={1},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=1,\n slice_sizes={1,2}\n}\n)\";\n AssertArrayForRootExpressionIs(hlo_text, \"%gather\");\n}\nTEST_F(IndexedArrayAnalysisTest, GatherOfGather_OneToOne) {\n std::string hlo_text = R\"(\nHloModule SimpleGather\nENTRY main {\n operand = s32[3,3] constant({{1,2,3},{1,2,3},{1,2,3}})\n indices_a = s32[5] parameter(0)\n indices_b = s32[2] parameter(1)\n gather_a = s32[5,3] gather(operand, indices_a),\n offset_dims={1},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=1,\n slice_sizes={1,3}\n ROOT gather_b = s32[2,3] gather(gather_a, indices_b),\n offset_dims={1},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=1,\n slice_sizes={1,3}\n}\n)\";\n AssertArrayForRootExpressionIs(\n hlo_text,\n \"(scalar-indexed-const (constant s32[3,3]) (scalar-indexed %indices_a \"\n \"%indices_b 0->[0]) 0->[0])\");\n}\nTEST_F(IndexedArrayAnalysisTest, GatherOfGather_ManyToOneWithOneToOne) {\n std::string hlo_text = R\"(\nHloModule SimpleGather\nENTRY main {\n operand = s32[3,2] parameter(0)\n indices_a = s32[5,7] parameter(1)\n indices_b = s32[2] parameter(2)\n gather_a = s32[5,3,7] gather(operand, indices_a),\n offset_dims={1},\n collapsed_slice_dims={1},\n start_index_map={1},\n index_vector_dim=2,\n slice_sizes={3,1}\n ROOT gather_b = s32[5,3,2] gather(gather_a, indices_b),\n offset_dims={0,1},\n collapsed_slice_dims={2},\n start_index_map={2},\n index_vector_dim=1,\n slice_sizes={5,3,1}\n}\n)\";\n AssertArrayForRootExpressionIs(hlo_text,\n \"(scalar-indexed %operand (scalar-indexed \"\n \"%indices_a %indices_b 1->[1]) 1->[0,2])\");\n}\nTEST_F(IndexedArrayAnalysisTest, GatherOfGather_OneToOneWithManyToOne) {\n std::string hlo_text = R\"(\nHloModule 
SimpleGather\nENTRY main {\n operand = s32[3,6] parameter(0)\n indices_a = s32[2] parameter(1)\n indices_b = s32[5,7] parameter(2)\n gather_a = s32[2,6] gather(operand, indices_a),\n offset_dims={1},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=1,\n slice_sizes={1,6}\n ROOT gather_b = s32[5,6,7] gather(gather_a, indices_b),\n offset_dims={1},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=2,\n slice_sizes={1,6}\n}\n)\";\n AssertArrayForRootExpressionIs(hlo_text,\n \"(scalar-indexed %operand (scalar-indexed \"\n \"%indices_a %indices_b 0->[0,1]) 0->[0,2])\");\n}\nTEST_F(IndexedArrayAnalysisTest, GatherOfGather_ManyToOneWithManyToOne) {\n std::string hlo_text = R\"(\nHloModule SimpleGather\nENTRY main {\n operand = s32[3,2] parameter(0)\n indices_a = s32[5,7] parameter(1)\n indices_b = s32[4,8] parameter(2)\n gather_a = s32[5,3,7] gather(operand, indices_a),\n offset_dims={1},\n collapsed_slice_dims={1},\n start_index_map={1},\n index_vector_dim=2,\n slice_sizes={3,1}\n ROOT gather_b = s32[4,5,3,8] gather(gather_a, indices_b),\n offset_dims={1,2},\n collapsed_slice_dims={2},\n start_index_map={2},\n index_vector_dim=2,\n slice_sizes={5,3,1}\n}\n)\";\n AssertArrayForRootExpressionIs(\n hlo_text,\n \"(scalar-indexed %operand (scalar-indexed %indices_a %indices_b \"\n \"1->[0,2]) 1->[0,1,3])\");\n}\nTEST_F(IndexedArrayAnalysisTest, ReshapeOfGather0) {\n std::string hlo_text = R\"(\nHloModule ReshapeOfGather\nENTRY main {\n operand = s32[3,4] constant({{1,2,3,4},{1,2,3,4},{1,2,3,4}})\n indices = s32[5] parameter(0)\n gather = s32[5,4] gather(operand, indices),\n offset_dims={1},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=1,\n slice_sizes={1,4}\n ROOT reshape = s32[5,2,2] reshape(gather)\n}\n)\";\n AssertArrayForRootExpressionIs(\n hlo_text, \"(scalar-indexed-const (constant s32[3,2,2]) %indices 0->[0])\");\n}\nTEST_F(IndexedArrayAnalysisTest, ReshapeOfGather1) {\n std::string hlo_text = 
R\"(\nHloModule ReshapeOfGather\nENTRY main {\n operand = s32[3,4] constant({{1,2,3,4},{1,2,3,4},{1,2,3,4}})\n indices = s32[5,7] parameter(0)\n gather = s32[5,4,7] gather(operand, indices),\n offset_dims={1},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=2,\n slice_sizes={1,4}\n ROOT reshape = s32[5,2,2,7] reshape(gather)\n}\n)\";\n AssertArrayForRootExpressionIs(\n hlo_text,\n \"(scalar-indexed-const (constant s32[3,2,2]) %indices 0->[0,3])\");\n}\nTEST_F(IndexedArrayAnalysisTest, ReshapeOfGather2) {\n std::string hlo_text = R\"(\nHloModule ReshapeOfGather\nENTRY main {\n operand = s32[3,2,6] constant({\n {{1,2,3,4,5,6},{1,2,3,4,5,6}},\n {{1,2,3,4,5,6},{1,2,3,4,5,6}},\n {{1,2,3,4,5,6},{1,2,3,4,5,6}}})\n indices = s32[5,7] parameter(0)\n gather = s32[5,2,6,7] gather(operand, indices),\n offset_dims={1,2},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=2,\n slice_sizes={1,2,6}\n ROOT reshape = s32[5,3,4,7] reshape(gather)\n}\n)\";\n AssertArrayForRootExpressionIs(\n hlo_text,\n \"(scalar-indexed-const (constant s32[3,3,4]) %indices 0->[0,3])\");\n}\nTEST_F(IndexedArrayAnalysisTest, ReshapeOfGather3) {\n std::string hlo_text = R\"(\nHloModule ReshapeOfGather\nENTRY main {\n operand = s32[2,6] constant({\n {1,2,3,4,5,6},{1,2,3,4,5,6}})\n indices = s32[1] parameter(0)\n gather = s32[1,6] gather(operand, indices),\n offset_dims={1},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=1,\n slice_sizes={1,6}\n ROOT reshape = s32[1,1,6] reshape(gather)\n}\n)\";\n const char* expected_root_expression = R\"(\n(scalar-indexed-const\n (constant s32[2,1,1,6])\n (reshape %indices to s32[])\n 0->[])\n)\";\n AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);\n}\nTEST_F(IndexedArrayAnalysisTest, ReshapeOfGather4) {\n std::string hlo_text = R\"(\nHloModule ReshapeOfGather\nENTRY main {\n operand = s32[2,3]{1,0} constant({ { 1, 2, 3 }, { 1, 2, 3 } })\n i.0 = s64[1,3]{1,0} parameter(0)\n g.0 = 
s32[1,3,3]{2,1,0} gather(operand, i.0), offset_dims={2},\n collapsed_slice_dims={0}, start_index_map={0},\n index_vector_dim=2, slice_sizes={1,3}\n i.1 = s64[1] parameter(1)\n g.1 = s32[1,1,3]{2,1,0} gather(g.0, i.1), offset_dims={0,2},\n collapsed_slice_dims={1}, start_index_map={1},\n index_vector_dim=1, slice_sizes={1,1,3}\n ROOT reshape = s32[1,3]{1,0} reshape(g.1)\n}\n)\";\n const char* expected_root_expression = R\"(\n(scalar-indexed-const\n (constant s32[2,1,3])\n (reshape\n (scalar-indexed %i.0 %i.1 1->[1])\n to s64[])\n 0->[])\n)\";\n AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);\n}\nTEST_F(IndexedArrayAnalysisTest, ReshapeOfGather5) {\n std::string hlo_text = R\"(\nHloModule ReshapeOfGather\nENTRY main {\n operand = s32[1,6] constant({{1,2,3,4,5,6}})\n indices = s32[1] parameter(0)\n gather = s32[1,6] gather(operand, indices),\n offset_dims={1},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=1,\n slice_sizes={1,6}\n ROOT reshape = s32[1,1,6] reshape(gather)\n}\n)\";\n const char* expected_root_expression = R\"(\n(scalar-indexed-const\n (constant s32[1,1,1,6])\n (reshape %indices to s32[])\n 0->[])\n)\";\n AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);\n}\nTEST_F(IndexedArrayAnalysisTest, ReshapeOfGather6) {\n std::string hlo_text = R\"(\nHloModule ReshapeOfGather\nENTRY main {\n operand = s32[1,2,6] constant({{\n {1,2,3,4,5,6},{1,2,3,4,5,6}}})\n indices = s32[1] parameter(0)\n gather = s32[1,1,6] gather(operand, indices),\n offset_dims={1,2},\n collapsed_slice_dims={1},\n start_index_map={1},\n index_vector_dim=1,\n slice_sizes={1,1,6}\n ROOT reshape = s32[1,1,1,6] reshape(gather)\n}\n)\";\n const char* expected_root_expression = R\"(\n(scalar-indexed-const\n (constant s32[2,1,1,1,6] s32[2,1,1,1,6] {\n { { { { 1, 2, 3, 4, 5, 6 } } } },\n { { { { 1, 2, 3, 4, 5, 6 } } } } })\n (reshape %indices to s32[])\n 0->[])\n)\";\n AssertArrayWithConstantsForRootExpressionIs(hlo_text,\n 
expected_root_expression);\n}\nTEST_F(IndexedArrayAnalysisTest, ReshapeOfGather7) {\n std::string hlo_text = R\"(\nHloModule ReshapeOfGather\nENTRY main {\n operand = s32[2,6] constant({\n {1,2,3,4,5,6},{1,2,3,4,5,6}})\n indices = s32[1,5] parameter(0)\n gather = s32[1,5,6] gather(operand, indices),\n offset_dims={2},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=2,\n slice_sizes={1,6}\n ROOT reshape = s32[1,1,5,6] reshape(gather)\n}\n)\";\n const char* expected_root_expression = R\"(\n(scalar-indexed-const\n (constant s32[2,1,1,6] s32[2,1,1,6] {\n { { { 1, 2, 3, 4, 5, 6 } } },\n { { { 1, 2, 3, 4, 5, 6 } } } })\n (reshape %indices to s32[5])\n 0->[2])\n)\";\n AssertArrayWithConstantsForRootExpressionIs(hlo_text,\n expected_root_expression);\n}\nTEST_F(IndexedArrayAnalysisTest, ReshapeOfGatherNoFold0) {\n std::string hlo_text = R\"(\nHloModule ReshapeOfGather\nENTRY main {\n operand = s32[3,4] constant({{1,2,3,4},{1,2,3,4},{1,2,3,4}})\n indices = s32[5,6] parameter(0)\n gather = s32[5,4,6] gather(operand, indices),\n offset_dims={1},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=2,\n slice_sizes={1,4}\n ROOT reshape = s32[5,2,2,2,3] reshape(gather)\n}\n)\";\n const char* expected_root_expression = R\"(\n(reshape\n (scalar-indexed-const\n (constant s32[3,4])\n %indices\n 0->[0,2])\n to s32[5,2,2,2,3])\n)\";\n AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);\n}\nTEST_F(IndexedArrayAnalysisTest, ReshapeOfGatherNoFold1) {\n std::string hlo_text = R\"(\nHloModule ReshapeOfGather\nENTRY main {\n operand = s32[3,5,2] constant({\n {{1,2},{3,4},{5,6},{7,8},{9,10}},\n {{1,2},{3,4},{5,6},{7,8},{9,10}},\n {{1,2},{3,4},{5,6},{7,8},{9,10}}})\n indices = s32[7] parameter(0)\n gather = s32[3,2,7] gather(operand, indices),\n offset_dims={0,1},\n collapsed_slice_dims={1},\n start_index_map={1},\n index_vector_dim=1,\n slice_sizes={3,1,2}\n ROOT reshape = s32[6,7] reshape(gather)\n}\n)\";\n const char* 
expected_root_expression = R\"(\n(reshape\n (scalar-indexed-const\n (constant s32[3,5,2])\n %indices\n 1->[2])\n to s32[6,7])\n)\";\n AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);\n}\nTEST_F(IndexedArrayAnalysisTest, ReshapeOfGatherNoFold2) {\n std::string hlo_text = R\"(\nHloModule ReshapeOfGather\nENTRY main {\n operand = s32[3,4,1] constant({\n {{1},{2},{3},{4}},\n {{1},{2},{3},{4}},\n {{1},{2},{3},{4}}})\n indices = s32[5,6] parameter(0)\n gather = s32[5,4,6,1] gather(operand, indices),\n offset_dims={1,3},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=2,\n slice_sizes={1,4,1}\n ROOT reshape = s32[5,2,2,2,3,1] reshape(gather)\n}\n)\";\n const char* expected_root_expression = R\"(\n(reshape\n (scalar-indexed-const\n (constant s32[3,4,1])\n %indices\n 0->[0,2])\n to s32[5,2,2,2,3,1])\n)\";\n AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);\n}\nTEST_F(IndexedArrayAnalysisTest, UnaryOpOfGather) {\n std::string hlo_text = R\"(\nHloModule UnaryOpOfGather\nENTRY main {\n operand = f32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})\n indices = s32[5] parameter(0)\n gather = f32[5,4] gather(operand, indices),\n offset_dims={1},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=1,\n slice_sizes={1,4}\n ROOT tanh = f32[5,4] tanh(gather)\n}\n)\";\n AssertArrayWithConstantsForRootExpressionIs(hlo_text, R\"(\n(scalar-indexed-const (constant f32[3,4] f32[3,4] {\n { 0.761594176, 0.964027584, 0.995054781, 0.999329329 },\n { 0.761594176, 0.995054781, 0.964027584, 0.999329329 },\n { 0.999329329, 0.995054781, 0.964027584, 0.761594176 }\n}) %indices 0->[0]))\");\n}\nTEST_F(IndexedArrayAnalysisTest, AddBroadcastedScalarWithGather) {\n std::string hlo_text = R\"(\nHloModule AddBroadcastedScalarWithGather\nENTRY main {\n gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})\n constant = s32[] constant(5)\n constant_broadcasted = s32[5,4] broadcast(constant), dimensions={}\n indices = 
s32[5] parameter(0)\n gather = s32[5,4] gather(gather_operand, indices),\n offset_dims={1},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=1,\n slice_sizes={1,4}\n ROOT add = s32[5,4] add(gather, constant_broadcasted)\n}\n)\";\n AssertArrayWithConstantsForRootExpressionIs(hlo_text, R\"(\n(scalar-indexed-const (constant s32[3,4] s32[3,4] {\n { 6, 7, 8, 9 },\n { 6, 8, 7, 9 },\n { 9, 8, 7, 6 }\n}) %indices 0->[0]))\");\n}\nTEST_F(IndexedArrayAnalysisTest,\n SubtractBroadcastedScalarWithGather_GatherIsLhs) {\n std::string hlo_text = R\"(\nHloModule SubtractBroadcastedScalarWithGather\nENTRY main {\n gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})\n constant = s32[] constant(5)\n constant_broadcasted = s32[5,4] broadcast(constant), dimensions={}\n indices = s32[5] parameter(0)\n gather = s32[5,4] gather(gather_operand, indices),\n offset_dims={1},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=1,\n slice_sizes={1,4}\n ROOT sub = s32[5,4] subtract(gather, constant_broadcasted)\n}\n)\";\n AssertArrayWithConstantsForRootExpressionIs(hlo_text, R\"(\n(scalar-indexed-const (constant s32[3,4] s32[3,4] {\n { -4, -3, -2, -1 },\n { -4, -2, -3, -1 },\n { -1, -2, -3, -4 }\n}) %indices 0->[0]))\");\n}\nTEST_F(IndexedArrayAnalysisTest,\n SubtractBroadcastedScalarWithGather_GatherIsRhs) {\n std::string hlo_text = R\"(\nHloModule SubtractBroadcastedScalarWithGather\nENTRY main {\n gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})\n constant = s32[] constant(5)\n constant_broadcasted = s32[5,4] broadcast(constant), dimensions={}\n indices = s32[5] parameter(0)\n gather = s32[5,4] gather(gather_operand, indices),\n offset_dims={1},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=1,\n slice_sizes={1,4}\n ROOT sub = s32[5,4] subtract(constant_broadcasted, gather)\n}\n)\";\n AssertArrayWithConstantsForRootExpressionIs(hlo_text, R\"(\n(scalar-indexed-const (constant s32[3,4] s32[3,4] {\n 
{ 4, 3, 2, 1 },\n { 4, 2, 3, 1 },\n { 1, 2, 3, 4 }\n}) %indices 0->[0]))\");\n}\nTEST_F(IndexedArrayAnalysisTest, AddBroadcastedVectorWithGather) {\n std::string hlo_text = R\"(\nHloModule AddBroadcastedVectorWithGather\nENTRY main {\n gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})\n constant_vect = s32[4] constant({10,11,12,13})\n constant_broadcasted = s32[5,4] broadcast(constant_vect), dimensions={1}\n indices = s32[5] parameter(0)\n gather = s32[5,4] gather(gather_operand, indices),\n offset_dims={1},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=1,\n slice_sizes={1,4}\n ROOT add = s32[5,4] add(gather, constant_broadcasted)\n}\n)\";\n AssertArrayWithConstantsForRootExpressionIs(hlo_text, R\"(\n(scalar-indexed-const (constant s32[3,4] s32[3,4] {\n { 11, 13, 15, 17 },\n { 11, 14, 14, 17 },\n { 14, 14, 14, 14 }\n}) %indices 0->[0]))\");\n}\nTEST_F(IndexedArrayAnalysisTest, AddBroadcastedVectorWithGather_Negative) {\n std::string hlo_text = R\"(\nHloModule AddBroadcastedVectorWithGather\nENTRY main {\n gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}})\n constant_vect = s32[5] constant({10,11,12,13,14})\n constant_broadcasted = s32[5,4] broadcast(constant_vect), dimensions={0}\n indices = s32[5] parameter(0)\n gather = s32[5,4] gather(gather_operand, indices),\n offset_dims={1},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=1,\n slice_sizes={1,4}\n ROOT add = s32[5,4] add(gather, constant_broadcasted)\n}\n)\";\n AssertArrayForRootExpressionIs(hlo_text, \"%add\");\n}\nTEST_F(IndexedArrayAnalysisTest, RegularUnaryOp) {\n std::string hlo_text = R\"(\nHloModule RegularUnaryOp\nENTRY main {\n input = f32[100] parameter(0)\n ROOT tanh = f32[100] tanh(input)\n}\n)\";\n AssertArrayForRootExpressionIs(hlo_text, \"%tanh\");\n}\nTEST_F(IndexedArrayAnalysisTest, RegularBinaryOp) {\n std::string hlo_text = R\"(\nHloModule RegularUnaryOp\nENTRY main {\n input0 = f32[100] parameter(0)\n input1 = 
f32[100] parameter(1)\n ROOT add = f32[100] add(input0, input1)\n}\n)\";\n AssertArrayForRootExpressionIs(hlo_text, \"%add\");\n}\nTEST_F(IndexedArrayAnalysisTest, DotOpBasic_0) {\n std::string hlo_text = R\"(\nHloModule DotOp\nENTRY main {\n gather_operand = s32[3,4] constant({{1,2,3,4},{5,6,7,8},{9,10,11,12}})\n dot_rhs_constant = s32[4,3] constant({{1,2,3},{4,5,6},{7,8,9},{10,11,12}})\n indices = s32[5] parameter(0)\n dot_lhs = s32[5,4] gather(gather_operand, indices),\n offset_dims={1},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=1,\n slice_sizes={1,4}\n ROOT dot = s32[5,3] dot(dot_lhs, dot_rhs_constant), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n}\n)\";\n AssertArrayWithConstantsForRootExpressionIs(hlo_text, R\"(\n(scalar-indexed-const\n (constant s32[3,3] s32[3,3] {\n { 70, 80, 90 },\n { 158, 184, 210 },\n { 246, 288, 330 } })\n %indices 0->[0]))\");\n}\nTEST_F(IndexedArrayAnalysisTest, DotOpBasic_1) {\n std::string hlo_text = R\"(\nHloModule DotOp\nENTRY main {\n gather_operand = s32[3,4] constant({{1,2,3,4},{5,6,7,8},{9,10,11,12}})\n dot_rhs_constant = s32[3,3] constant({{1,2,3},{4,5,6},{7,8,9}})\n indices = s32[5] parameter(0)\n dot_lhs = s32[3,5] gather(gather_operand, indices),\n offset_dims={0},\n collapsed_slice_dims={1},\n start_index_map={1},\n index_vector_dim=1,\n slice_sizes={3,1}\n ROOT dot = s32[5,3] dot(dot_lhs, dot_rhs_constant), lhs_contracting_dims={0}, rhs_contracting_dims={0}\n}\n)\";\n AssertArrayWithConstantsForRootExpressionIs(hlo_text, R\"(\n(scalar-indexed-const\n (constant s32[4,3] s32[4,3] {\n { 84, 99, 114 },\n { 96, 114, 132 },\n { 108, 129, 150 },\n { 120, 144, 168 } })\n %indices 0->[1]))\");\n}\nTEST_F(IndexedArrayAnalysisTest, DotOpBasic_2) {\n std::string hlo_text = R\"(\nHloModule DotOp\nENTRY main {\n gather_operand = s32[3,4] constant({{1,2,3,4},{5,6,7,8},{9,10,11,12}})\n dot_lhs_constant = s32[4,3] constant({{1,2,3},{4,5,6},{7,8,9},{10,11,12}})\n indices = s32[5] parameter(0)\n dot_rhs 
= s32[3,5] gather(gather_operand, indices),\n offset_dims={0},\n collapsed_slice_dims={1},\n start_index_map={1},\n index_vector_dim=1,\n slice_sizes={3,1}\n ROOT dot = s32[4,5] dot(dot_lhs_constant, dot_rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n}\n)\";\n AssertArrayWithConstantsForRootExpressionIs(hlo_text, R\"(\n(scalar-indexed-const\n (constant s32[4,4] s32[4,4] {\n { 38, 44, 50, 56 },\n { 83, 98, 113, 128 },\n { 128, 152, 176, 200 },\n { 173, 206, 239, 272 } })\n %indices 1->[1])\n)\");\n}\nTEST_F(IndexedArrayAnalysisTest, DotOpBasic_3) {\n std::string hlo_text = R\"(\nHloModule DotOp\nENTRY main {\n gather_operand = s32[4,3] constant({{1,2,3},{4,5,6},{7,8,9},{10,11,12}})\n dot_lhs_constant = s32[4,3] constant({{1,2,3},{4,5,6},{7,8,9},{10,11,12}})\n indices = s32[5] parameter(0)\n dot_rhs = s32[5,3] gather(gather_operand, indices),\n offset_dims={1},\n collapsed_slice_dims={0},\n start_index_map={0},\n index_vector_dim=1,\n slice_sizes={1,3}\n ROOT dot = s32[4,5] dot(dot_lhs_constant, dot_rhs), lhs_contracting_dims={1}, rhs_contracting_dims={1}\n}\n)\";\n AssertArrayWithConstantsForRootExpressionIs(hlo_text, R\"(\n(scalar-indexed-const\n (constant s32[4,4] s32[4,4] {\n { 14, 32, 50, 68 },\n { 32, 77, 122, 167 },\n { 50, 122, 194, 266 },\n { 68, 167, 266, 365 } })\n %indices 1->[0])\n)\");\n}\nTEST_F(IndexedArrayAnalysisTest, DotOpWithBatch) {\n std::string hlo_text = R\"(\nHloModule DotOp\nENTRY main {\n gather_operand = s32[2,3,2] constant({{{1,2},{3,4},{5,6}},{{7,8},{9,10},{11,12}}})\n dot_lhs_constant = s32[2,2,3] constant({{{1,2,3},{4,5,6}},{{7,8,9},{10,11,12}}})\n indices = s32[4] parameter(0)\n dot_rhs = s32[2,3,4] gather(gather_operand, indices),\n offset_dims={0,1},\n collapsed_slice_dims={2},\n start_index_map={2},\n index_vector_dim=1,\n slice_sizes={2,3,1}\n ROOT dot = s32[2,2,4] dot(dot_lhs_constant, dot_rhs),\n lhs_contracting_dims={2}, rhs_contracting_dims={1},\n lhs_batch_dims={0}, rhs_batch_dims={0}\n}\n)\";\n 
AssertArrayWithConstantsForRootExpressionIs(hlo_text, R\"(\n(scalar-indexed-const\n (constant s32[2,2,2] s32[2,2,2] {\n { { 22, 28 },\n { 49, 64 } },\n { { 220, 244 },\n { 301, 334 } } })\n %indices 3->[2])\n)\");\n}\nTEST_F(IndexedArrayAnalysisTest, DotOpNegative) {\n std::string hlo_text = R\"(\nHloModule DotOp\nENTRY main {\n gather_operand = s32[3,4] constant({{1,2,3,4},{5,6,7,8},{9,10,11,12}})\n dot_rhs_constant = s32[2,3] constant({{1,2,3},{4,5,6}})\n indices = s32[2] parameter(0)\n dot_lhs = s32[3,2] gather(gather_operand, indices),\n offset_dims={0},\n collapsed_slice_dims={1},\n start_index_map={1},\n index_vector_dim=1,\n slice_sizes={3,1}\n ROOT dot = s32[3,3] dot(dot_lhs, dot_rhs_constant), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n}\n)\";\n AssertArrayWithConstantsForRootExpressionIs(hlo_text, \"%dot\");\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/indexed_array_analysis.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/indexed_array_analysis_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1170,"cells":{"ID":{"kind":"string","value":"0246551a-2064-4f3b-ad55-2a13cb8a1a45"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"all_gather_broadcast_reorder"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/all_gather_broadcast_reorder.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/all_gather_broadcast_reorder_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/all_gather_broadcast_reorder.h\"\n#include \n#include \n#include \"absl/container/flat_hash_set.h\"\n#include 
\"absl/log/log.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/span.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_query.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/errors.h\"\nnamespace xla {\nabsl::StatusOr AllGatherBroadcastReorder::Run(\n HloModule *module,\n const absl::flat_hash_set &execution_threads) {\n if (hlo_query::ContainsLayoutConstrainedCollective(*module,\n HloOpcode::kAllGather)) {\n VLOG(1) << \"Skip AllGatherBroadcastReorder because the module contains \"\n \"all-gather with constrained layouts\";\n return false;\n }\n int64_t next_channel_id = hlo_query::NextChannelId(*module);\n bool changed = false;\n for (auto computation : module->computations(execution_threads)) {\n for (HloInstruction *inst : computation->MakeInstructionPostOrder()) {\n if (inst->opcode() != HloOpcode::kAllGather || !inst->shape().IsArray() ||\n inst->operand(0)->opcode() != HloOpcode::kBroadcast) {\n continue;\n }\n HloAllGatherInstruction *ag = Cast(inst);\n HloBroadcastInstruction *bcast =\n Cast(inst->mutable_operand(0));\n absl::flat_hash_set non_uniform_dims;\n non_uniform_dims.insert(bcast->dimensions().begin(),\n bcast->dimensions().end());\n const bool all_gather_along_uniform_dim =\n non_uniform_dims.insert(ag->all_gather_dimension()).second;\n int64_t uniform_dim_size = 1;\n for (int64_t i = 0; i < ag->shape().rank(); ++i) {\n if (non_uniform_dims.count(i) == 0) {\n uniform_dim_size *= ag->shape().dimensions(i);\n }\n }\n if (uniform_dim_size == 1) {\n continue;\n }\n HloInstruction *replacement;\n const int64_t ag_dim = ag->all_gather_dimension();\n if (!all_gather_along_uniform_dim) {\n VLOG(2) << \"All-gather along non uniform dimension\";\n auto ag_dim_index = 
PositionInContainer(bcast->dimensions(), ag_dim);\n Shape new_ag_shape = bcast->operand(0)->shape();\n new_ag_shape.set_dimensions(ag_dim_index,\n ag->shape().dimensions(ag_dim));\n auto *new_ag =\n Cast(computation->AddInstruction(\n ag->CloneWithNewOperands(new_ag_shape, bcast->operands())));\n if (ag->channel_id()) {\n new_ag->set_channel_id(next_channel_id++);\n }\n new_ag->set_all_gather_dimension(ag_dim_index);\n replacement = computation->AddInstruction(\n bcast->CloneWithNewOperands(ag->shape(), {new_ag}));\n } else {\n VLOG(2) << \"All-gather along uniform dimension\";\n HloInstruction *x = bcast->mutable_operand(0);\n std::vector shape_dims{1};\n absl::Span x_dims = x->shape().dimensions();\n shape_dims.insert(shape_dims.end(), x_dims.begin(), x_dims.end());\n Shape shape =\n ShapeUtil::MakeShape(x->shape().element_type(), shape_dims);\n HloInstruction *rs0 = computation->AddInstruction(\n HloInstruction::CreateReshape(shape, x));\n const int64_t ag_factor = ag->shape().dimensions(ag_dim) /\n ag->operand(0)->shape().dimensions(ag_dim);\n shape.set_dimensions(0, ag_factor);\n auto *new_ag =\n Cast(computation->AddInstruction(\n ag->CloneWithNewOperands(shape, {rs0})));\n if (ag->channel_id()) {\n new_ag->set_channel_id(next_channel_id++);\n }\n new_ag->set_all_gather_dimension(0);\n std::vector bcast_shape_dims =\n SpanToVector(ag->shape().dimensions());\n bcast_shape_dims[ag_dim] = ag_factor;\n bcast_shape_dims.insert(bcast_shape_dims.begin() + ag_dim + 1,\n ag->shape().dimensions(ag_dim) / ag_factor);\n Shape bcast_shape =\n ShapeUtil::MakeShape(x->shape().element_type(), bcast_shape_dims);\n std::vector bcast_dims;\n bcast_dims.push_back(ag_dim);\n for (int64_t d : bcast->dimensions()) {\n bcast_dims.push_back(d + (d > ag_dim));\n }\n HloInstruction *bcast = computation->AddInstruction(\n HloInstruction::CreateBroadcast(bcast_shape, new_ag, bcast_dims));\n replacement = computation->AddInstruction(\n HloInstruction::CreateReshape(ag->shape(), bcast));\n 
}\n TF_RETURN_IF_ERROR(ag->ReplaceAllUsesWith(replacement));\n TF_RETURN_IF_ERROR(computation->RemoveInstructionAndUnusedOperands(ag));\n changed = true;\n }\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/all_gather_broadcast_reorder.h\"\n#include \n#include \n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nnamespace m = xla::testing::opcode_matchers;\nclass AllGatherBroadcastReorderTest : public HloTestBase {\n public:\n enum class PassOutput { NoChange, NonUniformAGPattern, UniformAGPattern };\n void RunPass(absl::string_view hlo_module, PassOutput expected_output) {\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_module));\n auto changed = AllGatherBroadcastReorder().Run(module.get());\n ASSERT_TRUE(changed.ok());\n if (expected_output == PassOutput::NoChange) {\n EXPECT_FALSE(changed.value());\n } else {\n EXPECT_TRUE(changed.value());\n if (expected_output == PassOutput::NonUniformAGPattern) {\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n m::Broadcast(m::AllGather(m::Parameter())));\n } else {\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n m::Reshape(m::Broadcast(m::AllGather(m::Reshape(m::Parameter())))));\n }\n }\n }\n};\nTEST_F(AllGatherBroadcastReorderTest, Simple_GatherAlongNonUniformDim) {\n absl::string_view hlo_string = R\"(\nHloModule m\nENTRY main {\n x = f32[128, 5] parameter(0)\n bc = f32[5, 4, 8, 128] broadcast(x), dimensions={3, 0}\n ROOT ag = f32[5, 4, 8, 256] all-gather(bc), dimensions={3}, replica_groups={{0, 1}}\n}\n)\";\n RunPass(hlo_string, PassOutput::NonUniformAGPattern);\n}\nTEST_F(AllGatherBroadcastReorderTest, Simple_GatherAlongUniformDim) {\n absl::string_view hlo_string = R\"(\nHloModule m\nENTRY main {\n x = f32[128, 5] parameter(0)\n bc = f32[5, 4, 8, 128] 
broadcast(x), dimensions={3, 0}\n ROOT ag = f32[5, 12, 8, 128] all-gather(bc), dimensions={1}, replica_groups={{0, 1, 2}}\n}\n)\";\n RunPass(hlo_string, PassOutput::UniformAGPattern);\n}\nTEST_F(AllGatherBroadcastReorderTest, Simple_GatherBroadcastScalar) {\n absl::string_view hlo_string = R\"(\nHloModule m\nENTRY main {\n x = f32[] parameter(0)\n bc = f32[4, 8] broadcast(x), dimensions={}\n ROOT ag = f32[12, 8] all-gather(bc), dimensions={0}, replica_groups={{0, 1, 2}}\n}\n)\";\n RunPass(hlo_string, PassOutput::UniformAGPattern);\n}\nTEST_F(AllGatherBroadcastReorderTest, T5Test) {\n absl::string_view hlo_string = R\"(\nHloModule m\nENTRY main {\n x = f32[128] parameter(0)\n bc = f32[1,4,84,128]{3,2,1,0} broadcast(x), dimensions={3}\n ROOT ag = f32[8,4,84,128]{3,2,1,0} all-gather(bc), channel_id=6,\n replica_groups={{0,1,2,3,4,5,6,7}}, dimensions={0}, use_global_device_ids=true\n}\n)\";\n RunPass(hlo_string, PassOutput::UniformAGPattern);\n}\nTEST_F(AllGatherBroadcastReorderTest, FailedMatch) {\n absl::string_view hlo_string = R\"(\nHloModule m\nENTRY main {\n x = f32[1,4,84,128] parameter(0)\n ROOT ag = f32[8,4,84,128]{3,2,1,0} all-gather(x), channel_id=6,\n replica_groups={{0,1,2,3,4,5,6,7}}, dimensions={0}, use_global_device_ids=true\n}\n)\";\n RunPass(hlo_string, PassOutput::NoChange);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_gather_broadcast_reorder.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_gather_broadcast_reorder_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1171,"cells":{"ID":{"kind":"string","value":"9aa7f320-8708-44ab-8106-6d86e1782605"},"Language":{"kind":"string","value":"cpp"},"Repository 
Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"space_to_batch_converter"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/space_to_batch_converter.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/space_to_batch_converter_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/space_to_batch_converter.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/algorithm.h\"\n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/types/span.h\"\n#include \"xla/debug_options_flags.h\"\n#include \"xla/hlo/ir/dfs_hlo_visitor_with_default.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/literal.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/service/hlo_creation_utils.h\"\n#include \"xla/service/pattern_matcher.h\"\n#include \"xla/service/shape_inference.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/status_macros.h\"\n#include \"xla/tsl/lib/core/bitmap.h\"\n#include \"xla/types.h\"\n#include \"xla/util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/logging.h\"\n#include \"tsl/platform/status.h\"\nnamespace xla {\nnamespace {\nnamespace m = match;\nconstexpr int64_t kNumMappedDims = 3;\nclass ConvolutionVisitor {\n public:\n absl::Status PerformSpaceToBatchOnConvolution(HloInstruction* convolution);\n struct ConvDetails {\n std::vector spatial_dimensions_to_split;\n int64_t inherent_low_padding, inherent_high_padding, stride, spatial_size,\n base_dilation_factor, halo_size, high_padding_for_conv,\n low_padding_for_conv, 
kernel_spatial_dim_size, input_dim_size;
  };
  // NOTE(review): template arguments appear to have been stripped throughout
  // this dump (e.g. "std::pair, std::vector>" and bare "absl::StatusOr");
  // restore them (likely std::vector<int64_t>, absl::StatusOr<HloInstruction*>,
  // etc.) against the upstream source before compiling.
  // Gathers stride/padding/dilation/size facts about `convolution`'s chosen
  // spatial dimension (fields consumed in IsConvSuitableForSpaceToBatch).
  ConvDetails GetConvolutionDetails(HloInstruction* convolution,
                                    ConvolutionDimensionNumbers& dim_numbers);
  // Returns the spatial dims being split, in both the old instruction's
  // numbering and the space-to-batched instruction's permuted numbering.
  std::pair, std::vector> GetSpatialDimsToSplit(
      HloInstruction* old_operand);
  // True if a window-dilated conv should be treated as a forward conv here.
  bool IsForwardWindowDilatedConv(HloInstruction* convolution,
                                  ConvolutionDimensionNumbers& dim_numbers);
  // Whether the space-to-batch transform can be propagated from `producer`
  // onto `consumer`.
  bool CanPropagate(HloInstruction* consumer, HloInstruction* producer);
  bool IsBroadcastTree(HloInstruction* op, HloInstruction* consumer,
                       std::vector& instructions_to_transform);
  void RewriteBroadcastTree(
      HloInstruction* producer,
      std::vector& instructions_to_transform);
  void PropagateOnBroadcast(HloInstruction* consumer, HloInstruction* producer);
  bool IsOpcodeNonPropagatable(HloInstruction* consumer);
  bool SupportedOpForPropagation(HloInstruction* consumer,
                                 HloInstruction* producer);
  bool SupportedDotForPropagation(HloInstruction* consumer,
                                  HloInstruction* producer);
  bool IsBroadcastPropagatable(HloInstruction* broadcast,
                               HloInstruction* old_other_op);
  // Performs the propagation; presumably returns whether further propagation
  // is needed (see the assignment in Run()) -- TODO confirm.
  absl::StatusOr Propagate(HloInstruction* consumer,
                           HloInstruction* producer);
  absl::StatusOr>> SplitSpace(
      HloInstruction* activations, ConvolutionDimensionNumbers& dim_numbers,
      int64_t& activations_batch_dim, int64_t high_padding, int64_t low_padding,
      int64_t spatial_split_size, int64_t num_splits,
      std::vector* spatial_dimensions_to_split,
      bool is_backprop = false, bool is_rhs = false);
  absl::StatusOr PerformSplitSpace(
      HloInstruction* activations,
      absl::Span spatial_dimensions_to_split,
      int64_t activations_batch_dim, int64_t spatial_split_size,
      int64_t num_splits);
  absl::StatusOr TransposeAndMergeBatch(
      HloInstruction* activations,
      absl::Span final_split_spatial_dim_positioning,
      int64_t activations_batch_dim, int64_t old_batch_size);
  absl::StatusOr PadAndSplitSpace(
      HloInstruction* activations,
      absl::Span spatial_dimensions_to_split,
      int64_t activations_batch_dim, int64_t high_padding, int64_t low_padding,
      int64_t spatial_split_size, int64_t num_splits);
  absl::StatusOr PropagateOnConstant(HloInstruction* consumer,
                                     HloInstruction* producer);
  absl::Status PropagateOnConv(HloInstruction* convolution);
  absl::Status PropagateOnConcat(HloInstruction* concat);
  absl::Status PropagateOnReverse(HloInstruction* reverse);
  absl::Status PropagateOnPad(HloInstruction* pad);
  absl::Status PropagateOnSlice(HloInstruction* slice);
  absl::Status PropagateOnBackpropFilterConv(HloInstruction* convolution);
  // Eligibility test; used when seeding the worklist and again during Run().
  bool IsConvSuitableForSpaceToBatch(HloInstruction* convolution);
  bool IsThisBackPropFilterConv(HloInstruction* convolution);
  absl::Status PropagateOnUsers(HloInstruction* old_conv);
  absl::StatusOr SelectValidPortion(
      HloInstruction* new_instr, HloInstruction* old_instr,
      HloInstruction* select_val, int64_t new_batch_dim,
      absl::Span new_space_dims, int64_t old_batch_dim,
      absl::Span old_space_dims);
  // Result of BringSpaceNextToBatch: the transposed instruction plus the
  // permutation that was applied.
  struct SpaceNextToBatchDetails {
    HloInstruction* instr;
    std::vector transpose_dims;
  };
  absl::StatusOr BringSpaceNextToBatch(
      HloInstruction* activations, ConvolutionDimensionNumbers& dim_numbers,
      int64_t& activations_batch_dim,
      std::vector* spatial_dimensions_to_split,
      bool is_backprop = false, bool is_rhs = false);
  absl::StatusOr ChangeSpatialSizeOnSpaceToBatchedShape(
      HloInstruction* activations, int64_t batch_dimension,
      int64_t old_batch_size,
      absl::Span spatial_dimensions_to_split,
      int64_t new_spatial_dim_size, bool increase_spatial_size = false);
  absl::StatusOr SplitAndTransposeMergedBatch(
      HloInstruction* activations, int64_t batch_dimension,
      int64_t old_batch_size, absl::Span spatial_dimensions);
  // Undoes space-to-batch for `old_instr`'s converted form (used as the
  // fallback in Run()).
  absl::StatusOr BatchToSpace(HloInstruction* old_instr);
  absl::StatusOr HaloDuplicateWithSlice(
      HloInstruction* activations,
      absl::Span spatial_dimensions_to_split,
      int64_t activations_batch_dim, int64_t low_padding, int64_t halo_size,
      HloInstruction* pad_val = nullptr);
  // Entry point; returns whether the computation changed.
  absl::StatusOr Run();
  const bool changed() const { return changed_; }
  ~ConvolutionVisitor() = default;
  explicit ConvolutionVisitor(SpaceToBatchController ctrl,
                              HloComputation* computation);
  // Index of the first (lowest-numbered) spatial dim chosen for conversion,
  // derived from ctrl_.dimension_from_end_to_convert and
  // ctrl_.count_of_dimensions_to_convert.
  int64_t GetFirstChosenSpatialDim(HloInstruction* convolution) {
    const int64_t dim_count = ctrl_.count_of_dimensions_to_convert;
    const int64_t end_point = convolution->convolution_dimension_numbers()
                                  .input_spatial_dimensions_size() -
                              ctrl_.dimension_from_end_to_convert;
    return end_point - dim_count + 1;
  }
  // The chosen spatial dims themselves (a contiguous run starting at the
  // first chosen one).
  std::vector GetChosenSpatialDims(HloInstruction* convolution) {
    const int64_t dim_count = ctrl_.count_of_dimensions_to_convert;
    const int64_t first_dim = GetFirstChosenSpatialDim(convolution);
    std::vector dims(dim_count);
    for (int i = 0; i < dim_count; ++i) {
      dims[i] =
          convolution->convolution_dimension_numbers().input_spatial_dimensions(
              first_dim + i);
    }
    return dims;
  }
  // Maps an old dim id to its position after `permute_dims`.
  int64_t DimLookUp(absl::Span permute_dims, int64_t id) {
    return permute_dims[id];
  }
  int DimMapper(SpaceToBatchDimMap s) { return static_cast(s); }
  // Inverse of DimLookUp: position at which `id` occurs in `permute_dims`.
  int64_t ReverseDimLookUp(absl::Span permute_dims, int64_t id) {
    return std::distance(permute_dims.begin(), absl::c_find(permute_dims, id));
  }
  HloInstruction* DoesConvolutionFeedReduceWindowOrSelectAndScatter(
      HloInstruction* instr, int64_t depth);
  bool DoesConvolutionFeedUnpropagatableOp(
      HloInstruction* instr, int64_t depth = kUnpropagatableOpSearchDepth);
  bool IsSpaceToBatchedSpaceSizeSuitable(HloInstruction* instr);

 private:
  HloComputation* computation_;
  // Convs still awaiting the space-to-batch rewrite (seeded by the ctor).
  absl::flat_hash_set convs_to_visit_;
  std::vector conv_visitor_list_;
  HloInstructionSet non_propagatable_instrs_;
  absl::flat_hash_map batch_to_space_map_;
  // Old instruction -> its space-to-batched replacement.
  absl::flat_hash_map old_to_new_instrs_;
  // Old instruction -> its batch/space/feature dimension assignment.
  absl::flat_hash_map> instr_to_dim_map_;
  // New instruction -> permutation applied relative to the old dims.
  absl::flat_hash_map>
      instr_to_dim_permute_map_;
  // Broadcasts already materialized per consumer (checked in
  // PropagateOnBroadcast to avoid duplicates).
  absl::flat_hash_map>
      broadcast_map_;
  bool changed_ = false;
  static constexpr int64_t kReduceWindowSearchDepth = 10;
  static constexpr int64_t kUnpropagatableOpSearchDepth
= 3;
  static constexpr int64_t kMultiplierOnSpaceForBaseDilation = 3;
  // Memoizes DoesConvolutionFeedUnpropagatableOp results.
  absl::flat_hash_map, bool>
      unpropagatability_cache_;
  SpaceToBatchController ctrl_;
};
// Seeds the worklist: scans the computation in post-order and records every
// convolution that qualifies for the space-to-batch rewrite.
ConvolutionVisitor::ConvolutionVisitor(SpaceToBatchController ctrl,
                                       HloComputation* computation) {
  ctrl_ = ctrl;
  computation_ = computation;
  for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {
    if (inst->opcode() != HloOpcode::kConvolution) {
      continue;
    }
    auto convolution = inst;
    if (!IsConvSuitableForSpaceToBatch(convolution)) {
      VLOG(1) << "Conv not suitable for space-to-batch "
              << convolution->ToString();
      continue;
    }
    VLOG(1) << "Conv added to space-to-batch worklist "
            << convolution->ToString();
    convs_to_visit_.insert(convolution);
    conv_visitor_list_.push_back(convolution);
  }
}
// Returns the spatial dims being split for `old_operand`, in old and new
// (permuted) numbering. Relies on the chosen dims being contiguous: entries
// [1..n) are derived by offsetting entry 0.
std::pair, std::vector>
ConvolutionVisitor::GetSpatialDimsToSplit(HloInstruction* old_operand) {
  auto new_operand = old_to_new_instrs_[old_operand];
  auto dim_map_val = instr_to_dim_map_[old_operand];
  auto permute_dims = instr_to_dim_permute_map_[new_operand];
  std::vector old_dims(ctrl_.count_of_dimensions_to_convert),
      new_dims(ctrl_.count_of_dimensions_to_convert);
  old_dims[0] = dim_map_val[DimMapper(SpaceToBatchDimMap::kSpace0)];
  new_dims[0] = DimLookUp(permute_dims, old_dims[0]);
  for (int i = 1; i < ctrl_.count_of_dimensions_to_convert; ++i) {
    old_dims[i] = old_dims[0] + i;
    new_dims[i] = new_dims[0] + i;
  }
  return std::make_pair(old_dims, new_dims);
}
// A window-dilated conv is treated as a forward conv (rather than a backprop
// filter conv) when the kernel's spatial extent is smaller than the output's.
bool ConvolutionVisitor::IsForwardWindowDilatedConv(
    HloInstruction* convolution, ConvolutionDimensionNumbers& dim_numbers) {
  const int64_t window_dilation_factor =
      convolution->window()
          .dimensions(GetFirstChosenSpatialDim(convolution))
          .window_dilation();
  if (window_dilation_factor == 1) {
    return false;
  }
  const int64_t output_spatial_dim = dim_numbers.output_spatial_dimensions(
      GetFirstChosenSpatialDim(convolution));
  const int64_t kernel_spatial_dim = dim_numbers.kernel_spatial_dimensions(
      GetFirstChosenSpatialDim(convolution));
  return convolution->operand(1)->shape().dimensions(kernel_spatial_dim) <
         convolution->shape().dimensions(output_spatial_dim);
}
// Filters convs that cannot (or should not) be space-to-batched: no chosen
// spatial dim, grouped batch, unsupported dilation/stride/padding combos,
// batch already too large, or halo larger than a single split.
bool ConvolutionVisitor::IsConvSuitableForSpaceToBatch(
    HloInstruction* convolution) {
  ConvolutionDimensionNumbers dim_numbers =
      convolution->convolution_dimension_numbers();
  if (GetFirstChosenSpatialDim(convolution) < 0) {
    return false;
  }
  if (convolution->batch_group_count() != 1) {
    return false;
  }
  if (convolution->window()
          .dimensions(GetFirstChosenSpatialDim(convolution))
          .window_dilation() != 1) {
    if (!IsForwardWindowDilatedConv(convolution, dim_numbers)) {
      return false;
    }
  }
  const ConvDetails c = GetConvolutionDetails(convolution, dim_numbers);
  const int64_t low_pad = convolution->window()
                              .dimensions(GetFirstChosenSpatialDim(convolution))
                              .padding_low();
  if (c.base_dilation_factor != 1) {
    if (!ctrl_.enable_propagations_on_base_dilations) {
      return false;
    }
    if (c.stride != 1) {
      return false;
    }
    // Base dilation is only handled for kernel size 1 (no low padding) or
    // low padding of base_dilation_factor or base_dilation_factor - 1.
    if (low_pad == 0) {
      if (c.kernel_spatial_dim_size != 1) {
        return false;
      }
    } else if (low_pad != c.base_dilation_factor - 1 &&
               low_pad != c.base_dilation_factor) {
      return false;
    }
  }
  int64_t activations_batch_dim = dim_numbers.input_batch_dimension();
  const int64_t old_batch_size =
      convolution->operand(0)->shape().dimensions(activations_batch_dim);
  if (old_batch_size > ctrl_.limit_on_batch_size) {
    return false;
  }
  VLOG(1) << "spatial size " << c.spatial_size << " halo size " << c.halo_size;
  // A halo wider than one split would require data from non-adjacent splits.
  if (c.halo_size > CeilOfRatio(c.spatial_size, ctrl_.number_of_splits)) {
    return false;
  }
  if (c.base_dilation_factor > 1 &&
      c.inherent_low_padding == c.base_dilation_factor) {
    if (c.spatial_size <
        kMultiplierOnSpaceForBaseDilation * ctrl_.number_of_splits) {
      return false;
    }
  }
  VLOG(1) << "Legal space-to-batch convolution " << convolution->ToString();
  return true;
}
// Heuristic: a conv is a backprop-filter conv when the already-converted
// operand's old batch dim lines up with the conv's contracted feature dim.
bool ConvolutionVisitor::IsThisBackPropFilterConv(HloInstruction* convolution) {
  auto activations = convolution->mutable_operand(0);
  auto kernel = convolution->mutable_operand(1);
  auto dim_numbers = convolution->convolution_dimension_numbers();
  if (!old_to_new_instrs_.contains(kernel) &&
      !old_to_new_instrs_.contains(activations)) {
    return false;
  }
  if (old_to_new_instrs_.contains(kernel)) {
    auto dim_map_val_op_0 = instr_to_dim_map_[kernel];
    const int64_t old_batch_dim =
        dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)];
    if (convolution->convolution_dimension_numbers()
            .kernel_input_feature_dimension() != old_batch_dim) {
      return false;
    }
  }
  if (old_to_new_instrs_.contains(activations)) {
    auto dim_map_val_op_0 = instr_to_dim_map_[activations];
    const int64_t old_batch_dim =
        dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)];
    if (dim_numbers.input_feature_dimension() != old_batch_dim) {
      return false;
    }
  }
  return true;
}
// Duplicates a halo of `halo_size` elements across neighboring splits (via
// slices of the adjacent split batch entries) so each split sees the data its
// window needs, then re-merges the split batch.
absl::StatusOr ConvolutionVisitor::HaloDuplicateWithSlice(
    HloInstruction* activations,
    absl::Span spatial_dimensions_to_split,
    int64_t activations_batch_dim, int64_t low_padding, int64_t halo_size,
    HloInstruction* pad_val) {
  const int64_t spatial_dim_count = spatial_dimensions_to_split.size();
  const int64_t additional_batch_size =
      IPow(ctrl_.number_of_splits, spatial_dim_count);
  const int64_t original_batch_size =
      activations->shape().dimensions(activations_batch_dim) /
      additional_batch_size;
  const int64_t spatial_split_size =
      activations->shape().dimensions(spatial_dimensions_to_split[0]);
  const int64_t batch_size = ctrl_.number_of_splits;
  TF_ASSIGN_OR_RETURN(
      activations, SplitAndTransposeMergedBatch(
                       activations, activations_batch_dim, original_batch_size,
                       spatial_dimensions_to_split));
  const int64_t rank = activations->shape().rank();
  VLOG(1) << "In HaloDuplicateWithSlice with activations "
          << activations->ToString() << " batch_size "
<< batch_size
          << " spatial_split_size " << spatial_split_size << " low_padding "
          << low_padding << " halo size " << halo_size;
  // The halo may only reach into the immediately adjacent split.
  CHECK_LE(std::abs(halo_size - low_padding), spatial_split_size);
  for (int64_t i = 0; i < spatial_dimensions_to_split.size(); ++i) {
    // After SplitAndTransposeMergedBatch each split factor sits directly
    // before its spatial dim: (..., split_i, spatial_i, ...).
    int64_t spatial_dimension_to_split = activations_batch_dim + 2 * (i + 1);
    int64_t remapped_batch_dimension = spatial_dimension_to_split - 1;
    HloInstruction* first_slice = nullptr;
    std::vector strides(rank, 1);
    HloInstruction* padding =
        pad_val == nullptr
            ? activations->AddInstruction(HloInstruction::CreateConstant(
                  LiteralUtil::Zero(activations->shape().element_type())))
            : pad_val;
    if (low_padding > 0) {
      // Take the last `low_padding` elements of each split (except the last)
      // and shift them one split forward by padding the split-batch dim low.
      std::vector start_indices(rank, 0),
          end_indices(activations->shape().dimensions().begin(),
                      activations->shape().dimensions().end());
      start_indices[spatial_dimension_to_split] =
          spatial_split_size - low_padding;
      end_indices[remapped_batch_dimension] = batch_size - 1;
      end_indices[spatial_dimension_to_split] = spatial_split_size;
      TF_ASSIGN_OR_RETURN(first_slice,
                          MakeSliceHlo(activations, start_indices, end_indices,
                                       strides, &activations->metadata(),
                                       &activations->frontend_attributes()));
      VLOG(1) << "first slice " << first_slice->ToString();
      PaddingConfig padding_config =
          MakeNoPaddingConfig(first_slice->shape().dimensions_size());
      padding_config.mutable_dimensions(remapped_batch_dimension)
          ->set_edge_padding_low(1);
      TF_ASSIGN_OR_RETURN(first_slice,
                          MakePadHlo(first_slice, padding, padding_config,
                                     &first_slice->metadata(),
                                     &first_slice->frontend_attributes()));
    }
    HloInstruction* halo_region = nullptr;
    if (halo_size - low_padding > 0) {
      // Take the first `halo_size - low_padding` elements of each split
      // (except the first) and shift them one split backward.
      std::vector start_indices_halo(rank, 0),
          end_indices_halo(activations->shape().dimensions().begin(),
                           activations->shape().dimensions().end());
      start_indices_halo[remapped_batch_dimension] = 1;
      end_indices_halo[spatial_dimension_to_split] = halo_size - low_padding;
      TF_ASSIGN_OR_RETURN(
          halo_region,
          MakeSliceHlo(activations, start_indices_halo, end_indices_halo,
                       strides, &activations->metadata(),
                       &activations->frontend_attributes()));
      VLOG(1) << "halo_region " << halo_region->ToString();
      PaddingConfig padding_config_halo =
          MakeNoPaddingConfig(halo_region->shape().dimensions_size());
      padding_config_halo.mutable_dimensions(remapped_batch_dimension)
          ->set_edge_padding_high(1);
      TF_ASSIGN_OR_RETURN(halo_region,
                          MakePadHlo(halo_region, padding, padding_config_halo,
                                     &halo_region->metadata(),
                                     &halo_region->frontend_attributes()));
    }
    // With no halo (or negative low padding) trim the activations instead of
    // growing them.
    if ((halo_size == 0 && low_padding != 0) || low_padding < 0) {
      std::vector start_indices_activations_cut(rank, 0),
          end_indices_activations_cut(activations->shape().dimensions().begin(),
                                      activations->shape().dimensions().end());
      if (low_padding > 0) {
        end_indices_activations_cut[spatial_dimension_to_split] =
            spatial_split_size - low_padding;
      } else {
        start_indices_activations_cut[spatial_dimension_to_split] =
            0 - low_padding;
        end_indices_activations_cut[spatial_dimension_to_split] =
            spatial_split_size;
      }
      TF_ASSIGN_OR_RETURN(
          activations, MakeSliceHlo(activations, start_indices_activations_cut,
                                    end_indices_activations_cut, strides,
                                    &activations->metadata(),
                                    &activations->frontend_attributes()));
    }
    if (first_slice != nullptr) {
      TF_ASSIGN_OR_RETURN(
          activations,
          MakeConcatHlo({first_slice, activations}, spatial_dimension_to_split,
                        &activations->metadata(),
                        &activations->frontend_attributes()));
    }
    if (halo_region != nullptr) {
      TF_ASSIGN_OR_RETURN(
          activations,
          MakeConcatHlo({activations, halo_region}, spatial_dimension_to_split,
                        &activations->metadata(),
                        &activations->frontend_attributes()));
    }
  }
  // Re-merge the split factors back into the batch dimension.
  TF_ASSIGN_OR_RETURN(
      activations,
      TransposeAndMergeBatch(
          activations,
          spatial_dimensions_to_split,
          activations_batch_dim, original_batch_size));
  VLOG(1) << "HaloDuplicated activations " << activations->ToString();
  return activations;
}
// Transposes `activations` so the (contiguous) spatial dims to split sit
// directly after the batch dim, rewriting `dim_numbers`,
// `activations_batch_dim` and `spatial_dimensions_to_split` in place.
absl::StatusOr
ConvolutionVisitor::BringSpaceNextToBatch(
    HloInstruction* activations, ConvolutionDimensionNumbers& dim_numbers,
    int64_t& activations_batch_dim,
    std::vector* spatial_dimensions_to_split, bool is_backprop,
    bool is_rhs) {
  for (int64_t i = 1; i < spatial_dimensions_to_split->size(); ++i) {
    CHECK_EQ(spatial_dimensions_to_split->at(i),
             spatial_dimensions_to_split->at(i - 1) + 1)
        << "Spatial dimensions are not contiguous";
  }
  int64_t spatial_dimension_to_split = spatial_dimensions_to_split->at(0);
  std::vector transpose_dims(activations->shape().rank());
  if (spatial_dimension_to_split == activations_batch_dim + 1) {
    // Already adjacent: identity permutation.
    absl::c_iota(transpose_dims, 0);
  } else {
    ConvolutionDimensionNumbers new_dim_numbers = dim_numbers;
    int64_t pushed_counter = 0;
    int64_t new_batch_dim, new_spatial_dim;
    int64_t dim_counter = 0;
    if (is_rhs) {
      // RHS (kernel) case: only valid for backprop filter convs.
      CHECK(is_backprop);
      for (int i = 0; i < activations->shape().rank(); ++i) {
        if (i == activations_batch_dim) {
          continue;
        }
        if (i == spatial_dimension_to_split) {
          // Re-insert the batch dim just before the split spatial dim.
          transpose_dims[dim_counter++] = activations_batch_dim;
          new_batch_dim = pushed_counter;
          pushed_counter++;
          new_spatial_dim = pushed_counter;
        }
        if (i == dim_numbers.kernel_output_feature_dimension()) {
          new_dim_numbers.set_kernel_output_feature_dimension(pushed_counter);
        } else {
          auto it = absl::c_find(dim_numbers.kernel_spatial_dimensions(), i);
          if (it != dim_numbers.kernel_spatial_dimensions().end()) {
            int64_t j = it - dim_numbers.kernel_spatial_dimensions().begin();
            new_dim_numbers.set_kernel_spatial_dimensions(j, pushed_counter);
          }
        }
        transpose_dims[dim_counter++] = i;
        pushed_counter++;
      }
      activations_batch_dim = new_batch_dim;
      spatial_dimension_to_split = new_spatial_dim;
      TF_ASSIGN_OR_RETURN(activations,
                          MakeTransposeHlo(activations, transpose_dims));
      new_dim_numbers.set_kernel_input_feature_dimension(activations_batch_dim);
    } else {
      for (int i = 0; i <
activations->shape().rank(); ++i) {\n if (i == activations_batch_dim) {\n continue;\n }\n if (i == spatial_dimension_to_split) {\n transpose_dims[dim_counter++] = activations_batch_dim;\n new_batch_dim = pushed_counter;\n pushed_counter++;\n new_spatial_dim = pushed_counter;\n }\n if (is_backprop && i == dim_numbers.input_batch_dimension()) {\n new_dim_numbers.set_input_batch_dimension(pushed_counter);\n } else if (i == dim_numbers.input_feature_dimension()) {\n new_dim_numbers.set_input_feature_dimension(pushed_counter);\n } else {\n auto it = absl::c_find(dim_numbers.input_spatial_dimensions(), i);\n if (it != dim_numbers.input_spatial_dimensions().end()) {\n int64_t j = it - dim_numbers.input_spatial_dimensions().begin();\n new_dim_numbers.set_input_spatial_dimensions(j, pushed_counter);\n }\n }\n transpose_dims[dim_counter++] = i;\n pushed_counter++;\n }\n activations_batch_dim = new_batch_dim;\n spatial_dimension_to_split = new_spatial_dim;\n TF_ASSIGN_OR_RETURN(activations,\n MakeTransposeHlo(activations, transpose_dims));\n if (is_backprop) {\n new_dim_numbers.set_input_feature_dimension(activations_batch_dim);\n } else {\n new_dim_numbers.set_input_batch_dimension(activations_batch_dim);\n }\n }\n dim_numbers = new_dim_numbers;\n }\n for (int64_t i = 0; i < spatial_dimensions_to_split->size(); ++i) {\n (*spatial_dimensions_to_split)[i] = spatial_dimension_to_split + i;\n }\n return SpaceNextToBatchDetails{activations, transpose_dims};\n}\nabsl::StatusOr\nConvolutionVisitor::SplitAndTransposeMergedBatch(\n HloInstruction* activations, int64_t batch_dimension,\n int64_t old_batch_size, absl::Span spatial_dimensions) {\n CHECK_EQ(batch_dimension + 1, spatial_dimensions[0]);\n std::vector new_dimensions(activations->shape().dimensions().begin(),\n activations->shape().dimensions().end());\n const int64_t new_batch_size =\n activations->shape().dimensions(batch_dimension);\n VLOG(3) << \"Decreasing the spatial size while propagating new_batch_size \"\n << 
new_batch_size << \" old_batch_size \" << old_batch_size;\n new_dimensions[batch_dimension] = old_batch_size;\n const int64_t spatial_dim_count = spatial_dimensions.size();\n for (int64_t i = 0; i < spatial_dim_count; ++i) {\n new_dimensions.insert(new_dimensions.begin() + spatial_dimensions[0],\n ctrl_.number_of_splits);\n }\n TF_ASSIGN_OR_RETURN(HloInstruction * batch_split_activations,\n MakeReshapeHlo(new_dimensions, activations));\n if (spatial_dim_count > 1) {\n std::vector transpose_dims(new_dimensions.size());\n absl::c_iota(transpose_dims, 0);\n std::vector trans_dims(new_dimensions.size());\n absl::c_iota(trans_dims, 0);\n int64_t start_batch_dim_position = batch_dimension + 1;\n int64_t start_space_dim_position = batch_dimension + 2;\n for (int i = 0; i < spatial_dim_count; ++i) {\n transpose_dims[start_batch_dim_position + 2 * i] =\n batch_dimension + spatial_dim_count - i;\n transpose_dims[start_space_dim_position + 2 * i] =\n batch_dimension + spatial_dim_count + 1 + i;\n }\n TF_ASSIGN_OR_RETURN(\n batch_split_activations,\n MakeTransposeHlo(batch_split_activations, transpose_dims));\n }\n return batch_split_activations;\n}\nabsl::StatusOr\nConvolutionVisitor::ChangeSpatialSizeOnSpaceToBatchedShape(\n HloInstruction* activations, int64_t batch_dimension,\n int64_t old_batch_size, absl::Span spatial_dimensions,\n int64_t new_spatial_dim_size, bool increase_spatial_size) {\n CHECK_EQ(batch_dimension + 1, spatial_dimensions[0]);\n std::vector new_dimensions(activations->shape().dimensions().begin(),\n activations->shape().dimensions().end());\n const int64_t spatial_dim_count = spatial_dimensions.size();\n const int64_t spatial_dim_size =\n activations->shape().dimensions(spatial_dimensions[0]);\n const int64_t reshaped_space_size = spatial_dim_size * ctrl_.number_of_splits;\n TF_ASSIGN_OR_RETURN(\n HloInstruction * batch_split_activations,\n SplitAndTransposeMergedBatch(activations, batch_dimension, old_batch_size,\n spatial_dimensions));\n std::vector 
batch_space_collapse_reshape_dims(
      batch_split_activations->shape().dimensions().begin(),
      batch_split_activations->shape().dimensions().end());
  // Collapse each (split, spatial) pair back into a single spatial dim of
  // size spatial_dim_size * number_of_splits.
  batch_space_collapse_reshape_dims.erase(
      batch_space_collapse_reshape_dims.begin() + spatial_dimensions[0],
      batch_space_collapse_reshape_dims.begin() + spatial_dimensions[0] +
          spatial_dim_count);
  for (auto spatial_dimension : spatial_dimensions) {
    batch_space_collapse_reshape_dims[spatial_dimension] = reshaped_space_size;
  }
  TF_ASSIGN_OR_RETURN(HloInstruction * batch_space_collapsed_reshape,
                      MakeReshapeHlo(batch_space_collapse_reshape_dims,
                                     batch_split_activations));
  VLOG(3) << "First reshape done";
  const int64_t rank = activations->shape().rank();
  if (increase_spatial_size) {
    // Grow: pad the collapsed space up to
    // new_spatial_dim_size * number_of_splits.
    PaddingConfig padding_config = MakeNoPaddingConfig(
        batch_space_collapsed_reshape->shape().dimensions_size());
    for (auto spatial_dimension : spatial_dimensions) {
      padding_config.mutable_dimensions(spatial_dimension)
          ->set_edge_padding_high(new_spatial_dim_size *
                                      ctrl_.number_of_splits -
                                  reshaped_space_size);
      padding_config.mutable_dimensions(spatial_dimension)
          ->set_edge_padding_low(0);
    }
    HloInstruction* padding = activations->AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::Zero(
            batch_space_collapsed_reshape->shape().element_type())));
    TF_ASSIGN_OR_RETURN(
        batch_space_collapsed_reshape,
        MakePadHlo(batch_space_collapsed_reshape, padding, padding_config,
                   &batch_space_collapsed_reshape->metadata(),
                   &batch_space_collapsed_reshape->frontend_attributes()));
  } else {
    // Shrink: slice the collapsed space down to the target size.
    std::vector start_indices(rank, 0),
        end_indices(batch_space_collapsed_reshape->shape().dimensions().begin(),
                    batch_space_collapsed_reshape->shape().dimensions().end()),
        strides(rank, 1);
    for (auto spatial_dimension : spatial_dimensions) {
      end_indices[spatial_dimension] =
          new_spatial_dim_size * ctrl_.number_of_splits;
    }
    TF_ASSIGN_OR_RETURN(
        batch_space_collapsed_reshape,
        MakeSliceHlo(batch_space_collapsed_reshape, start_indices, end_indices,
                     strides, &batch_space_collapsed_reshape->metadata(),
                     &batch_space_collapsed_reshape->frontend_attributes()));
  }
  // Re-split the resized space back into the batch dimension.
  TF_ASSIGN_OR_RETURN(
      HloInstruction * activations_new,
      PerformSplitSpace(batch_space_collapsed_reshape, spatial_dimensions,
                        batch_dimension, new_spatial_dim_size,
                        ctrl_.number_of_splits));
  VLOG(3) << "Size decreased activations " << activations_new->ToString();
  return activations_new;
}
// Driver: space-to-batch each worklist conv, then handle the instructions
// propagation stopped at — either propagate late, or batch-to-space their
// converted operands so the graph stays consistent.
absl::StatusOr ConvolutionVisitor::Run() {
  for (auto conv : conv_visitor_list_) {
    if (ctrl_.disable_starting_on_small_chains &&
        DoesConvolutionFeedUnpropagatableOp(conv)) {
      VLOG(1) << "Giving up on conv " << conv->ToString()
              << " because it feeds an unpropagatable op";
      convs_to_visit_.erase(conv);
    }
    if (convs_to_visit_.count(conv) > 0) {
      TF_CHECK_OK(PerformSpaceToBatchOnConvolution(conv));
      changed_ = true;
    }
  }
  conv_visitor_list_.clear();
  convs_to_visit_.clear();
  for (auto instr : non_propagatable_instrs_) {
    if (instr->opcode() == HloOpcode::kConvolution) {
      VLOG(1) << "Instr " << instr->ToString();
    }
    // Try a late propagation for convs that became propagatable after their
    // operands were converted.
    if (instr->opcode() == HloOpcode::kConvolution &&
        !IsConvSuitableForSpaceToBatch(instr)) {
      HloInstruction* producer = nullptr;
      if (old_to_new_instrs_.contains(instr->mutable_operand(0))) {
        producer = instr->mutable_operand(0);
      } else if (old_to_new_instrs_.contains(instr->mutable_operand(1))) {
        producer = instr->mutable_operand(1);
      }
      if (producer) {
        if (CanPropagate(instr, producer)) {
          bool needs_further_propagation;
          // NOTE(review): `needs_further_propagation` is assigned but never
          // read afterwards; it only satisfies TF_ASSIGN_OR_RETURN.
          TF_ASSIGN_OR_RETURN(needs_further_propagation,
                              Propagate(instr, producer));
          TF_CHECK_OK(computation_->ReplaceInstruction(
              instr, old_to_new_instrs_[instr]));
          continue;
        }
      }
    }
    VLOG(1) << "Could not eventually propagate through " << instr->ToString();
    // Fallback: batch-to-space every converted operand of this instruction.
    absl::flat_hash_map operand_map;
    for (int64_t i = 0; i < instr->operand_count(); ++i) {
      if
(old_to_new_instrs_.count(instr->mutable_operand(i))) {
        TF_ASSIGN_OR_RETURN(operand_map[i],
                            BatchToSpace(instr->mutable_operand(i)));
      }
    }
    for (auto entry : operand_map) {
      TF_CHECK_OK(instr->ReplaceOperandWith(entry.first, entry.second));
    }
  }
  non_propagatable_instrs_.clear();
  return changed_;
}
// True for elementwise ops that can be space-to-batched transparently;
// excludes opcodes that report elementwise but are opaque to this pass
// (fusion, rng, copy, constant, iota, map).
bool IsTrivialElementwise(HloInstruction* hlo) {
  if (hlo->opcode() == HloOpcode::kFusion || hlo->opcode() == HloOpcode::kRng ||
      hlo->opcode() == HloOpcode::kCopy ||
      hlo->opcode() == HloOpcode::kConstant ||
      hlo->opcode() == HloOpcode::kIota || hlo->opcode() == HloOpcode::kMap) {
    return false;
  }
  return hlo->IsElementwise();
}
// Decides whether the space-to-batch transform on `producer` can be carried
// through `consumer`. Checks per-opcode readiness of operands and agreement
// of their batch/space dim assignments and permutations.
bool ConvolutionVisitor::CanPropagate(HloInstruction* consumer,
                                      HloInstruction* producer) {
  if (IsTrivialElementwise(consumer)) {
    VLOG(2) << "Doing propagation check on elementwise op: "
            << consumer->ToString();
    HloInstruction* pivot_operand = nullptr;
    for (int64_t i = 0; i < consumer->operand_count(); ++i) {
      auto old_producer = consumer->mutable_operand(i);
      std::vector to_transform;
      // Constants and propagatable broadcasts don't need to be converted.
      const bool broadcast_or_constant =
          (old_producer->opcode() == HloOpcode::kConstant) ||
          (old_producer->opcode() == HloOpcode::kBroadcast &&
           IsBroadcastPropagatable(old_producer, producer)) ||
          (consumer->IsElementwiseBinary() &&
           old_producer->opcode() == HloOpcode::kBroadcast &&
           IsBroadcastTree(old_producer, producer, to_transform));
      if (!old_to_new_instrs_.contains(old_producer) &&
          !broadcast_or_constant) {
        VLOG(1) << "Cannot propagate on elementwise op " << consumer->ToString()
                << " because operand " << old_producer->ToString()
                << " isn't ready ";
        return false;
      } else {
        if (broadcast_or_constant) {
          VLOG(2) << "Skipping on " << old_producer->ToString();
          continue;
        }
        CHECK(old_to_new_instrs_.contains(old_producer));
        CHECK(instr_to_dim_map_.contains(old_producer));
        if (pivot_operand == nullptr) {
          // First converted operand sets the reference dim layout.
          pivot_operand = old_producer;
          VLOG(2) << "Elementwise op: pivot " << old_producer->ToString();
        } else {
          // All other converted operands must match the pivot's batch/space
          // assignment, permutation, and shape.
          if (instr_to_dim_map_[pivot_operand]
                               [DimMapper(SpaceToBatchDimMap::kBatch)] !=
                  instr_to_dim_map_[old_producer]
                                   [DimMapper(SpaceToBatchDimMap::kBatch)] ||
              instr_to_dim_map_[pivot_operand]
                               [DimMapper(SpaceToBatchDimMap::kSpace0)] !=
                  instr_to_dim_map_[old_producer]
                                   [DimMapper(SpaceToBatchDimMap::kSpace0)]) {
            VLOG(2) << "Elementwise op: checking for shape equivalence "
                    << consumer->ToString()
                    << " failed due to changed batch space ordering ";
            return false;
          }
          auto pivot_new_instr = old_to_new_instrs_[pivot_operand];
          auto pivot_permute_dims = instr_to_dim_permute_map_[pivot_new_instr];
          auto new_instr = old_to_new_instrs_[old_producer];
          auto permute_dims = instr_to_dim_permute_map_[new_instr];
          for (int j = 0; j < pivot_permute_dims.size(); ++j) {
            if (pivot_permute_dims[j] != permute_dims[j]) {
              VLOG(2) << "Elementwise op: checking for shape equivalence "
                      << consumer->ToString()
                      << " failed due to permuted dimensions ";
              return false;
            }
            if (pivot_new_instr->shape().dimensions(j) !=
                new_instr->shape().dimensions(j)) {
              // A size mismatch is tolerated only on the space dim of binary
              // elementwise / select ops.
              if (!((consumer->IsElementwiseBinary() ||
                     consumer->opcode() == HloOpcode::kSelect) &&
                    j == instr_to_dim_map_[pivot_operand][DimMapper(
                             SpaceToBatchDimMap::kSpace0)])) {
                VLOG(2) << "Elementwise op: checking for shape equivalence "
                        << consumer->ToString()
                        << " failed due to changed shape sizes ";
                return false;
              }
            }
          }
        }
      }
    }
  }
  if (consumer->opcode() == HloOpcode::kConcatenate) {
    // Every concat operand must already be converted and agree with the
    // first operand's permutation and shape.
    for (int64_t i = 0; i < consumer->operand_count(); ++i) {
      if (!instr_to_dim_map_.contains(consumer->mutable_operand(i))) {
        return false;
      }
    }
    auto pivot_operand = consumer->mutable_operand(0);
    auto pivot_new_instr = old_to_new_instrs_[pivot_operand];
    auto pivot_permute_dims = instr_to_dim_permute_map_[pivot_new_instr];
    for (int64_t i = 1; i < consumer->operand_count(); ++i) {
      auto new_instr =
old_to_new_instrs_[consumer->mutable_operand(i)];
      auto permute_dims = instr_to_dim_permute_map_[new_instr];
      for (int j = 0; j < pivot_permute_dims.size(); ++j) {
        if (pivot_permute_dims[j] != permute_dims[j]) {
          VLOG(2) << "Concat op: checking for shape equivalence "
                  << consumer->ToString()
                  << " failed due to permuted dimensions ";
          return false;
        }
        if (pivot_new_instr->shape().dimensions(j) !=
            new_instr->shape().dimensions(j)) {
          VLOG(2) << "Concat op: checking for shape equivalence "
                  << consumer->ToString()
                  << " failed due to changed shape sizes ";
          return false;
        }
      }
    }
    return true;
  }
  if (consumer->opcode() == HloOpcode::kConvolution) {
    if (!ConsumeFuel("space-to-batch-converter", [&] {
          return "Skipping space-to-batch propagation because fuel over\n";
        })) {
      return false;
    }
    // Verifies that the conv's spatial/batch/feature dims line up with the
    // operand's recorded dim assignment (lhs or rhs view).
    auto are_conv_dims_compatible =
        [&](const ConvolutionDimensionNumbers dim_numbers,
            std::vector& dim_map, bool check_lhs) {
          if (check_lhs) {
            if (dim_numbers.input_spatial_dimensions(
                    GetFirstChosenSpatialDim(consumer)) !=
                dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)]) {
              return false;
            }
            for (int i = 0; i < dim_numbers.input_spatial_dimensions().size();
                 ++i) {
              if (dim_numbers.input_spatial_dimensions(i) ==
                      dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] ||
                  dim_numbers.input_spatial_dimensions(i) ==
                      dim_map[DimMapper(SpaceToBatchDimMap::kFeature)]) {
                return false;
              }
            }
          } else {
            if (dim_numbers.kernel_spatial_dimensions(
                    GetFirstChosenSpatialDim(consumer)) !=
                dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)]) {
              return false;
            }
            for (int i = 0; i < dim_numbers.kernel_spatial_dimensions().size();
                 ++i) {
              if (dim_numbers.kernel_spatial_dimensions(i) ==
                      dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] ||
                  dim_numbers.kernel_spatial_dimensions(i) ==
                      dim_map[DimMapper(SpaceToBatchDimMap::kFeature)]) {
                return false;
              }
            }
          }
          return true;
        };
    VLOG(1) << "Checking if conv is supported for propagation "
            << consumer->ToString();
    // First try the plain (non-window-dilated) forward-conv path.
    bool found_good_non_window_dilated_conv = true;
    if (IsConvSuitableForSpaceToBatch(consumer)) {
      if (!old_to_new_instrs_.contains(consumer->mutable_operand(0))) {
        found_good_non_window_dilated_conv = false;
      }
      ConvolutionDimensionNumbers dim_numbers =
          consumer->convolution_dimension_numbers();
      ConvDetails c = GetConvolutionDetails(consumer, dim_numbers);
      auto retval = GetSpatialDimsToSplit(consumer->mutable_operand(0));
      std::vector new_spatial_dims = retval.second;
      auto new_activations = old_to_new_instrs_[consumer->mutable_operand(0)];
      if (new_activations->shape().dimensions(retval.second[0]) <
          c.inherent_low_padding) {
        return false;
      }
      auto dim_map_val_op_0 = instr_to_dim_map_[consumer->mutable_operand(0)];
      if (!are_conv_dims_compatible(consumer->convolution_dimension_numbers(),
                                    dim_map_val_op_0, true)) {
        found_good_non_window_dilated_conv = false;
      }
      if (consumer->convolution_dimension_numbers().input_batch_dimension() !=
          dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)]) {
        found_good_non_window_dilated_conv = false;
      }
      if (found_good_non_window_dilated_conv) {
        return true;
      }
    }
    // Otherwise, consider the backprop-filter (window-dilated) conv path.
    if (!ctrl_.enable_propagations_on_window_dilations) {
      return false;
    }
    if (!IsThisBackPropFilterConv(consumer)) {
      return false;
    }
    if (GetFirstChosenSpatialDim(consumer) < 0) {
      return false;
    }
    if (consumer->window()
            .dimensions(GetFirstChosenSpatialDim(consumer))
            .stride() != 1) {
      return false;
    }
    if (consumer->feature_group_count() != 1) {
      return false;
    }
    VLOG(2) << "Checking for backprop filter conv propagatability";
    CHECK_EQ(consumer->operand_count(), 2);
    auto activations = consumer->mutable_operand(0);
    auto kernel = consumer->mutable_operand(1);
    auto win_dims =
        consumer->window().dimensions(GetFirstChosenSpatialDim(consumer));
    const int64_t rhs_dilation = win_dims.window_dilation();
    const int64_t lhs_dilation = win_dims.base_dilation();
    if (lhs_dilation != 1) {
      return false;
    }
    if (rhs_dilation == 1 &&
        !ctrl_.enable_propagations_on_trivial_window_dilations) {
      if (!old_to_new_instrs_.contains(kernel) ||
          !old_to_new_instrs_.contains(activations)) {
        return false;
      }
    }
    // Case 1: activations converted, kernel not yet.
    if (!old_to_new_instrs_.contains(kernel)) {
      const int64_t rhs_batch =
          kernel->shape().dimensions(consumer->convolution_dimension_numbers()
                                         .kernel_input_feature_dimension());
      auto dim_map_val_op_0 = instr_to_dim_map_[activations];
      const int64_t old_batch_dim =
          dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)];
      const int64_t old_space_dim =
          dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kSpace0)];
      auto first_operand = old_to_new_instrs_[activations];
      auto permute_dims_first_operand =
          instr_to_dim_permute_map_[first_operand];
      const int64_t new_batch_dim =
          DimLookUp(permute_dims_first_operand, old_batch_dim);
      const int64_t new_space_dim =
          DimLookUp(permute_dims_first_operand, old_space_dim);
      const int64_t lhs_batch =
          first_operand->shape().dimensions(new_batch_dim);
      if (first_operand->shape().dimensions(new_space_dim) % rhs_dilation !=
          0) {
        return false;
      }
      if (rhs_batch * ctrl_.number_of_splits != lhs_batch) {
        return false;
      }
      if (!are_conv_dims_compatible(consumer->convolution_dimension_numbers(),
                                    dim_map_val_op_0, true)) {
        return false;
      }
      VLOG(2)
          << "Backprop filter conv ready for propagation: activations ready, "
             " kernel will be space-to-batched";
      return true;
    }
    // Case 2: kernel converted, activations not yet.
    if (!old_to_new_instrs_.contains(activations)) {
      const int64_t lhs_batch = activations->shape().dimensions(
          consumer->convolution_dimension_numbers().input_feature_dimension());
      auto dim_map_val_op_1 = instr_to_dim_map_[consumer->mutable_operand(1)];
      const int64_t old_batch_dim =
          dim_map_val_op_1[DimMapper(SpaceToBatchDimMap::kBatch)];
      auto second_operand = old_to_new_instrs_[kernel];
      auto permute_dims_second_operand =
          instr_to_dim_permute_map_[second_operand];
      const int64_t new_batch_dim =
          DimLookUp(permute_dims_second_operand, old_batch_dim);
      const int64_t rhs_batch =
          second_operand->shape().dimensions(new_batch_dim);
      if (rhs_batch != ctrl_.number_of_splits * lhs_batch) {
        return false;
      }
      if (!are_conv_dims_compatible(consumer->convolution_dimension_numbers(),
                                    dim_map_val_op_1, false)) {
        return false;
      }
      VLOG(2) << "Backprop filter conv ready for propagation: kernel ready, "
                 " activations will be space-to-batched";
      return true;
    }
    // Case 3: both operands already converted; batch/space sizes must agree.
    auto first_operand = old_to_new_instrs_[activations];
    auto dim_map_val_op_0 = instr_to_dim_map_[activations];
    auto second_operand = old_to_new_instrs_[kernel];
    auto dim_map_val_op_1 = instr_to_dim_map_[kernel];
    auto permute_dims_first_operand = instr_to_dim_permute_map_[first_operand];
    auto permute_dims_second_operand =
        instr_to_dim_permute_map_[second_operand];
    const int64_t new_batch_dim_operand_0 =
        DimLookUp(permute_dims_first_operand,
                  dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)]);
    const int64_t new_space_dim_operand_0 =
        DimLookUp(permute_dims_first_operand,
                  dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kSpace0)]);
    const int64_t new_batch_dim_operand_1 =
        DimLookUp(permute_dims_second_operand,
                  dim_map_val_op_1[DimMapper(SpaceToBatchDimMap::kBatch)]);
    const int64_t new_space_dim_operand_1 =
        DimLookUp(permute_dims_second_operand,
                  dim_map_val_op_1[DimMapper(SpaceToBatchDimMap::kSpace0)]);
    if (first_operand->shape().dimensions(new_batch_dim_operand_0) !=
        second_operand->shape().dimensions(new_batch_dim_operand_1)) {
      VLOG(2) << "Backprop filter conv not ready for propagation because batch "
                 "dimensions don't line up";
      return false;
    }
    if (first_operand->shape().dimensions(new_space_dim_operand_0) >
        rhs_dilation *
            second_operand->shape().dimensions(new_space_dim_operand_1)) {
      VLOG(2) << "Backprop filter conv not ready for propagation because of "
                 "dilation factor mismatch";
      return false;
    }
    if (!are_conv_dims_compatible(consumer->convolution_dimension_numbers(),
                                  dim_map_val_op_0, true)) {
      return false;
    }
    if (!are_conv_dims_compatible(consumer->convolution_dimension_numbers(),
                                  dim_map_val_op_1, false)) {
      return false;
    }
    VLOG(2) << "Backprop filter conv ready for propagation";
    return true;
  }
  if (consumer->opcode() == HloOpcode::kReduceWindow ||
      consumer->opcode() == HloOpcode::kReduce) {
    // Only operand 0 (the data) needs to be converted already.
    for (int64_t i = 0; i < consumer->operand_count(); ++i) {
      auto old_producer = consumer->mutable_operand(i);
      if (i == 0 && !old_to_new_instrs_.contains(old_producer)) {
        return false;
      }
    }
    if (consumer->opcode() == HloOpcode::kReduceWindow) {
      return IsSpaceToBatchedSpaceSizeSuitable(consumer);
    }
  }
  if (consumer->opcode() == HloOpcode::kSelectAndScatter) {
    // Operands 0 and 1 must be converted and share a permutation.
    for (int64_t i = 0; i < consumer->operand_count(); ++i) {
      auto old_producer = consumer->mutable_operand(i);
      if (i < 2 && !old_to_new_instrs_.contains(old_producer)) {
        return false;
      }
    }
    auto first_operand = old_to_new_instrs_[consumer->mutable_operand(0)];
    auto dim_map_val_op_0 = instr_to_dim_map_[consumer->mutable_operand(0)];
    auto second_operand = old_to_new_instrs_[consumer->mutable_operand(1)];
    auto permute_dims_first_operand = instr_to_dim_permute_map_[first_operand];
    auto permute_dims_second_operand =
        instr_to_dim_permute_map_[second_operand];
    if (permute_dims_first_operand != permute_dims_second_operand) {
      VLOG(2) << "Can't propagate through select and scatter due to "
                 "permutation mismatch";
      return false;
    }
    const int64_t old_batch_dim =
        dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)];
    const int64_t old_space_dim =
        dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kSpace0)];
    const int64_t new_batch_dim =
        DimLookUp(permute_dims_first_operand, old_batch_dim);
    const int64_t new_space_dim =
        DimLookUp(permute_dims_first_operand, old_space_dim);
    if (first_operand->shape().dimensions(new_batch_dim) !=
        second_operand->shape().dimensions(new_batch_dim)) {
      VLOG(2)
          << "Can't propagate through select and scatter due to dim mismatch";
      return false;
    }
    // The source operand's space size must equal the windowed size of the
    // data operand.
    const int64_t stride =
        consumer->window().dimensions(old_space_dim).stride();
    const int64_t pad_high =
        consumer->window().dimensions(old_space_dim).padding_high();
    const int64_t pad_low =
        consumer->window().dimensions(old_space_dim).padding_low();
    if ((first_operand->shape().dimensions(new_space_dim) + pad_high +
         pad_low) /
            stride !=
        second_operand->shape().dimensions(new_space_dim)) {
      VLOG(2) << "Can't propagate through select and scatter due to stride "
                 "mismatch";
      return false;
    }
    return IsSpaceToBatchedSpaceSizeSuitable(consumer);
  }
  return true;
}
// Builds (or reuses) a broadcast compatible with the space-to-batched shape
// of `producer` for use by `consumer`.
void ConvolutionVisitor::PropagateOnBroadcast(HloInstruction* consumer,
                                              HloInstruction* producer) {
  auto new_producer = old_to_new_instrs_[producer];
  auto permute_dims = instr_to_dim_permute_map_[new_producer];
  auto dim_map_val = instr_to_dim_map_[producer];
  const int64_t old_batch_dim =
      dim_map_val[DimMapper(SpaceToBatchDimMap::kBatch)];
  const int64_t old_space_dim =
      dim_map_val[DimMapper(SpaceToBatchDimMap::kSpace0)];
  auto orig_broadcast_dims = consumer->dimensions();
  bool batch_is_broadcasted =
      absl::c_linear_search(orig_broadcast_dims, old_batch_dim);
  const int64_t new_batch_dim = DimLookUp(permute_dims, old_batch_dim);
  const int64_t new_space_dim = DimLookUp(permute_dims, old_space_dim);
  bool map_found = broadcast_map_.contains(consumer);
  if (map_found) {
    // Reuse an already-created compatible broadcast if one exists.
    for (auto previous_broadcast : broadcast_map_[consumer]) {
      if (ShapeUtil::CompatibleIgnoringElementType(previous_broadcast->shape(),
                                                   new_producer->shape())) {
        return;
      }
    }
  }
  std::vector final_shape_dims(
      new_producer->shape().dimensions().begin(),
      new_producer->shape().dimensions().end());
  if (batch_is_broadcasted) {
    final_shape_dims[new_batch_dim] =
        producer->shape().dimensions(old_batch_dim);
final_shape_dims[new_space_dim] *= ctrl_.number_of_splits;\n }\n std::vector broadcast_dims;\n const auto& dimensions = consumer->dimensions();\n broadcast_dims.reserve(dimensions.size());\n for (auto j : dimensions) {\n broadcast_dims.push_back(DimLookUp(permute_dims, j));\n }\n auto new_broadcast = MakeBroadcastHlo(\n consumer->mutable_operand(0), broadcast_dims, final_shape_dims,\n &consumer->metadata(), &consumer->frontend_attributes());\n VLOG(1) << \"Created broadcast \" << new_broadcast->ToString();\n if (batch_is_broadcasted) {\n new_broadcast =\n MakeReshapeHlo(new_producer->shape().dimensions(), new_broadcast)\n .value();\n VLOG(2) << \"Created reshape of broadcast \" << new_broadcast->ToString();\n }\n if (!map_found) {\n absl::flat_hash_set set_of_broadcasts;\n broadcast_map_[consumer] = set_of_broadcasts;\n }\n broadcast_map_[consumer].insert(new_broadcast);\n}\nvoid ConvolutionVisitor::RewriteBroadcastTree(\n HloInstruction* producer,\n std::vector& instructions_to_transform) {\n CHECK(old_to_new_instrs_.contains(producer));\n for (auto instr : instructions_to_transform) {\n if (instr->opcode() == HloOpcode::kBroadcast) {\n PropagateOnBroadcast(instr, producer);\n } else if (IsTrivialElementwise(instr)) {\n Propagate(instr, instr->mutable_operand(0)).value();\n } else {\n LOG(FATAL) << \"Unsupported opcode in RewriteBroadcastTree\";\n }\n }\n}\nbool ConvolutionVisitor::IsBroadcastTree(\n HloInstruction* op, HloInstruction* consumer,\n std::vector& instructions_to_transform) {\n if (op->opcode() == HloOpcode::kBroadcast) {\n if (IsBroadcastPropagatable(op, consumer)) {\n instructions_to_transform.push_back(op);\n return true;\n } else {\n return false;\n }\n }\n if (Match(op, m::ConstantScalar())) {\n return true;\n }\n if (!IsTrivialElementwise(op)) {\n return false;\n }\n for (int64_t i = 0; i < op->operand_count(); ++i) {\n if (!IsBroadcastTree(op->mutable_operand(i), consumer,\n instructions_to_transform)) {\n return false;\n }\n }\n 
instructions_to_transform.push_back(op);\n return true;\n}\nbool ConvolutionVisitor::IsBroadcastPropagatable(HloInstruction* broadcast,\n HloInstruction* old_other_op) {\n CHECK_EQ(broadcast->opcode(), HloOpcode::kBroadcast);\n CHECK(instr_to_dim_map_.contains(old_other_op));\n auto result = instr_to_dim_map_[old_other_op];\n const int64_t space_dim = result[DimMapper(SpaceToBatchDimMap::kSpace0)];\n auto broadcast_dims = broadcast->dimensions();\n return !absl::c_linear_search(broadcast_dims, space_dim);\n}\nbool ConvolutionVisitor::IsOpcodeNonPropagatable(HloInstruction* consumer) {\n switch (consumer->opcode()) {\n case HloOpcode::kCustomCall:\n return true;\n default:\n return false;\n }\n}\nbool ConvolutionVisitor::SupportedDotForPropagation(HloInstruction* consumer,\n HloInstruction* producer) {\n if (consumer->opcode() != HloOpcode::kDot) {\n return false;\n }\n auto operand = consumer->mutable_operand(0);\n if (operand != producer || !instr_to_dim_map_.contains(operand)) {\n return false;\n }\n const auto& dnums = consumer->dot_dimension_numbers();\n const auto& contracting_dims = dnums.lhs_contracting_dimensions();\n const auto& batch_dims = dnums.lhs_batch_dimensions();\n auto result = instr_to_dim_map_[operand];\n const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];\n const int64_t old_space_dim = result[DimMapper(SpaceToBatchDimMap::kSpace0)];\n const int64_t old_feature_dim =\n result[DimMapper(SpaceToBatchDimMap::kFeature)];\n if (consumer->operand(1)->shape().rank() ==\n batch_dims.size() + contracting_dims.size()) {\n return false;\n }\n bool found = false;\n for (auto dim : batch_dims) {\n if (dim == old_batch_dim || dim == old_space_dim) {\n return false;\n }\n if (dim == old_feature_dim) {\n found = true;\n }\n }\n if (!found) {\n return false;\n }\n for (auto dim : contracting_dims) {\n if (dim == old_batch_dim || dim == old_space_dim) {\n return false;\n }\n }\n return true;\n}\nbool 
ConvolutionVisitor::SupportedOpForPropagation(HloInstruction* consumer,\n HloInstruction* producer) {\n if (IsOpcodeNonPropagatable(consumer)) {\n return false;\n }\n if (IsTrivialElementwise(consumer)) {\n for (int64_t i = 0; i < consumer->operand_count(); ++i) {\n if (consumer->operand(i)->opcode() == HloOpcode::kBroadcast) {\n if (!IsBroadcastPropagatable(consumer->mutable_operand(i), producer)) {\n VLOG(2) << \"Could not propagate through broadcast\";\n return false;\n }\n }\n }\n return true;\n }\n if (consumer->opcode() == HloOpcode::kConvolution) {\n return true;\n }\n if (consumer->opcode() == HloOpcode::kConcatenate) {\n HloInstruction* pivot_operand = nullptr;\n for (int64_t i = 0; i < consumer->operand_count(); ++i) {\n if (instr_to_dim_map_.contains(consumer->mutable_operand(i))) {\n pivot_operand = consumer->mutable_operand(i);\n break;\n }\n }\n if (pivot_operand == nullptr) {\n VLOG(1) << \"Concat: Dim map not found on any operand\";\n return false;\n }\n auto result = instr_to_dim_map_[pivot_operand];\n const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];\n const int64_t old_space_dim =\n result[DimMapper(SpaceToBatchDimMap::kSpace0)];\n if (consumer->concatenate_dimension() == old_batch_dim ||\n consumer->concatenate_dimension() == old_space_dim) {\n return false;\n }\n return true;\n }\n if (consumer->opcode() == HloOpcode::kReverse) {\n auto operand_0 = consumer->mutable_operand(0);\n if (!instr_to_dim_map_.contains(operand_0)) {\n return false;\n }\n auto result = instr_to_dim_map_[operand_0];\n const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];\n const int64_t old_space_dim =\n result[DimMapper(SpaceToBatchDimMap::kSpace0)];\n for (auto dim : consumer->dimensions()) {\n if (dim == old_batch_dim || dim == old_space_dim) {\n return false;\n }\n }\n return true;\n }\n if (consumer->opcode() == HloOpcode::kTranspose) {\n return true;\n }\n if (consumer->opcode() == HloOpcode::kPad) {\n auto 
operand_0 = consumer->mutable_operand(0);\n if (!instr_to_dim_map_.contains(operand_0)) {\n return false;\n }\n auto result = instr_to_dim_map_[operand_0];\n const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];\n const int64_t old_space_dim =\n result[DimMapper(SpaceToBatchDimMap::kSpace0)];\n auto does_dim_have_padding = [](PaddingConfig padding_config, int64_t dim) {\n return padding_config.dimensions(dim).edge_padding_low() != 0 ||\n padding_config.dimensions(dim).edge_padding_high() != 0 ||\n padding_config.dimensions(dim).interior_padding() != 0;\n };\n if (does_dim_have_padding(consumer->padding_config(), old_batch_dim) ||\n does_dim_have_padding(consumer->padding_config(), old_space_dim)) {\n return false;\n }\n return true;\n }\n if (consumer->opcode() == HloOpcode::kSlice) {\n auto operand = consumer->mutable_operand(0);\n if (!instr_to_dim_map_.contains(operand)) {\n return false;\n }\n auto result = instr_to_dim_map_[operand];\n const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];\n const int64_t old_space_dim =\n result[DimMapper(SpaceToBatchDimMap::kSpace0)];\n if (consumer->shape().dimensions(old_batch_dim) !=\n operand->shape().dimensions(old_batch_dim)) {\n return false;\n }\n if (consumer->shape().dimensions(old_space_dim) !=\n operand->shape().dimensions(old_space_dim)) {\n return false;\n }\n return true;\n }\n if (SupportedDotForPropagation(consumer, producer)) {\n return true;\n }\n if (consumer->opcode() == HloOpcode::kReduce) {\n if (consumer->shape().IsTuple()) {\n return false;\n }\n auto reduce_dims = consumer->dimensions();\n auto result = instr_to_dim_map_[consumer->mutable_operand(0)];\n const int64_t batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];\n const int64_t space_dim = result[DimMapper(SpaceToBatchDimMap::kSpace0)];\n if (!absl::c_linear_search(reduce_dims, batch_dim) &&\n !absl::c_linear_search(reduce_dims, space_dim)) {\n return true;\n }\n return 
absl::c_linear_search(reduce_dims, batch_dim) &&\n absl::c_linear_search(reduce_dims, space_dim);\n }\n if (consumer->opcode() == HloOpcode::kReduceWindow &&\n consumer->shape().IsTuple()) {\n return false;\n }\n if (consumer->opcode() == HloOpcode::kReduceWindow ||\n consumer->opcode() == HloOpcode::kSelectAndScatter) {\n auto first_operand = consumer->mutable_operand(0);\n auto window = consumer->window();\n if (instr_to_dim_map_.count(first_operand) <= 0) {\n VLOG(1) << \"Dim map not found on windowed operand. Window dim count \"\n << window.dimensions().size();\n return false;\n }\n auto result = instr_to_dim_map_[first_operand];\n const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];\n const int64_t old_space_dim =\n result[DimMapper(SpaceToBatchDimMap::kSpace0)];\n if (window.dimensions(old_batch_dim).size() != 1) {\n return false;\n }\n if (window.dimensions(old_space_dim).padding_low() != 0) {\n return false;\n }\n if (window.dimensions(old_space_dim).base_dilation() != 1 ||\n window.dimensions(old_space_dim).window_dilation() != 1) {\n return false;\n }\n if (window.dimensions(old_batch_dim).base_dilation() != 1 ||\n window.dimensions(old_batch_dim).window_dilation() != 1) {\n return false;\n }\n if (window.dimensions(old_space_dim).padding_high() >\n window.dimensions(old_space_dim).size()) {\n return false;\n }\n if (old_to_new_instrs_.count(first_operand) <= 0) {\n return false;\n }\n auto new_operand = old_to_new_instrs_[first_operand];\n auto permute_dims = instr_to_dim_permute_map_[new_operand];\n if (consumer->opcode() == HloOpcode::kSelectAndScatter) {\n const int64_t new_space_dim = DimLookUp(permute_dims, old_space_dim);\n if (new_operand->shape().dimensions(new_space_dim) %\n window.dimensions(old_space_dim).stride() !=\n 0) {\n return false;\n }\n if (!ShapeUtil::ElementIsFloating(consumer->shape())) {\n return false;\n }\n auto scatter_comp = consumer->scatter();\n if (!Match(scatter_comp->root_instruction(),\n 
m::AddAnyOrder(m::Parameter(0), m::Parameter(1)))) {\n return false;\n }\n auto select_comp = consumer->select();\n if (!Match(select_comp->root_instruction(),\n m::Compare(m::Parameter(0), m::Parameter(1))\n .WithComparisonDirection(ComparisonDirection::kGe)) &&\n !Match(select_comp->root_instruction(),\n m::Compare(m::Parameter(1), m::Parameter(0))\n .WithComparisonDirection(ComparisonDirection::kGe))) {\n return false;\n }\n if (consumer->window().dimensions(old_space_dim).padding_low() != 0) {\n return false;\n }\n }\n return true;\n }\n return false;\n}\nabsl::StatusOr ConvolutionVisitor::Propagate(HloInstruction* consumer,\n HloInstruction* producer) {\n auto computation = consumer->parent();\n if (IsTrivialElementwise(consumer)) {\n auto dim_map_val = instr_to_dim_map_[producer];\n auto new_consumer = computation->AddInstruction(consumer->Clone());\n bool is_pivot_producer_modified = false;\n if (consumer->IsElementwiseBinary() ||\n consumer->opcode() == HloOpcode::kSelect) {\n int64_t pivot_operand_number = -1;\n HloInstruction* pivot_operand = nullptr;\n for (int i = 0; i < consumer->operand_count(); ++i) {\n if (consumer->operand(i)->opcode() == HloOpcode::kBroadcast) {\n continue;\n }\n auto operand = consumer->mutable_operand(i);\n if (old_to_new_instrs_.contains(operand)) {\n if (pivot_operand_number == -1 ||\n old_to_new_instrs_[pivot_operand]->shape().dimensions() <\n old_to_new_instrs_[operand]->shape().dimensions()) {\n is_pivot_producer_modified = true;\n pivot_operand_number = i;\n pivot_operand = consumer->mutable_operand(pivot_operand_number);\n }\n }\n }\n if (pivot_operand_number != -1) {\n producer = pivot_operand;\n }\n }\n for (int64_t i = 0; i < consumer->operand_count(); ++i) {\n std::vector instructions_to_transform;\n if (consumer->operand(i)->opcode() == HloOpcode::kBroadcast) {\n auto broadcast = consumer->mutable_operand(i);\n PropagateOnBroadcast(broadcast, producer);\n HloInstruction* new_broadcast = nullptr;\n auto new_producer = 
old_to_new_instrs_[producer];\n for (auto previous_broadcast : broadcast_map_[broadcast]) {\n if (ShapeUtil::CompatibleIgnoringElementType(\n previous_broadcast->shape(), new_producer->shape())) {\n new_broadcast = previous_broadcast;\n break;\n }\n }\n CHECK_NE(new_broadcast, nullptr);\n TF_CHECK_OK(\n new_consumer->ReplaceOperandWithDifferentShape(i, new_broadcast));\n } else if (old_to_new_instrs_.contains(consumer->mutable_operand(i))) {\n HloInstruction* operand_to_use = nullptr;\n auto result = instr_to_dim_map_[producer];\n const int64_t old_batch_dim =\n result[DimMapper(SpaceToBatchDimMap::kBatch)];\n const int64_t old_space_dim =\n result[DimMapper(SpaceToBatchDimMap::kSpace0)];\n const int64_t old_batch_size =\n producer->shape().dimensions(old_batch_dim);\n HloInstruction* new_instr =\n old_to_new_instrs_[consumer->mutable_operand(i)];\n HloInstruction* pivot_new_instr = old_to_new_instrs_[producer];\n auto permute_dims = instr_to_dim_permute_map_[new_instr];\n const int64_t batch_dim = DimLookUp(permute_dims, old_batch_dim);\n const int64_t space_dim = DimLookUp(permute_dims, old_space_dim);\n const int64_t batch_size = new_instr->shape().dimensions(batch_dim);\n if (new_instr->shape().dimensions(space_dim) !=\n pivot_new_instr->shape().dimensions(space_dim)) {\n CHECK_EQ(batch_dim + 1, space_dim);\n std::vector new_dimensions(\n new_instr->shape().dimensions().begin(),\n new_instr->shape().dimensions().end());\n new_dimensions[space_dim] *= (batch_size / old_batch_size);\n new_dimensions[batch_dim] = old_batch_size;\n TF_ASSIGN_OR_RETURN(HloInstruction * reshape,\n MakeReshapeHlo(new_dimensions, new_instr));\n const int64_t pivot_space_size =\n pivot_new_instr->shape().dimensions(space_dim) * batch_size /\n old_batch_size;\n CHECK(pivot_space_size > new_dimensions[space_dim] ||\n !is_pivot_producer_modified);\n PaddingConfig padding_config =\n MakeNoPaddingConfig(reshape->shape().dimensions_size());\n 
padding_config.mutable_dimensions(space_dim)->set_edge_padding_high(\n pivot_space_size - new_dimensions[space_dim]);\n padding_config.mutable_dimensions(space_dim)->set_edge_padding_low(0);\n HloInstruction* padding =\n consumer->AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::Zero(reshape->shape().element_type())));\n TF_ASSIGN_OR_RETURN(\n HloInstruction * padded_operand,\n MakePadHlo(reshape, padding, padding_config, &reshape->metadata(),\n &reshape->frontend_attributes()));\n TF_ASSIGN_OR_RETURN(\n operand_to_use,\n MakeReshapeHlo(pivot_new_instr->shape().dimensions(),\n padded_operand));\n } else {\n operand_to_use = old_to_new_instrs_[consumer->mutable_operand(i)];\n }\n TF_CHECK_OK(\n new_consumer->ReplaceOperandWithDifferentShape(i, operand_to_use));\n } else if (consumer->IsElementwiseBinary() &&\n consumer->mutable_operand(i)->opcode() ==\n HloOpcode::kBroadcast &&\n IsBroadcastTree(consumer->mutable_operand(i), producer,\n instructions_to_transform)) {\n RewriteBroadcastTree(producer, instructions_to_transform);\n TF_CHECK_OK(new_consumer->ReplaceOperandWithDifferentShape(\n i, old_to_new_instrs_[consumer->mutable_operand(i)]));\n } else if (consumer->operand(i)->opcode() == HloOpcode::kConstant) {\n TF_ASSIGN_OR_RETURN(\n auto new_constant,\n PropagateOnConstant(consumer->mutable_operand(i), producer));\n TF_CHECK_OK(\n new_consumer->ReplaceOperandWithDifferentShape(i, new_constant));\n }\n }\n auto old_type = new_consumer->mutable_shape()->element_type();\n *(new_consumer->mutable_shape()) = old_to_new_instrs_[producer]->shape();\n new_consumer->mutable_shape()->set_element_type(old_type);\n old_to_new_instrs_[consumer] = new_consumer;\n instr_to_dim_map_[consumer] = std::vector(dim_map_val);\n CHECK(instr_to_dim_permute_map_.contains(old_to_new_instrs_[producer]));\n instr_to_dim_permute_map_[new_consumer] = std::vector(\n instr_to_dim_permute_map_[old_to_new_instrs_[producer]]);\n VLOG(2) << \" new_consumer \" << 
new_consumer->ToString()\n << \" old_to_new_instrs_[producer] \"\n << old_to_new_instrs_[producer]->ToString() << \" permute dims \"\n << instr_to_dim_permute_map_.count(new_consumer);\n return true;\n }\n if (consumer->opcode() == HloOpcode::kConvolution) {\n if (IsConvSuitableForSpaceToBatch(consumer)) {\n TF_CHECK_OK(PropagateOnConv(consumer));\n return true;\n } else {\n TF_CHECK_OK(PropagateOnBackpropFilterConv(consumer));\n return false;\n }\n }\n if (consumer->opcode() == HloOpcode::kConcatenate) {\n TF_CHECK_OK(PropagateOnConcat(consumer));\n return true;\n }\n if (consumer->opcode() == HloOpcode::kReverse) {\n TF_CHECK_OK(PropagateOnReverse(consumer));\n return true;\n }\n if (consumer->opcode() == HloOpcode::kDot) {\n auto dim_map_val = instr_to_dim_map_[producer];\n const int64_t old_batch_dim =\n dim_map_val[DimMapper(SpaceToBatchDimMap::kBatch)];\n const int64_t old_space_dim =\n dim_map_val[DimMapper(SpaceToBatchDimMap::kSpace0)];\n int64_t new_batch_dim = -1;\n int64_t new_space_dim = -1;\n int64_t outer = 0;\n for (int64_t i = 0; i < producer->shape().rank(); ++i) {\n if (absl::c_linear_search(\n consumer->dot_dimension_numbers().lhs_batch_dimensions(), i) ||\n absl::c_linear_search(\n consumer->dot_dimension_numbers().lhs_contracting_dimensions(),\n i)) {\n continue;\n }\n if (i == old_batch_dim) {\n new_batch_dim =\n outer +\n consumer->dot_dimension_numbers().lhs_batch_dimensions_size();\n }\n if (i == old_space_dim) {\n new_batch_dim =\n outer +\n consumer->dot_dimension_numbers().lhs_batch_dimensions_size();\n }\n ++outer;\n }\n std::vector dim_map(kNumMappedDims);\n dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] = new_batch_dim;\n dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] = new_space_dim;\n dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] =\n consumer->shape().rank() - 1;\n instr_to_dim_map_[consumer] = dim_map;\n auto new_consumer = computation->AddInstruction(consumer->Clone());\n 
new_consumer->mutable_shape()->mutable_dimensions()[new_batch_dim] =\n producer->shape().dimensions(old_batch_dim);\n new_consumer->mutable_shape()->mutable_dimensions()[new_space_dim] =\n producer->shape().dimensions(old_space_dim);\n old_to_new_instrs_[consumer] = new_consumer;\n return true;\n }\n if (consumer->opcode() == HloOpcode::kPad) {\n TF_CHECK_OK(PropagateOnPad(consumer));\n return true;\n }\n if (consumer->opcode() == HloOpcode::kSlice) {\n TF_CHECK_OK(PropagateOnSlice(consumer));\n return true;\n }\n if (consumer->opcode() == HloOpcode::kReduce) {\n auto reduce_dims = consumer->dimensions();\n auto dim_map_val = instr_to_dim_map_[consumer->mutable_operand(0)];\n auto first_operand = old_to_new_instrs_[consumer->mutable_operand(0)];\n auto permute_dims = instr_to_dim_permute_map_[first_operand];\n const int64_t old_batch_dim =\n dim_map_val[DimMapper(SpaceToBatchDimMap::kBatch)];\n const int64_t space_dim =\n dim_map_val[DimMapper(SpaceToBatchDimMap::kSpace0)];\n const int64_t new_batch_dim = DimLookUp(permute_dims, old_batch_dim);\n const int64_t new_space_dim = DimLookUp(permute_dims, space_dim);\n std::vector changed_dims(consumer->dimensions().size());\n if (!absl::c_linear_search(reduce_dims, old_batch_dim) &&\n !absl::c_linear_search(reduce_dims, space_dim)) {\n for (int64_t i = 0; i < consumer->dimensions().size(); ++i) {\n changed_dims[i] = DimLookUp(permute_dims, consumer->dimensions(i));\n }\n int64_t new_output_batch_dim = new_batch_dim;\n int64_t new_output_space_dim = new_space_dim;\n for (int64_t i = 0; i < consumer->dimensions().size(); ++i) {\n if (changed_dims[i] < new_batch_dim) {\n new_output_batch_dim--;\n }\n if (changed_dims[i] < new_space_dim) {\n new_output_space_dim--;\n }\n }\n int64_t old_output_batch_dim = old_batch_dim;\n int64_t old_output_space_dim = space_dim;\n for (int64_t i = 0; i < consumer->dimensions().size(); ++i) {\n if (reduce_dims[i] < old_batch_dim) {\n old_output_batch_dim--;\n }\n if (reduce_dims[i] < 
space_dim) {\n old_output_space_dim--;\n }\n }\n HloInstruction* new_consumer = nullptr;\n TF_ASSIGN_OR_RETURN(\n new_consumer,\n MakeReduceHlo(first_operand, consumer->mutable_operand(1),\n changed_dims, consumer->called_computations()[0]));\n VLOG(3) << \" new_output_batch_dim \" << new_output_batch_dim << \" size \"\n << first_operand->shape().dimensions(new_batch_dim)\n << \" new_output_space_dim \" << new_output_space_dim << \" size \"\n << first_operand->shape().dimensions(new_space_dim);\n std::vector dim_map(kNumMappedDims);\n dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] = old_output_batch_dim;\n dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] = old_output_space_dim;\n dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] = -1;\n instr_to_dim_map_[consumer] = dim_map;\n const int64_t rank = first_operand->shape().rank();\n const int64_t output_rank = new_consumer->shape().rank();\n std::vector old_reduce_output_to_input(output_rank);\n int dim_number_to_assign_old = 0;\n for (int64_t i = 0; i < rank; ++i) {\n if (auto it = absl::c_find(reduce_dims, i); it != reduce_dims.end()) {\n continue;\n }\n old_reduce_output_to_input[dim_number_to_assign_old++] = i;\n }\n std::vector new_reduce_output_to_input(output_rank);\n int dim_number_to_assign_new = 0;\n for (int64_t i = 0; i < rank; ++i) {\n if (auto it = absl::c_find(changed_dims, i); it != changed_dims.end()) {\n continue;\n }\n new_reduce_output_to_input[dim_number_to_assign_new++] = i;\n }\n std::vector new_permute_dims(output_rank);\n for (int64_t i = 0; i < output_rank; ++i) {\n new_permute_dims[i] = std::distance(\n new_reduce_output_to_input.begin(),\n absl::c_find(\n new_reduce_output_to_input,\n DimLookUp(permute_dims, old_reduce_output_to_input[i])));\n }\n instr_to_dim_permute_map_[new_consumer] = new_permute_dims;\n old_to_new_instrs_[consumer] = new_consumer;\n return true;\n }\n HloInstruction* new_consumer =\n computation->AddInstruction(consumer->Clone());\n auto retval = 
GetSpatialDimsToSplit(consumer->mutable_operand(0));\n std::vector old_spatial_dims = retval.first;\n std::vector new_spatial_dims = retval.second;\n TF_ASSIGN_OR_RETURN(\n first_operand,\n SelectValidPortion(first_operand, consumer->mutable_operand(0),\n consumer->mutable_operand(1), new_batch_dim,\n new_spatial_dims, old_batch_dim, old_spatial_dims));\n for (int64_t i = 0; i < new_consumer->dimensions().size(); ++i) {\n changed_dims[i] = DimLookUp(permute_dims, new_consumer->dimensions(i));\n }\n *(new_consumer->mutable_dimensions()) = changed_dims;\n TF_CHECK_OK(\n new_consumer->ReplaceOperandWithDifferentShape(0, first_operand));\n old_to_new_instrs_[consumer] = new_consumer;\n instr_to_dim_map_[consumer] = std::vector(dim_map_val);\n return false;\n }\n if (consumer->opcode() == HloOpcode::kTranspose) {\n auto first_operand = old_to_new_instrs_[consumer->mutable_operand(0)];\n auto new_consumer = computation->AddInstruction(first_operand->Clone());\n old_to_new_instrs_[consumer] = new_consumer;\n auto dim_map_val = instr_to_dim_map_[consumer->mutable_operand(0)];\n const int64_t old_batch_dim =\n dim_map_val[DimMapper(SpaceToBatchDimMap::kBatch)];\n const int64_t old_space_dim =\n dim_map_val[DimMapper(SpaceToBatchDimMap::kSpace0)];\n const int64_t old_feature_dim =\n dim_map_val[DimMapper(SpaceToBatchDimMap::kFeature)];\n int64_t new_batch_dim, new_space_dim, new_feature_dim;\n std::vector new_dimensions(consumer->dimensions().size());\n for (int64_t ctr = 0; ctr < consumer->dimensions().size(); ++ctr) {\n int64_t dim = consumer->dimensions(ctr);\n if (dim == old_batch_dim) {\n new_batch_dim = ctr;\n }\n if (dim == old_space_dim) {\n new_space_dim = ctr;\n }\n if (dim == old_feature_dim) {\n new_feature_dim = ctr;\n }\n }\n std::vector dim_map(kNumMappedDims);\n dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] = new_batch_dim;\n dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] = new_feature_dim;\n dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] = 
new_space_dim;\n instr_to_dim_map_[consumer] = dim_map;\n std::vector new_permute_dims(consumer->dimensions().size());\n auto permute_dims = instr_to_dim_permute_map_[first_operand];\n for (int64_t i = 0; i < consumer->dimensions().size(); ++i) {\n new_permute_dims[i] = DimLookUp(permute_dims, consumer->dimensions(i));\n }\n instr_to_dim_permute_map_[new_consumer] = new_permute_dims;\n return true;\n }\n if (consumer->opcode() == HloOpcode::kReduceWindow ||\n consumer->opcode() == HloOpcode::kSelectAndScatter) {\n bool is_select_and_scatter =\n consumer->opcode() == HloOpcode::kSelectAndScatter;\n auto first_operand = old_to_new_instrs_[consumer->mutable_operand(0)];\n auto init_val = is_select_and_scatter ? consumer->mutable_operand(2)\n : consumer->mutable_operand(1);\n auto dim_map_val = instr_to_dim_map_[consumer->mutable_operand(0)];\n auto retval = GetSpatialDimsToSplit(consumer->mutable_operand(0));\n std::vector old_spatial_dims = retval.first;\n std::vector new_spatial_dims = retval.second;\n const int64_t old_batch_dim =\n dim_map_val[DimMapper(SpaceToBatchDimMap::kBatch)];\n const int64_t old_space_dim = old_spatial_dims[0];\n auto permute_dims = instr_to_dim_permute_map_[first_operand];\n const int64_t new_batch_dim = DimLookUp(permute_dims, old_batch_dim);\n const int64_t new_space_dim = new_spatial_dims[0];\n auto new_shape = first_operand->shape();\n auto old_shape = consumer->mutable_operand(0)->shape();\n const int64_t new_space_size = new_shape.dimensions(new_space_dim);\n const int64_t stride =\n consumer->window().dimensions(old_space_dim).stride();\n auto pad_val =\n is_select_and_scatter\n ? 
consumer->AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::MinValue(\n consumer->operand(2)->shape().element_type())))\n : init_val;\n TF_ASSIGN_OR_RETURN(\n first_operand,\n SelectValidPortion(first_operand, consumer->mutable_operand(0), pad_val,\n new_batch_dim, new_spatial_dims, old_batch_dim,\n old_spatial_dims));\n const int64_t extra_space = new_space_size % stride;\n if (extra_space) {\n CHECK_EQ(consumer->opcode(), HloOpcode::kReduceWindow);\n const int64_t old_batch_size = old_shape.dimensions(old_batch_dim);\n const int64_t old_space_size = old_shape.dimensions(old_space_dim);\n if ((new_space_size - extra_space) * old_batch_size *\n ctrl_.number_of_splits >=\n old_batch_size * old_space_size) {\n TF_ASSIGN_OR_RETURN(\n first_operand, ChangeSpatialSizeOnSpaceToBatchedShape(\n first_operand, new_batch_dim, old_batch_size,\n new_spatial_dims, new_space_size - extra_space));\n } else {\n TF_ASSIGN_OR_RETURN(\n first_operand,\n ChangeSpatialSizeOnSpaceToBatchedShape(\n first_operand, new_batch_dim, old_batch_size, new_spatial_dims,\n new_space_size + stride - extra_space,\n true));\n }\n }\n const int64_t window_size =\n consumer->window().dimensions(old_space_dim).size();\n const int64_t last_overlap_point = ((new_space_size - 1) / stride) * stride;\n VLOG(1) << \"last_overlap_point \" << last_overlap_point << \" window_size \"\n << window_size << \" new_space_size \" << new_space_size;\n const int64_t halo_size = last_overlap_point + window_size - new_space_size;\n if (halo_size > 0) {\n TF_ASSIGN_OR_RETURN(\n first_operand,\n HaloDuplicateWithSlice(first_operand, new_spatial_dims, new_batch_dim,\n 0, halo_size, init_val));\n }\n Window new_win;\n for (int64_t i = 0; i < consumer->window().dimensions().size(); ++i) {\n auto dim = ReverseDimLookUp(permute_dims, i);\n new_win.add_dimensions();\n new_win.mutable_dimensions(i)->set_stride(\n consumer->window().dimensions(dim).stride());\n new_win.mutable_dimensions(i)->set_size(\n 
consumer->window().dimensions(dim).size());\n if (i == old_space_dim) {\n new_win.mutable_dimensions(i)->set_padding_high(0);\n new_win.mutable_dimensions(i)->set_padding_low(0);\n } else {\n new_win.mutable_dimensions(i)->set_padding_high(\n consumer->window().dimensions(dim).padding_high());\n new_win.mutable_dimensions(i)->set_padding_low(\n consumer->window().dimensions(dim).padding_low());\n }\n new_win.mutable_dimensions(i)->set_window_dilation(\n consumer->window().dimensions(dim).window_dilation());\n new_win.mutable_dimensions(i)->set_base_dilation(\n consumer->window().dimensions(dim).base_dilation());\n new_win.mutable_dimensions(i)->set_window_reversal(\n consumer->window().dimensions(dim).window_reversal());\n }\n new_shape = first_operand->shape();\n HloInstruction* new_consumer = nullptr;\n if (is_select_and_scatter) {\n auto second_operand = old_to_new_instrs_[consumer->mutable_operand(1)];\n auto select_comp = consumer->select();\n auto scatter_comp = consumer->scatter();\n TF_ASSIGN_OR_RETURN(\n auto new_select_and_scatter_shape,\n ShapeInference::InferSelectAndScatterShape(\n new_shape, select_comp->ComputeProgramShape(), new_win,\n second_operand->shape(), init_val->shape(),\n scatter_comp->ComputeProgramShape()));\n new_consumer = computation_->AddInstruction(\n HloInstruction::CreateSelectAndScatter(\n new_select_and_scatter_shape, first_operand, select_comp, new_win,\n second_operand, init_val, scatter_comp),\n &consumer->metadata(), &consumer->frontend_attributes());\n TF_CHECK_OK(\n new_consumer->ReplaceOperandWithDifferentShape(0, first_operand));\n TF_CHECK_OK(\n new_consumer->ReplaceOperandWithDifferentShape(1, second_operand));\n VLOG(2) << \"New select and scatter \" << new_consumer->ToString();\n if (halo_size > 0) {\n const int64_t rank = new_consumer->shape().rank();\n const int64_t batch_size =\n new_consumer->shape().dimensions(new_batch_dim);\n std::vector start_indices(rank, 0),\n 
end_indices(new_consumer->shape().dimensions().begin(),\n new_consumer->shape().dimensions().end()),\n strides(rank, 1);\n start_indices[new_space_dim] = new_space_size;\n end_indices[new_space_dim] = new_space_size + halo_size;\n end_indices[new_batch_dim] = batch_size - 1;\n TF_ASSIGN_OR_RETURN(\n HloInstruction * bottom,\n MakeSliceHlo(new_consumer, start_indices, end_indices, strides,\n &consumer->metadata(),\n &consumer->frontend_attributes()));\n std::vector start_indices_top(rank, 0),\n end_indices_top(new_consumer->shape().dimensions().begin(),\n new_consumer->shape().dimensions().end());\n end_indices_top[new_space_dim] = halo_size;\n start_indices_top[new_batch_dim] = 1;\n TF_ASSIGN_OR_RETURN(\n HloInstruction * top,\n MakeSliceHlo(new_consumer, start_indices_top, end_indices_top,\n strides, &consumer->metadata(),\n &consumer->frontend_attributes()));\n HloInstruction* default_fill = MakeBroadcastHlo(\n init_val, {}, top->shape().dimensions(), &init_val->metadata(),\n &init_val->frontend_attributes());\n TF_ASSIGN_OR_RETURN(\n HloInstruction * bottom_compare,\n MakeCompareHlo(ComparisonDirection::kNe, bottom, default_fill,\n &bottom->metadata(),\n &bottom->frontend_attributes()));\n TF_ASSIGN_OR_RETURN(\n HloInstruction * bottom_taken,\n MakeSelectHlo(bottom_compare, bottom, default_fill, nullptr,\n &bottom_compare->metadata(),\n &bottom_compare->frontend_attributes()));\n TF_ASSIGN_OR_RETURN(\n HloInstruction * top_compare,\n MakeCompareHlo(ComparisonDirection::kNe, top, default_fill,\n &top->metadata(), &top->frontend_attributes()));\n TF_ASSIGN_OR_RETURN(HloInstruction * top_taken,\n MakeSelectHlo(top_compare, top, bottom_taken,\n nullptr, &top_compare->metadata(),\n &top_compare->frontend_attributes()));\n TF_ASSIGN_OR_RETURN(HloInstruction * both_compare,\n MakeBinaryHlo(HloOpcode::kAnd, top_compare,\n bottom_compare, &consumer->metadata(),\n &consumer->frontend_attributes()));\n TF_ASSIGN_OR_RETURN(\n HloInstruction * both_added,\n 
MakeBinaryHlo(HloOpcode::kAdd, top, bottom, &consumer->metadata(),\n &consumer->frontend_attributes()));\n TF_ASSIGN_OR_RETURN(\n HloInstruction * final_selection,\n MakeSelectHlo(both_compare, both_added, top_taken, nullptr,\n &both_compare->metadata(),\n &both_compare->frontend_attributes()));\n PaddingConfig padding_config =\n MakeNoPaddingConfig(final_selection->shape().dimensions_size());\n padding_config.mutable_dimensions(new_batch_dim)\n ->set_edge_padding_low(1);\n padding_config.mutable_dimensions(new_space_dim)\n ->set_edge_padding_high(new_space_size);\n HloInstruction* padding = computation_->AddInstruction(\n HloInstruction::CreateConstant(\n LiteralUtil::Zero(final_selection->shape().element_type())),\n &consumer->metadata(), &consumer->frontend_attributes());\n TF_ASSIGN_OR_RETURN(\n final_selection,\n MakePadHlo(final_selection, padding, padding_config,\n &final_selection->metadata(),\n &final_selection->frontend_attributes()));\n tsl::core::Bitmap b(batch_size * (new_space_size + halo_size));\n for (int k = 0; k < batch_size * (new_space_size + halo_size); ++k) {\n const int64_t space_index = k % (new_space_size + halo_size);\n const int64_t batch_index = (k / (new_space_size + halo_size));\n if (batch_index < 1 || space_index >= halo_size) {\n b.set(k);\n } else {\n b.clear(k);\n }\n }\n auto arg_literal = LiteralUtil::CreateR1(b);\n VLOG(4) << \"Slice mask created: arg literal \" << arg_literal.ToString();\n HloInstruction* slice_mask = computation_->AddInstruction(\n HloInstruction::CreateConstant(std::move(arg_literal)),\n &consumer->metadata(), &consumer->frontend_attributes());\n std::vector slice_mask_reshape_dims(2);\n slice_mask_reshape_dims[0] = batch_size;\n slice_mask_reshape_dims[1] = (new_space_size + halo_size);\n TF_ASSIGN_OR_RETURN(\n HloInstruction * slice_mask_reshaped,\n MakeReshapeHlo(slice_mask_reshape_dims, slice_mask));\n HloInstruction* shape_mask = MakeBroadcastHlo(\n slice_mask_reshaped, {new_batch_dim, new_space_dim},\n 
final_selection->shape().dimensions(), &slice_mask->metadata(),\n &slice_mask->frontend_attributes());\n TF_ASSIGN_OR_RETURN(\n new_consumer,\n MakeSelectHlo(shape_mask, new_consumer, final_selection, nullptr,\n &shape_mask->metadata(),\n &shape_mask->frontend_attributes()));\n }\n auto previous_shape =\n old_to_new_instrs_[consumer->mutable_operand(0)]->shape();\n std::vector start_indices(previous_shape.rank(), 0),\n end_indices(previous_shape.dimensions().begin(),\n previous_shape.dimensions().end()),\n strides(previous_shape.rank(), 1);\n TF_ASSIGN_OR_RETURN(new_consumer,\n MakeSliceHlo(new_consumer, start_indices, end_indices,\n strides, &consumer->metadata(),\n &consumer->frontend_attributes()));\n } else {\n auto reduce_comp = consumer->to_apply();\n TF_ASSIGN_OR_RETURN(auto new_reduce_window_shape,\n ShapeInference::InferReduceWindowShape(\n new_shape, init_val->shape(), new_win));\n new_consumer = computation_->AddInstruction(\n HloInstruction::CreateReduceWindow(new_reduce_window_shape,\n first_operand, init_val, new_win,\n reduce_comp),\n &consumer->metadata(), &consumer->frontend_attributes());\n TF_CHECK_OK(\n new_consumer->ReplaceOperandWithDifferentShape(0, first_operand));\n VLOG(1) << \"New reduce window \" << new_consumer->ToString();\n }\n old_to_new_instrs_[consumer] = new_consumer;\n instr_to_dim_map_[consumer] = std::vector(dim_map_val);\n instr_to_dim_permute_map_[new_consumer] = std::vector(\n instr_to_dim_permute_map_[old_to_new_instrs_[consumer->mutable_operand(\n 0)]]);\n return true;\n }\n LOG(FATAL) << \"Trying to propagate through an unsupported instruction \"\n << consumer->ToString();\n return true;\n}\nabsl::StatusOr ConvolutionVisitor::SelectValidPortion(\n HloInstruction* new_instr, HloInstruction* old_instr,\n HloInstruction* select_val, int64_t new_batch_dim,\n absl::Span new_space_dims, int64_t old_batch_dim,\n absl::Span old_space_dims) {\n auto new_shape = new_instr->shape();\n auto old_shape = old_instr->shape();\n VLOG(1) 
<< \"In SelectValidPortion new_batch_dim \" << new_batch_dim\n << \" new_space_dim \" << new_space_dims[0] << \" old_batch_dim \"\n << old_batch_dim << \" old_space_dim \" << old_space_dims[0];\n const int64_t new_batch_size = new_shape.dimensions(new_batch_dim);\n const int64_t new_space_size = new_shape.dimensions(new_space_dims[0]);\n const int64_t old_batch_size = old_shape.dimensions(old_batch_dim);\n const int64_t old_space_size = old_shape.dimensions(old_space_dims[0]);\n CHECK_EQ(new_batch_size % old_batch_size, 0)\n << \" New batch size \" << new_batch_size << \" old batch size \"\n << old_batch_size;\n const int64_t num_splits = ctrl_.number_of_splits;\n const int64_t spatial_dim_count = new_space_dims.size();\n std::vector bounds(2 + spatial_dim_count, new_space_size);\n bounds[0] = old_batch_size;\n bounds[1] = IPow(num_splits, spatial_dim_count);\n const int64_t total_new_space =\n IPow(new_space_size, spatial_dim_count);\n tsl::core::Bitmap b(new_batch_size * total_new_space);\n for (int k = 0; k < new_batch_size * total_new_space; ++k) {\n auto radix = ToMixedRadix(k, bounds);\n bool out_of_bounds = false;\n int64_t batch_residue = 1;\n for (int i = 0; i < spatial_dim_count; ++i) {\n const int64_t space_index = radix[2 + i];\n const int64_t batch_index = (radix[1] / batch_residue) % num_splits;\n batch_residue *= num_splits;\n if (batch_index * new_space_size + space_index >= old_space_size) {\n out_of_bounds = true;\n }\n }\n if (!out_of_bounds) {\n b.set(k);\n } else {\n b.clear(k);\n }\n }\n auto arg_literal = LiteralUtil::CreateR1(b);\n VLOG(4) << \"Slice mask created: arg literal \" << arg_literal.ToString();\n HloInstruction* slice_mask = computation_->AddInstruction(\n HloInstruction::CreateConstant(std::move(arg_literal)),\n &old_instr->metadata(), &old_instr->frontend_attributes());\n std::vector slice_mask_reshape_dims(1 + spatial_dim_count,\n new_space_size);\n slice_mask_reshape_dims[0] = new_batch_size;\n 
TF_ASSIGN_OR_RETURN(HloInstruction * slice_mask_reshaped,\n MakeReshapeHlo(slice_mask_reshape_dims, slice_mask));\n std::vector broadcast_dims(new_space_dims.begin(),\n new_space_dims.end());\n broadcast_dims.insert(broadcast_dims.begin(), new_batch_dim);\n HloInstruction* shape_mask = MakeBroadcastHlo(\n slice_mask_reshaped, broadcast_dims, new_instr->shape().dimensions(),\n &slice_mask_reshaped->metadata(),\n &slice_mask_reshaped->frontend_attributes());\n VLOG(1) << \"Shape mask made \" << shape_mask->ToString();\n HloInstruction* zeroes = MakeBroadcastHlo(\n select_val, {}, new_instr->shape().dimensions(), &select_val->metadata(),\n &select_val->frontend_attributes());\n TF_ASSIGN_OR_RETURN(new_instr,\n MakeSelectHlo(shape_mask, new_instr, zeroes, nullptr,\n &shape_mask->metadata(),\n &shape_mask->frontend_attributes()));\n return new_instr;\n}\nabsl::StatusOr ConvolutionVisitor::BatchToSpace(\n HloInstruction* old_instr) {\n if (batch_to_space_map_.count(old_instr)) {\n CHECK_NE(batch_to_space_map_[old_instr], nullptr);\n return batch_to_space_map_[old_instr];\n }\n auto result = instr_to_dim_map_[old_instr];\n const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];\n const int64_t old_space_dim = result[DimMapper(SpaceToBatchDimMap::kSpace0)];\n const int64_t old_batch_size = old_instr->shape().dimensions(old_batch_dim);\n CHECK(old_to_new_instrs_.contains(old_instr));\n auto new_instr = old_to_new_instrs_[old_instr];\n VLOG(2) << \"old_batch_dim \" << old_batch_dim << \" old_space_dim \"\n << old_space_dim << \" old_instr \" << old_instr->ToString()\n << \"\\n new_instr \" << new_instr->ToString() << \" permute dims \"\n << instr_to_dim_permute_map_.count(new_instr) << \" old_batch_size \"\n << old_batch_size;\n CHECK(instr_to_dim_permute_map_.contains(new_instr));\n auto permute_dims = instr_to_dim_permute_map_[new_instr];\n const int64_t batch_dim = DimLookUp(permute_dims, old_batch_dim);\n const int64_t space_dim = 
DimLookUp(permute_dims, old_space_dim);\n const int64_t spatial_dim_size = new_instr->shape().dimensions(space_dim);\n std::vector split_spatial_dimensions(\n ctrl_.count_of_dimensions_to_convert);\n absl::c_iota(split_spatial_dimensions, space_dim);\n TF_ASSIGN_OR_RETURN(new_instr, SplitAndTransposeMergedBatch(\n new_instr, batch_dim, old_batch_size,\n split_spatial_dimensions));\n std::vector new_dimensions(new_instr->shape().dimensions().begin(),\n new_instr->shape().dimensions().end());\n new_dimensions.erase(new_dimensions.begin() + split_spatial_dimensions[0],\n new_dimensions.begin() + split_spatial_dimensions[0] +\n ctrl_.count_of_dimensions_to_convert);\n for (auto spatial_dimension : split_spatial_dimensions) {\n new_dimensions[spatial_dimension] =\n spatial_dim_size * ctrl_.number_of_splits;\n }\n TF_ASSIGN_OR_RETURN(HloInstruction * reshape,\n MakeReshapeHlo(new_dimensions, new_instr));\n VLOG(1) << \"Batch to space reshape \" << reshape->ToString();\n const int64_t rank = old_instr->shape().rank();\n std::vector start_indices(rank, 0),\n end_indices(new_dimensions.begin(), new_dimensions.end()),\n strides(rank, 1);\n for (auto spatial_dimension : split_spatial_dimensions) {\n end_indices[spatial_dimension] =\n old_instr->shape().dimensions(old_space_dim);\n }\n TF_ASSIGN_OR_RETURN(\n HloInstruction * output_slice,\n MakeSliceHlo(reshape, start_indices, end_indices, strides,\n &reshape->metadata(), &reshape->frontend_attributes()));\n VLOG(1) << \"Batch to space slice \" << output_slice->ToString();\n std::vector transpose_dims(permute_dims);\n TF_ASSIGN_OR_RETURN(HloInstruction * output_transpose,\n MakeTransposeHlo(output_slice, transpose_dims));\n old_instr->SetupDerivedInstruction(output_transpose);\n batch_to_space_map_[old_instr] = output_transpose;\n return output_transpose;\n}\nabsl::Status ConvolutionVisitor::PropagateOnUsers(HloInstruction* old_conv) {\n std::queue> propagation_worklist;\n if (old_conv->user_count() == 0) {\n 
TF_ASSIGN_OR_RETURN(HloInstruction * batch_to_space,\n BatchToSpace(old_conv));\n VLOG(1) << \"Replacing the root instruction to \"\n << batch_to_space->ToString();\n TF_CHECK_OK(computation_->ReplaceInstruction(old_conv, batch_to_space));\n VLOG(1) << \"Replacement successful\";\n return absl::OkStatus();\n }\n int64_t iteration_count = 0;\n propagation_worklist.push(\n std::make_pair(old_conv, old_conv->mutable_operand(0)));\n while (!propagation_worklist.empty()) {\n auto top = propagation_worklist.front();\n auto node = top.first;\n auto parent = top.second;\n VLOG(1) << \"Traversing for propagation operating on \" << node->ToString();\n propagation_worklist.pop();\n if (old_to_new_instrs_.count(node) > 0 && iteration_count != 0) {\n continue;\n }\n bool needs_further_propagation = true;\n if (iteration_count != 0) {\n TF_ASSIGN_OR_RETURN(needs_further_propagation, Propagate(node, parent));\n }\n iteration_count++;\n if (node->parent()->root_instruction() == node) {\n if (!needs_further_propagation) {\n VLOG(1) << \"Replacing the root instruction to \"\n << old_to_new_instrs_[node]->ToString();\n TF_CHECK_OK(\n computation_->ReplaceInstruction(node, old_to_new_instrs_[node]));\n continue;\n }\n TF_ASSIGN_OR_RETURN(HloInstruction * batch_to_space, BatchToSpace(node));\n VLOG(1) << \"Replacing the root instruction to \"\n << batch_to_space->ToString();\n TF_CHECK_OK(computation_->ReplaceInstruction(node, batch_to_space));\n } else {\n if (!needs_further_propagation) {\n TF_CHECK_OK(\n computation_->ReplaceInstruction(node, old_to_new_instrs_[node]));\n continue;\n }\n HloInstructionSet unsupported_users;\n for (auto user : node->users()) {\n if (!SupportedOpForPropagation(user, node)) {\n VLOG(1) << \"Unsupported op found \" << user->ToString();\n unsupported_users.insert(user);\n continue;\n }\n if (CanPropagate(user, node)) {\n non_propagatable_instrs_.erase(user);\n propagation_worklist.push(std::make_pair(user, node));\n } else {\n 
non_propagatable_instrs_.insert(user);\n }\n }\n if (!unsupported_users.empty()) {\n TF_ASSIGN_OR_RETURN(HloInstruction * batch_to_space,\n BatchToSpace(node));\n for (auto user : unsupported_users) {\n for (int64_t i = 0; i < user->operand_count(); ++i) {\n if (user->operand(i) == node) {\n TF_CHECK_OK(user->ReplaceOperandWith(i, batch_to_space));\n }\n }\n }\n }\n }\n }\n return absl::OkStatus();\n}\nabsl::Status ConvolutionVisitor::PropagateOnConv(HloInstruction* convolution) {\n auto activations_old = convolution->mutable_operand(0);\n CHECK(old_to_new_instrs_.contains(activations_old));\n auto activations_new = old_to_new_instrs_[activations_old];\n auto permute_dims = instr_to_dim_permute_map_[activations_new];\n auto original_conv_dims = convolution->convolution_dimension_numbers();\n auto old_new_dims = GetSpatialDimsToSplit(activations_old);\n std::vector old_spatial_dims = old_new_dims.first;\n std::vector new_spatial_dims = old_new_dims.second;\n auto permuted_conv_dims_numbers = original_conv_dims;\n int64_t activations_batch_dim =\n DimLookUp(permute_dims, original_conv_dims.input_batch_dimension());\n int64_t activations_feature_dim =\n DimLookUp(permute_dims, original_conv_dims.input_feature_dimension());\n permuted_conv_dims_numbers.set_input_batch_dimension(activations_batch_dim);\n permuted_conv_dims_numbers.set_input_feature_dimension(\n activations_feature_dim);\n for (int64_t i = 0; i < original_conv_dims.input_spatial_dimensions_size();\n ++i) {\n permuted_conv_dims_numbers.set_input_spatial_dimensions(\n i, DimLookUp(permute_dims,\n original_conv_dims.input_spatial_dimensions(i)));\n }\n const int64_t old_batch_dim = original_conv_dims.input_batch_dimension();\n const int64_t old_batch_size =\n activations_old->shape().dimensions(old_batch_dim);\n ConvDetails c =\n GetConvolutionDetails(convolution, permuted_conv_dims_numbers);\n VLOG(1) << \"Propagating on conv activations_batch_dim \"\n << activations_batch_dim << \" 
spatial_dimension_to_split \"\n << c.spatial_dimensions_to_split[0] << \" old_batch_size \"\n << old_batch_size;\n TF_ASSIGN_OR_RETURN(\n auto retval,\n BringSpaceNextToBatch(activations_new, permuted_conv_dims_numbers,\n activations_batch_dim, &new_spatial_dims));\n activations_new = retval.instr;\n std::vector trans_dims = retval.transpose_dims;\n CHECK(!trans_dims.empty());\n auto select_val = computation_->AddInstruction(\n HloInstruction::CreateConstant(\n LiteralUtil::Zero(activations_new->shape().element_type())),\n &convolution->metadata(), &convolution->frontend_attributes());\n TF_ASSIGN_OR_RETURN(\n activations_new,\n SelectValidPortion(activations_new, activations_old, select_val,\n activations_batch_dim, new_spatial_dims, old_batch_dim,\n old_spatial_dims));\n auto new_dim_numbers = permuted_conv_dims_numbers;\n const int64_t num_splits = ctrl_.number_of_splits;\n const int64_t output_offsets = convolution->shape().dimensions(\n permuted_conv_dims_numbers.output_spatial_dimensions(\n GetFirstChosenSpatialDim(convolution)));\n const int64_t output_offsets_per_split =\n CeilOfRatio(output_offsets, num_splits);\n int64_t spatial_split_size =\n CeilOfRatio(output_offsets_per_split, c.base_dilation_factor) * c.stride;\n VLOG(1) << \"spatial size \" << c.spatial_size << \" halo size \" << c.halo_size\n << \" spatial_split_size \" << spatial_split_size;\n while (spatial_split_size * num_splits + c.halo_size - c.spatial_size < 0 ||\n spatial_split_size < c.halo_size - c.inherent_low_padding) {\n spatial_split_size += c.stride;\n }\n VLOG(1) << \"Modified spatial_split_size \" << spatial_split_size;\n const int64_t new_space_size =\n activations_new->shape().dimensions(new_spatial_dims[0]);\n int64_t slice_size = spatial_split_size + c.halo_size;\n if (spatial_split_size > new_space_size) {\n TF_ASSIGN_OR_RETURN(\n activations_new,\n ChangeSpatialSizeOnSpaceToBatchedShape(\n activations_new, activations_batch_dim, old_batch_size,\n new_spatial_dims, 
spatial_split_size,\n true));\n } else {\n if (spatial_split_size < new_space_size) {\n VLOG(3)\n << \"Decreasing the spatial size while propagating spatial_split_size \"\n << spatial_split_size << \" new_space_size \" << new_space_size;\n if (new_space_size % c.stride != 0 || c.base_dilation_factor != 1) {\n TF_ASSIGN_OR_RETURN(\n activations_new,\n ChangeSpatialSizeOnSpaceToBatchedShape(\n activations_new, activations_batch_dim, old_batch_size,\n new_spatial_dims, spatial_split_size));\n } else {\n const int64_t additional_space_present = spatial_split_size % c.stride;\n spatial_split_size = new_space_size;\n slice_size =\n spatial_split_size + std::max(c.kernel_spatial_dim_size - c.stride -\n additional_space_present,\n static_cast(0));\n }\n }\n }\n TF_ASSIGN_OR_RETURN(\n activations_new,\n HaloDuplicateWithSlice(\n activations_new, new_spatial_dims, activations_batch_dim,\n c.base_dilation_factor != 1 &&\n c.inherent_low_padding != 0\n ? (c.inherent_low_padding == c.base_dilation_factor ? 
1 : 0)\n : c.inherent_low_padding,\n slice_size - spatial_split_size));\n const int64_t rank = (convolution->shape().rank());\n std::vector transpose_dims(rank);\n int dim_count = 0;\n std::map dim_translator;\n for (int j = 0;\n j < permuted_conv_dims_numbers.output_spatial_dimensions_size(); ++j) {\n if (j == GetFirstChosenSpatialDim(convolution)) {\n dim_translator[permuted_conv_dims_numbers.output_batch_dimension()] =\n dim_count;\n new_dim_numbers.set_output_batch_dimension(dim_count++);\n }\n dim_translator[permuted_conv_dims_numbers.output_spatial_dimensions(j)] =\n dim_count;\n new_dim_numbers.set_output_spatial_dimensions(j, dim_count);\n dim_count++;\n }\n dim_translator[permuted_conv_dims_numbers.output_feature_dimension()] =\n dim_count;\n new_dim_numbers.set_output_feature_dimension(dim_count);\n int p = 0;\n for (const auto& entry : dim_translator) {\n transpose_dims[p] = entry.second;\n p++;\n }\n auto new_window = convolution->window();\n const int64_t first_dim = GetFirstChosenSpatialDim(convolution);\n for (int i = 0; i < ctrl_.count_of_dimensions_to_convert; ++i) {\n new_window.mutable_dimensions(first_dim + i)\n ->set_padding_high(c.high_padding_for_conv);\n new_window.mutable_dimensions(first_dim + i)\n ->set_padding_low(c.low_padding_for_conv);\n }\n TF_ASSIGN_OR_RETURN(\n HloInstruction * new_conv,\n MakeConvolveHlo(\n activations_new, convolution->mutable_operand(1),\n convolution->feature_group_count(), convolution->batch_group_count(),\n new_window, new_dim_numbers, convolution->precision_config(),\n convolution->shape().element_type()));\n convolution->SetupDerivedInstruction(new_conv);\n old_to_new_instrs_[convolution] = new_conv;\n VLOG(1) << \"Space-to-batched convolution \" << new_conv->ToString();\n std::vector dim_map(kNumMappedDims);\n dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] =\n original_conv_dims.output_batch_dimension();\n dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] =\n original_conv_dims.output_feature_dimension();\n 
dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] =\n original_conv_dims.output_spatial_dimensions(\n GetFirstChosenSpatialDim(convolution));\n instr_to_dim_map_[convolution] = dim_map;\n instr_to_dim_permute_map_[new_conv] = std::vector(transpose_dims);\n convs_to_visit_.erase(convolution);\n return absl::OkStatus();\n}\nabsl::Status ConvolutionVisitor::PropagateOnConcat(HloInstruction* concat) {\n auto first_operand = old_to_new_instrs_[concat->mutable_operand(0)];\n auto permute_dims = instr_to_dim_permute_map_[first_operand];\n const int64_t new_concat_dim =\n DimLookUp(permute_dims, concat->concatenate_dimension());\n std::vector new_operands(concat->operand_count());\n for (int64_t i = 0; i < concat->operand_count(); ++i) {\n new_operands[i] = old_to_new_instrs_[concat->mutable_operand(i)];\n }\n TF_ASSIGN_OR_RETURN(\n HloInstruction * new_concat,\n MakeConcatHlo(new_operands, new_concat_dim, &concat->metadata(),\n &concat->frontend_attributes()));\n old_to_new_instrs_[concat] = new_concat;\n instr_to_dim_map_[concat] =\n std::vector(instr_to_dim_map_[concat->mutable_operand(0)]);\n instr_to_dim_permute_map_[new_concat] =\n std::vector(instr_to_dim_permute_map_[first_operand]);\n return absl::OkStatus();\n}\nabsl::Status ConvolutionVisitor::PropagateOnReverse(HloInstruction* reverse) {\n auto first_operand = old_to_new_instrs_[reverse->mutable_operand(0)];\n auto permute_dims = instr_to_dim_permute_map_[first_operand];\n std::vector new_reverse_dimensions(reverse->dimensions().size());\n int dim_count = 0;\n for (auto dim : reverse->dimensions()) {\n new_reverse_dimensions[dim_count++] = DimLookUp(permute_dims, dim);\n }\n TF_ASSIGN_OR_RETURN(HloInstruction * new_reverse,\n MakeReverseHlo(first_operand, new_reverse_dimensions));\n old_to_new_instrs_[reverse] = new_reverse;\n instr_to_dim_map_[reverse] =\n std::vector(instr_to_dim_map_[reverse->mutable_operand(0)]);\n instr_to_dim_permute_map_[new_reverse] =\n 
std::vector(instr_to_dim_permute_map_[first_operand]);\n return absl::OkStatus();\n}\nabsl::Status ConvolutionVisitor::PropagateOnPad(HloInstruction* pad) {\n auto first_operand = old_to_new_instrs_[pad->mutable_operand(0)];\n auto permute_dims = instr_to_dim_permute_map_[first_operand];\n PaddingConfig padding_config;\n for (int i = 0; i < pad->shape().rank(); ++i) {\n auto dimension = padding_config.add_dimensions();\n const int64_t old_dim = ReverseDimLookUp(permute_dims, i);\n auto old_padding = pad->padding_config().dimensions(old_dim);\n dimension->set_edge_padding_low(old_padding.edge_padding_low());\n dimension->set_edge_padding_high(old_padding.edge_padding_high());\n dimension->set_interior_padding(old_padding.interior_padding());\n }\n HloInstruction* padding = pad->mutable_operand(1);\n TF_ASSIGN_OR_RETURN(auto new_pad,\n MakePadHlo(first_operand, padding, padding_config,\n &first_operand->metadata(),\n &first_operand->frontend_attributes()));\n old_to_new_instrs_[pad] = new_pad;\n instr_to_dim_map_[pad] =\n std::vector(instr_to_dim_map_[pad->mutable_operand(0)]);\n instr_to_dim_permute_map_[new_pad] =\n std::vector(instr_to_dim_permute_map_[first_operand]);\n return absl::OkStatus();\n}\nabsl::Status ConvolutionVisitor::PropagateOnSlice(HloInstruction* slice) {\n auto operand = old_to_new_instrs_[slice->mutable_operand(0)];\n auto permute_dims = instr_to_dim_permute_map_[operand];\n DimensionVector starts(slice->shape().rank());\n DimensionVector limits(slice->shape().rank());\n DimensionVector strides(slice->shape().rank());\n for (int i = 0; i < slice->shape().rank(); ++i) {\n const int64_t old_dim = ReverseDimLookUp(permute_dims, i);\n if (slice->shape().dimensions(old_dim) ==\n slice->operand(0)->shape().dimensions(old_dim)) {\n starts[i] = 0;\n strides[i] = 1;\n limits[i] = operand->shape().dimensions(i);\n continue;\n }\n starts[i] = slice->slice_starts(old_dim);\n strides[i] = slice->slice_strides(old_dim);\n limits[i] = 
slice->slice_limits(old_dim);\n }\n TF_ASSIGN_OR_RETURN(\n auto new_slice,\n MakeSliceHlo(operand, starts, limits, strides, &operand->metadata(),\n &operand->frontend_attributes()));\n old_to_new_instrs_[slice] = new_slice;\n instr_to_dim_map_[slice] =\n std::vector(instr_to_dim_map_[slice->mutable_operand(0)]);\n instr_to_dim_permute_map_[new_slice] =\n std::vector(instr_to_dim_permute_map_[operand]);\n return absl::OkStatus();\n}\nabsl::StatusOr ConvolutionVisitor::TransposeAndMergeBatch(\n HloInstruction* activations,\n absl::Span final_split_spatial_dim_positioning,\n int64_t activations_batch_dim, int64_t old_batch_size) {\n const int64_t spatial_dim_count = final_split_spatial_dim_positioning.size();\n if (final_split_spatial_dim_positioning.size() > 1) {\n int64_t start_batch_dim_position = activations_batch_dim + 1;\n int64_t start_space_dim_position =\n start_batch_dim_position + spatial_dim_count;\n std::vector trans_dims(activations->shape().dimensions_size());\n absl::c_iota(trans_dims, 0);\n for (int i = 0; i < spatial_dim_count; ++i) {\n trans_dims[start_batch_dim_position + i] =\n start_batch_dim_position + (spatial_dim_count - 1 - i) * 2;\n trans_dims[start_space_dim_position + i] =\n start_batch_dim_position + i * 2 + 1;\n }\n TF_ASSIGN_OR_RETURN(activations, MakeTransposeHlo(activations, trans_dims));\n }\n std::vector batch_collapse_reshape_dims(\n activations->shape().dimensions().begin(),\n activations->shape().dimensions().end());\n const int64_t collapsed_batch_size =\n old_batch_size * IPow(ctrl_.number_of_splits, spatial_dim_count);\n batch_collapse_reshape_dims.erase(\n batch_collapse_reshape_dims.begin() + activations_batch_dim,\n batch_collapse_reshape_dims.begin() + activations_batch_dim +\n spatial_dim_count);\n batch_collapse_reshape_dims[activations_batch_dim] = collapsed_batch_size;\n TF_ASSIGN_OR_RETURN(HloInstruction * batch_collapsed_reshape,\n MakeReshapeHlo(batch_collapse_reshape_dims, activations));\n return 
batch_collapsed_reshape;\n}\nabsl::StatusOr ConvolutionVisitor::PerformSplitSpace(\n HloInstruction* activations,\n absl::Span spatial_dimensions_to_split,\n int64_t activations_batch_dim, int64_t spatial_split_size,\n int64_t num_splits) {\n const int64_t old_batch_size =\n activations->shape().dimensions(activations_batch_dim);\n std::vector reshape_dimensions(\n activations->shape().dimensions().begin(),\n activations->shape().dimensions().end());\n for (auto spatial_dimension_to_split : spatial_dimensions_to_split) {\n reshape_dimensions[spatial_dimension_to_split] = spatial_split_size;\n }\n int counter = 0;\n for (auto spatial_dimension_to_split : spatial_dimensions_to_split) {\n reshape_dimensions.insert(\n reshape_dimensions.begin() + (spatial_dimension_to_split + counter),\n num_splits);\n counter++;\n }\n TF_ASSIGN_OR_RETURN(HloInstruction * batch_increased_reshape,\n MakeReshapeHlo(reshape_dimensions, activations));\n return TransposeAndMergeBatch(\n batch_increased_reshape,\n spatial_dimensions_to_split,\n activations_batch_dim, old_batch_size);\n}\nabsl::StatusOr ConvolutionVisitor::PadAndSplitSpace(\n HloInstruction* activations,\n absl::Span spatial_dimensions_to_split,\n int64_t activations_batch_dim, int64_t high_padding, int64_t low_padding,\n int64_t spatial_split_size, int64_t num_splits) {\n const int64_t old_batch_size =\n activations->shape().dimensions(activations_batch_dim);\n if (high_padding || low_padding) {\n PaddingConfig padding_config =\n MakeNoPaddingConfig(activations->shape().dimensions_size());\n for (auto spatial_dimension_to_split : spatial_dimensions_to_split) {\n padding_config.mutable_dimensions(spatial_dimension_to_split)\n ->set_edge_padding_high(high_padding);\n padding_config.mutable_dimensions(spatial_dimension_to_split)\n ->set_edge_padding_low(low_padding);\n }\n HloInstruction* padding = computation_->AddInstruction(\n HloInstruction::CreateConstant(\n LiteralUtil::Zero(activations->shape().element_type())),\n 
&activations->metadata(), &activations->frontend_attributes());\n TF_ASSIGN_OR_RETURN(activations,\n MakePadHlo(activations, padding, padding_config,\n &activations->metadata(),\n &activations->frontend_attributes()));\n }\n VLOG(1) << \"Initial padded activations shape \"\n << activations->shape().ToString() << \" old_batch_size \"\n << old_batch_size << \" activations_batch_dim \"\n << activations_batch_dim;\n return PerformSplitSpace(activations, spatial_dimensions_to_split,\n activations_batch_dim, spatial_split_size,\n num_splits);\n}\nabsl::StatusOr>>\nConvolutionVisitor::SplitSpace(\n HloInstruction* activations, ConvolutionDimensionNumbers& dim_numbers,\n int64_t& activations_batch_dim, int64_t high_padding, int64_t low_padding,\n int64_t spatial_split_size, int64_t num_splits,\n std::vector* spatial_dimensions_to_split, bool is_backprop,\n bool is_rhs) {\n TF_ASSIGN_OR_RETURN(\n auto retval,\n BringSpaceNextToBatch(activations, dim_numbers, activations_batch_dim,\n spatial_dimensions_to_split, is_backprop, is_rhs));\n activations = retval.instr;\n std::vector transpose_dims = retval.transpose_dims;\n TF_ASSIGN_OR_RETURN(\n auto new_activations,\n PadAndSplitSpace(activations, *spatial_dimensions_to_split,\n activations_batch_dim, high_padding, low_padding,\n spatial_split_size, num_splits));\n return std::make_pair(new_activations, transpose_dims);\n}\nabsl::StatusOr ConvolutionVisitor::PropagateOnConstant(\n HloInstruction* consumer, HloInstruction* producer) {\n CHECK(old_to_new_instrs_.contains(producer));\n HloInstruction* new_producer = old_to_new_instrs_[producer];\n auto prod_transpose_dims = instr_to_dim_permute_map_[new_producer];\n std::vector reversed_transpose_dims(prod_transpose_dims.size());\n for (int64_t i = 0; i < prod_transpose_dims.size(); ++i) {\n reversed_transpose_dims[i] = ReverseDimLookUp(prod_transpose_dims, i);\n }\n TF_ASSIGN_OR_RETURN(consumer,\n MakeTransposeHlo(consumer, reversed_transpose_dims));\n auto retval = 
GetSpatialDimsToSplit(producer);\n std::vector old_spatial_dims = retval.first;\n std::vector new_spatial_dims = retval.second;\n auto dim_map = instr_to_dim_map_[producer];\n const int64_t old_batch_dim = dim_map[DimMapper(SpaceToBatchDimMap::kBatch)];\n const int64_t old_space_dim = old_spatial_dims[0];\n const int64_t new_batch_dim = DimLookUp(prod_transpose_dims, old_batch_dim);\n const int64_t new_space_dim = new_spatial_dims[0];\n const int64_t old_batch_size = producer->shape().dimensions(old_batch_dim);\n const int64_t new_batch_size = old_batch_size * ctrl_.number_of_splits;\n const int64_t high_padding =\n (new_batch_size * new_producer->shape().dimensions(new_space_dim) -\n old_batch_size * producer->shape().dimensions(old_space_dim)) /\n old_batch_size;\n auto new_consumer = PadAndSplitSpace(\n consumer, new_spatial_dims, new_batch_dim, high_padding,\n 0, new_producer->shape().dimensions(new_space_dim),\n ctrl_.number_of_splits);\n return new_consumer;\n}\nabsl::Status ConvolutionVisitor::PropagateOnBackpropFilterConv(\n HloInstruction* convolution) {\n auto activations_old = convolution->mutable_operand(0);\n const int64_t rhs_dilation =\n convolution->window()\n .dimensions(GetFirstChosenSpatialDim(convolution))\n .window_dilation();\n auto original_conv_dims = convolution->convolution_dimension_numbers();\n std::vector old_split_spatial_dims(\n ctrl_.dimension_from_end_to_convert),\n old_split_kernel_spatial_dims(ctrl_.dimension_from_end_to_convert);\n for (int i = 0; i < ctrl_.dimension_from_end_to_convert; ++i) {\n old_split_spatial_dims[i] = original_conv_dims.input_spatial_dimensions(\n GetFirstChosenSpatialDim(convolution) + i);\n old_split_kernel_spatial_dims[i] =\n original_conv_dims.kernel_spatial_dimensions(\n GetFirstChosenSpatialDim(convolution) + i);\n }\n auto kernel_old = convolution->mutable_operand(1);\n const int64_t old_kernel_split_dim_size =\n kernel_old->shape().dimensions(old_split_kernel_spatial_dims[0]);\n int64_t 
old_split_dim_size =\n activations_old->shape().dimensions(old_split_spatial_dims[0]);\n int64_t old_batch_dim = original_conv_dims.input_feature_dimension();\n int64_t kernel_old_batch_dim =\n original_conv_dims.kernel_input_feature_dimension();\n const int64_t old_batch_size =\n activations_old->shape().dimensions(old_batch_dim);\n CHECK(old_to_new_instrs_.contains(kernel_old) ||\n old_to_new_instrs_.contains(activations_old));\n HloInstruction* activations_new = nullptr;\n HloInstruction* kernel_new = nullptr;\n bool activations_locally_space_to_batched = false;\n bool kernel_locally_space_to_batched = false;\n std::vector permute_dims_kernel, permute_dims;\n if (old_to_new_instrs_.contains(activations_old)) {\n activations_new = old_to_new_instrs_[activations_old];\n permute_dims = instr_to_dim_permute_map_[activations_new];\n }\n if (old_to_new_instrs_.contains(kernel_old)) {\n kernel_new = old_to_new_instrs_[kernel_old];\n permute_dims_kernel = instr_to_dim_permute_map_[kernel_new];\n }\n if (!old_to_new_instrs_.contains(activations_old)) {\n kernel_new = old_to_new_instrs_[kernel_old];\n permute_dims_kernel = instr_to_dim_permute_map_[kernel_new];\n VLOG(1) << \"Space-to-batching activations to enable space-to-depth\";\n const int64_t new_kernel_space_dim =\n DimLookUp(permute_dims_kernel, old_split_kernel_spatial_dims[0]);\n const int64_t new_kernel_split_dim_size =\n kernel_new->shape().dimensions(new_kernel_space_dim);\n const int64_t needed_spatial_size =\n rhs_dilation * new_kernel_split_dim_size;\n const int64_t pad_size =\n needed_spatial_size * ctrl_.number_of_splits - old_split_dim_size;\n ConvolutionDimensionNumbers tmp_dim_numbers;\n tmp_dim_numbers = original_conv_dims;\n TF_ASSIGN_OR_RETURN(\n auto retval, SplitSpace(activations_old, tmp_dim_numbers, old_batch_dim,\n pad_size, 0,\n needed_spatial_size, ctrl_.number_of_splits,\n &old_split_spatial_dims,\n true));\n activations_new = retval.first;\n std::vector 
reversed_transpose_dims(retval.second.size());\n for (int64_t i = 0; i < retval.second.size(); ++i) {\n reversed_transpose_dims[i] = ReverseDimLookUp(retval.second, i);\n }\n permute_dims = reversed_transpose_dims;\n VLOG(3) << \"New Activations \" << retval.first->ToString();\n activations_locally_space_to_batched = true;\n } else if (!old_to_new_instrs_.contains(kernel_old)) {\n activations_new = old_to_new_instrs_[activations_old];\n permute_dims = instr_to_dim_permute_map_[activations_new];\n VLOG(1) << \"Space-to-batching kernel to enable space-to-depth\";\n const int64_t new_space_dim =\n DimLookUp(permute_dims, old_split_spatial_dims[0]);\n const int64_t new_split_dim_size =\n activations_new->shape().dimensions(new_space_dim);\n const int64_t needed_spatial_size =\n CeilOfRatio(new_split_dim_size, rhs_dilation);\n int64_t old_kernel_split_dim_size =\n kernel_old->shape().dimensions(old_split_kernel_spatial_dims[0]);\n const int64_t pad_size = needed_spatial_size * ctrl_.number_of_splits -\n old_kernel_split_dim_size;\n ConvolutionDimensionNumbers tmp_dim_numbers;\n tmp_dim_numbers = original_conv_dims;\n TF_ASSIGN_OR_RETURN(\n auto retval,\n SplitSpace(kernel_old, tmp_dim_numbers, kernel_old_batch_dim,\n pad_size, 0,\n needed_spatial_size, ctrl_.number_of_splits,\n &old_split_kernel_spatial_dims,\n true, true));\n kernel_new = retval.first;\n std::vector reversed_transpose_dims(retval.second.size());\n for (int64_t i = 0; i < retval.second.size(); ++i) {\n reversed_transpose_dims[i] = ReverseDimLookUp(retval.second, i);\n }\n permute_dims_kernel = reversed_transpose_dims;\n VLOG(3) << \"New kernel \" << retval.first->ToString();\n kernel_locally_space_to_batched = true;\n }\n CHECK_NE(activations_new, nullptr);\n CHECK_NE(kernel_new, nullptr);\n const int64_t new_spatial_dimension =\n activations_new->shape().dimensions_size();\n auto permuted_conv_dims_numbers = original_conv_dims;\n int64_t activations_batch_dim =\n DimLookUp(permute_dims, 
original_conv_dims.input_feature_dimension());\n int64_t activations_feature_dim =\n DimLookUp(permute_dims, original_conv_dims.input_batch_dimension());\n const int64_t previous_spatial_dim_count =\n original_conv_dims.input_spatial_dimensions_size();\n for (int64_t i = 0; i < previous_spatial_dim_count; ++i) {\n permuted_conv_dims_numbers.set_input_spatial_dimensions(\n i, DimLookUp(permute_dims,\n original_conv_dims.input_spatial_dimensions(i)));\n permuted_conv_dims_numbers.set_kernel_spatial_dimensions(\n i, DimLookUp(permute_dims_kernel,\n original_conv_dims.kernel_spatial_dimensions(i)));\n }\n permuted_conv_dims_numbers.add_input_spatial_dimensions(\n new_spatial_dimension);\n permuted_conv_dims_numbers.add_kernel_spatial_dimensions(\n new_spatial_dimension);\n permuted_conv_dims_numbers.add_output_spatial_dimensions(\n new_spatial_dimension);\n const int64_t previous_chosen_spatial_dim_in_output =\n permuted_conv_dims_numbers.output_spatial_dimensions(\n GetFirstChosenSpatialDim(convolution));\n permuted_conv_dims_numbers.set_output_spatial_dimensions(\n GetFirstChosenSpatialDim(convolution), new_spatial_dimension);\n permuted_conv_dims_numbers.set_output_spatial_dimensions(\n previous_spatial_dim_count, previous_chosen_spatial_dim_in_output);\n const int64_t kernel_input_feature_dim = DimLookUp(\n permute_dims_kernel, original_conv_dims.kernel_input_feature_dimension());\n const int64_t kernel_output_feature_dim =\n DimLookUp(permute_dims_kernel,\n original_conv_dims.kernel_output_feature_dimension());\n permuted_conv_dims_numbers.set_kernel_input_feature_dimension(\n kernel_input_feature_dim);\n permuted_conv_dims_numbers.set_kernel_output_feature_dimension(\n kernel_output_feature_dim);\n std::vector spatial_dimensions_to_split(\n ctrl_.count_of_dimensions_to_convert);\n const int64_t first_dim_to_split = GetFirstChosenSpatialDim(convolution);\n for (int64_t i = 0; i < ctrl_.count_of_dimensions_to_convert; ++i) {\n spatial_dimensions_to_split[i] =\n 
permuted_conv_dims_numbers.input_spatial_dimensions(first_dim_to_split +\n i);\n }\n const int64_t kernel_spatial_dimension_to_split =\n permuted_conv_dims_numbers.kernel_spatial_dimensions(\n GetFirstChosenSpatialDim(convolution));\n int64_t new_split_dim_size =\n activations_new->shape().dimensions(spatial_dimensions_to_split[0]);\n const int64_t kernel_new_split_dim_size =\n kernel_new->shape().dimensions(kernel_spatial_dimension_to_split);\n permuted_conv_dims_numbers.set_input_batch_dimension(activations_feature_dim);\n permuted_conv_dims_numbers.set_input_feature_dimension(activations_batch_dim);\n VLOG(1) << \"Propagating on conv activations_batch_dim \"\n << activations_batch_dim << \" spatial_dimension_to_split \"\n << spatial_dimensions_to_split[0] << \" old_batch_size \"\n << old_batch_size << \" new_split_dim_size \" << new_split_dim_size;\n TF_ASSIGN_OR_RETURN(\n auto retval,\n BringSpaceNextToBatch(activations_new, permuted_conv_dims_numbers,\n activations_batch_dim, &spatial_dimensions_to_split,\n true));\n int64_t spatial_dimension_to_split = spatial_dimensions_to_split[0];\n std::vector transpose_dims = retval.transpose_dims;\n CHECK(!transpose_dims.empty());\n activations_new = retval.instr;\n VLOG(1) << \"Activations_new post BringSpaceNextToBatch \"\n << activations_new->ToString();\n VLOG(1) << \"activations_batch_dim \" << activations_batch_dim\n << \" activations_feature_dim \" << activations_feature_dim;\n const int64_t expected_split_dim_size =\n rhs_dilation * kernel_new_split_dim_size;\n if (new_split_dim_size != expected_split_dim_size) {\n CHECK_LT(new_split_dim_size, expected_split_dim_size);\n new_split_dim_size = expected_split_dim_size;\n TF_ASSIGN_OR_RETURN(\n activations_new,\n ChangeSpatialSizeOnSpaceToBatchedShape(\n activations_new, activations_batch_dim, old_batch_size,\n spatial_dimensions_to_split, new_split_dim_size, true));\n }\n spatial_dimension_to_split = spatial_dimensions_to_split[0];\n auto select_val = 
computation_->AddInstruction(\n HloInstruction::CreateConstant(\n LiteralUtil::Zero(activations_new->shape().element_type())),\n &activations_new->metadata(), &activations_new->frontend_attributes());\n if (!activations_locally_space_to_batched) {\n TF_ASSIGN_OR_RETURN(\n activations_new,\n SelectValidPortion(activations_new, activations_old, select_val,\n activations_batch_dim, spatial_dimensions_to_split,\n old_batch_dim, old_split_spatial_dims));\n }\n if (!kernel_locally_space_to_batched) {\n VLOG(3) << \"Selecting the valid kernel area\";\n std::vector new_kernel_split_spatial_dims(\n ctrl_.dimension_from_end_to_convert);\n new_kernel_split_spatial_dims[0] = kernel_spatial_dimension_to_split;\n TF_ASSIGN_OR_RETURN(\n kernel_new,\n SelectValidPortion(kernel_new, kernel_old, select_val,\n kernel_input_feature_dim,\n new_kernel_split_spatial_dims,\n original_conv_dims.kernel_input_feature_dimension(),\n old_split_kernel_spatial_dims));\n }\n auto new_dim_numbers = permuted_conv_dims_numbers;\n VLOG(2) << \"New dim numbers \" << new_dim_numbers.DebugString();\n const int64_t inherent_low_padding =\n convolution->window()\n .dimensions(GetFirstChosenSpatialDim(convolution))\n .padding_low();\n const int64_t inherent_high_padding =\n convolution->window()\n .dimensions(GetFirstChosenSpatialDim(convolution))\n .padding_high();\n std::vector activations_chunks;\n for (int64_t i = 0; i < inherent_low_padding; ++i) {\n HloInstruction* activations_to_use = nullptr;\n if (i == 0) {\n activations_to_use = activations_new;\n } else {\n activations_to_use = activations_chunks.back();\n }\n TF_ASSIGN_OR_RETURN(\n HloInstruction * activations_slice,\n HaloDuplicateWithSlice(activations_to_use, spatial_dimensions_to_split,\n activations_batch_dim, 1,\n 0));\n activations_chunks.push_back(activations_slice);\n }\n absl::c_reverse(activations_chunks);\n const int64_t expanded_kernel =\n old_kernel_split_dim_size * rhs_dilation - (rhs_dilation - 1);\n const int64_t overlap_count 
=\n old_split_dim_size - expanded_kernel + 1 +\n (inherent_low_padding < 0 ? inherent_low_padding : 0) +\n (inherent_high_padding < 0 ? inherent_high_padding : 0);\n VLOG(1) << \"overlap_count \" << overlap_count << \" inherent_low_padding \"\n << inherent_low_padding << \" inherent_high_padding \"\n << inherent_high_padding;\n const int64_t total_overlap_count =\n overlap_count + (inherent_low_padding > 0 ? inherent_low_padding : 0) +\n (inherent_high_padding > 0 ? inherent_high_padding : 0);\n for (int64_t i = 0; i < overlap_count; ++i) {\n HloInstruction* activations_to_use = nullptr;\n HloInstruction* activations_slice = nullptr;\n if (i == 0) {\n activations_to_use = activations_new;\n if (inherent_low_padding < 0) {\n TF_ASSIGN_OR_RETURN(\n activations_slice,\n HaloDuplicateWithSlice(\n activations_to_use, spatial_dimensions_to_split,\n activations_batch_dim,\n inherent_low_padding, 0));\n } else {\n activations_slice = activations_to_use;\n }\n } else {\n activations_to_use = activations_chunks.back();\n TF_ASSIGN_OR_RETURN(activations_slice,\n HaloDuplicateWithSlice(\n activations_to_use, spatial_dimensions_to_split,\n activations_batch_dim, -1,\n 0));\n }\n activations_chunks.push_back(activations_slice);\n }\n int64_t high_padding_to_materialize = 0;\n if (inherent_high_padding > 0) {\n high_padding_to_materialize =\n std::max(total_overlap_count -\n (std::max(overlap_count, static_cast(0)) +\n std::max(inherent_low_padding, static_cast(0))),\n static_cast(0));\n }\n for (int64_t i = 0; i < high_padding_to_materialize; ++i) {\n HloInstruction* activations_to_use = nullptr;\n activations_to_use = activations_chunks.back();\n TF_ASSIGN_OR_RETURN(\n HloInstruction * activations_slice,\n HaloDuplicateWithSlice(activations_to_use, spatial_dimensions_to_split,\n activations_batch_dim,\n -1, 0));\n activations_chunks.push_back(activations_slice);\n }\n for (int64_t i = 0; i < activations_chunks.size(); ++i) {\n std::vector input_sizes(\n 
activations_chunks[i]->shape().dimensions().begin(),\n activations_chunks[i]->shape().dimensions().end());\n input_sizes.push_back(1);\n TF_ASSIGN_OR_RETURN(activations_chunks[i],\n MakeReshapeHlo(input_sizes, activations_chunks[i]));\n VLOG(1) << \"new_spatial_dimension \" << new_spatial_dimension << \" slice \"\n << activations_chunks[i]->ToString();\n }\n TF_ASSIGN_OR_RETURN(\n activations_new,\n MakeConcatHlo(absl::MakeSpan(activations_chunks), new_spatial_dimension,\n &activations_old->metadata(),\n &activations_old->frontend_attributes()));\n std::vector kernel_sizes(kernel_new->shape().dimensions().begin(),\n kernel_new->shape().dimensions().end());\n kernel_sizes.push_back(1);\n TF_ASSIGN_OR_RETURN(kernel_new, MakeReshapeHlo(kernel_sizes, kernel_new));\n auto new_window = convolution->window();\n new_window.mutable_dimensions(GetFirstChosenSpatialDim(convolution))\n ->set_padding_high(-(rhs_dilation - 1));\n new_window.mutable_dimensions(GetFirstChosenSpatialDim(convolution))\n ->set_padding_low(0);\n new_window.mutable_dimensions(GetFirstChosenSpatialDim(convolution))\n ->set_size(CeilOfRatio(new_split_dim_size, rhs_dilation));\n auto window_dim = new_window.add_dimensions();\n window_dim->set_base_dilation(1);\n window_dim->set_size(1);\n int64_t stride = 1;\n if (inherent_low_padding > total_overlap_count) {\n stride = activations_chunks.size();\n }\n window_dim->set_stride(stride);\n window_dim->set_padding_low(0);\n window_dim->set_padding_high(0);\n window_dim->set_window_reversal(false);\n window_dim->set_window_dilation(1);\n TF_ASSIGN_OR_RETURN(\n HloInstruction * new_conv,\n MakeConvolveHlo(\n activations_new, kernel_new, convolution->feature_group_count(),\n convolution->batch_group_count(), new_window, new_dim_numbers,\n convolution->precision_config(),\n convolution->shape().element_type()));\n convolution->SetupDerivedInstruction(new_conv);\n VLOG(2) << \"New backprop filter convolution \" << new_conv->ToString();\n std::vector 
output_sizes(new_conv->shape().dimensions().begin(),\n new_conv->shape().dimensions().end());\n output_sizes.erase(output_sizes.begin() +\n new_dim_numbers.output_spatial_dimensions(\n GetFirstChosenSpatialDim(convolution)));\n TF_ASSIGN_OR_RETURN(new_conv, MakeReshapeHlo(output_sizes, new_conv));\n old_to_new_instrs_[convolution] = new_conv;\n VLOG(1) << \"Space-to-featured convolution \" << new_conv->ToString();\n std::vector dim_map(kNumMappedDims);\n dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] =\n original_conv_dims.output_batch_dimension();\n dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] =\n original_conv_dims.output_feature_dimension();\n dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] =\n original_conv_dims.output_spatial_dimensions(\n GetFirstChosenSpatialDim(convolution));\n instr_to_dim_map_[convolution] = dim_map;\n std::vector trans_dims(convolution->shape().dimensions_size());\n absl::c_iota(trans_dims, 0);\n instr_to_dim_permute_map_[new_conv] = trans_dims;\n return absl::OkStatus();\n}\nHloInstruction*\nConvolutionVisitor::DoesConvolutionFeedReduceWindowOrSelectAndScatter(\n HloInstruction* instr, int64_t depth = kReduceWindowSearchDepth) {\n if (depth == 0) {\n return nullptr;\n }\n for (auto user : instr->users()) {\n if (user->opcode() == HloOpcode::kReduceWindow ||\n user->opcode() == HloOpcode::kSelectAndScatter) {\n return user;\n }\n if (user->opcode() == HloOpcode::kConvolution ||\n user->opcode() == HloOpcode::kPad ||\n user->opcode() == HloOpcode::kTranspose ||\n user->opcode() == HloOpcode::kDot) {\n continue;\n }\n auto ret =\n DoesConvolutionFeedReduceWindowOrSelectAndScatter(user, depth - 1);\n if (ret != nullptr) {\n return ret;\n }\n }\n return nullptr;\n}\nbool ConvolutionVisitor::DoesConvolutionFeedUnpropagatableOp(\n HloInstruction* instr, int64_t depth) {\n auto key = std::make_pair(instr, depth);\n if (unpropagatability_cache_.contains(key)) {\n return unpropagatability_cache_[key];\n }\n if (depth == 0 || 
instr->user_count() == 0) {\n unpropagatability_cache_[key] = false;\n return false;\n }\n for (auto user : instr->users()) {\n if (IsOpcodeNonPropagatable(user)) {\n unpropagatability_cache_[key] = true;\n return true;\n }\n int64_t depth_to_use = depth;\n if (user->opcode() == HloOpcode::kConvolution ||\n user->opcode() == HloOpcode::kDot) {\n depth_to_use--;\n }\n if (DoesConvolutionFeedUnpropagatableOp(user, depth_to_use)) {\n unpropagatability_cache_[key] = true;\n return true;\n }\n }\n unpropagatability_cache_[key] = false;\n return false;\n}\nbool ConvolutionVisitor::IsSpaceToBatchedSpaceSizeSuitable(\n HloInstruction* instr) {\n CHECK(instr->opcode() == HloOpcode::kSelectAndScatter ||\n instr->opcode() == HloOpcode::kReduceWindow);\n auto old_producer = instr->mutable_operand(0);\n auto dim_map_val_op = instr_to_dim_map_[old_producer];\n const int64_t old_space_dim =\n dim_map_val_op[DimMapper(SpaceToBatchDimMap::kSpace0)];\n auto first_operand = old_to_new_instrs_[old_producer];\n auto permute_dims_first_operand = instr_to_dim_permute_map_[first_operand];\n const int64_t new_space_dim =\n DimLookUp(permute_dims_first_operand, old_space_dim);\n const int64_t window_size = instr->window().dimensions(old_space_dim).size();\n if (first_operand->shape().dimensions(new_space_dim) < window_size) {\n return false;\n }\n return true;\n}\nConvolutionVisitor::ConvDetails ConvolutionVisitor::GetConvolutionDetails(\n HloInstruction* convolution, ConvolutionDimensionNumbers& dim_numbers) {\n auto activations = convolution->mutable_operand(0);\n auto kernel = convolution->mutable_operand(1);\n const auto& kernel_shape = kernel->shape();\n const int64_t kernel_spatial_dim = dim_numbers.kernel_spatial_dimensions(\n GetFirstChosenSpatialDim(convolution));\n int64_t kernel_spatial_dim_size = kernel_shape.dimensions(kernel_spatial_dim);\n if (IsForwardWindowDilatedConv(convolution, dim_numbers)) {\n const int64_t window_dilation_factor =\n convolution->window()\n 
.dimensions(GetFirstChosenSpatialDim(convolution))\n .window_dilation();\n kernel_spatial_dim_size =\n (kernel_spatial_dim_size - 1) * (window_dilation_factor - 1) +\n kernel_spatial_dim_size;\n }\n std::vector spatial_dimensions_to_split =\n GetChosenSpatialDims(convolution);\n const int64_t spatial_dimension_to_split = spatial_dimensions_to_split[0];\n const int64_t input_dim_size =\n activations->shape().dimensions(spatial_dimension_to_split);\n const int64_t inherent_low_padding =\n convolution->window()\n .dimensions(GetFirstChosenSpatialDim(convolution))\n .padding_low();\n const int64_t inherent_high_padding =\n convolution->window()\n .dimensions(GetFirstChosenSpatialDim(convolution))\n .padding_high();\n const int64_t stride = convolution->window()\n .dimensions(GetFirstChosenSpatialDim(convolution))\n .stride();\n const int64_t base_dilation_factor =\n convolution->window()\n .dimensions(GetFirstChosenSpatialDim(convolution))\n .base_dilation();\n bool is_base_dilated = base_dilation_factor > 1;\n const int64_t spatial_size = input_dim_size +\n (is_base_dilated ? 0 : inherent_low_padding) +\n inherent_high_padding;\n const int64_t last_overlap = base_dilation_factor == inherent_low_padding\n ? kernel_spatial_dim_size\n : kernel_spatial_dim_size - 1;\n const int64_t halo_size = is_base_dilated\n ? last_overlap / base_dilation_factor\n : kernel_spatial_dim_size - 1;\n const int64_t high_padding_for_base_dilation =\n inherent_low_padding == 0 ? base_dilation_factor - 1\n : last_overlap % base_dilation_factor;\n const int64_t high_padding_for_conv =\n is_base_dilated ? high_padding_for_base_dilation : 0;\n const int64_t low_padding_for_conv =\n is_base_dilated && (base_dilation_factor != inherent_low_padding)\n ? 
inherent_low_padding\n : 0;\n return ConvDetails{spatial_dimensions_to_split,\n inherent_low_padding,\n inherent_high_padding,\n stride,\n spatial_size,\n base_dilation_factor,\n halo_size,\n high_padding_for_conv,\n low_padding_for_conv,\n kernel_spatial_dim_size,\n input_dim_size};\n}\nabsl::Status ConvolutionVisitor::PerformSpaceToBatchOnConvolution(\n HloInstruction* convolution) {\n if (!ConsumeFuel(\"space-to-batch-converter\", [&] {\n return \"Skipping space-to-batch propagation because fuel over\\n\";\n })) {\n return absl::OkStatus();\n }\n VLOG(1) << \"Handling conv \" << convolution->ToString();\n ConvolutionDimensionNumbers dim_numbers =\n convolution->convolution_dimension_numbers();\n ConvDetails c = GetConvolutionDetails(convolution, dim_numbers);\n int64_t activations_batch_dim = dim_numbers.input_batch_dimension();\n auto activations = convolution->mutable_operand(0);\n VLOG(1) << \"spatial size \" << c.spatial_size;\n if (c.spatial_size < 2 * ctrl_.number_of_splits) {\n return absl::OkStatus();\n }\n auto original_conv = convolution;\n const int64_t output_spatial_dim = dim_numbers.output_spatial_dimensions(\n GetFirstChosenSpatialDim(convolution));\n const int64_t output_offsets =\n convolution->shape().dimensions(output_spatial_dim);\n const int64_t output_offsets_per_split =\n CeilOfRatio(output_offsets, ctrl_.number_of_splits);\n int64_t spatial_split_size =\n CeilOfRatio(output_offsets_per_split, c.base_dilation_factor) * c.stride;\n while (spatial_split_size * ctrl_.number_of_splits - c.spatial_size < 0) {\n spatial_split_size += c.stride;\n }\n auto reduce_window_or_select_and_scatter =\n DoesConvolutionFeedReduceWindowOrSelectAndScatter(convolution);\n if (reduce_window_or_select_and_scatter != nullptr &&\n reduce_window_or_select_and_scatter->shape().IsArray() &&\n reduce_window_or_select_and_scatter->shape().rank() ==\n convolution->shape().rank()) {\n VLOG(2)\n << \"DoesConvolutionFeedReduceWindowOrSelectAndScatter returned true\";\n 
const int64_t win_stride =\n std::max(reduce_window_or_select_and_scatter->window()\n .dimensions(output_spatial_dim)\n .stride(),\n static_cast(1));\n CHECK_NE(win_stride, 0)\n << \"Bad op \" << reduce_window_or_select_and_scatter->ToString();\n CHECK_NE(c.stride, 0) << \"Bad op \" << convolution->ToString();\n while ((spatial_split_size / c.stride) % win_stride != 0) {\n spatial_split_size += c.stride;\n }\n }\n const int64_t slice_size = spatial_split_size + c.halo_size;\n const int64_t low_pad_to_handle_base_dilation =\n (c.base_dilation_factor > 1 &&\n c.base_dilation_factor == c.inherent_low_padding)\n ? 1\n : 0;\n int64_t pad_size =\n spatial_split_size * ctrl_.number_of_splits - c.spatial_size;\n bool handle_low_pad_in_first_reshape = false;\n if (pad_size > low_pad_to_handle_base_dilation) {\n pad_size -= low_pad_to_handle_base_dilation;\n handle_low_pad_in_first_reshape = true;\n }\n VLOG(1) << \"spatial_split_size \" << spatial_split_size << \" stride \"\n << c.stride << \" slice_size \" << slice_size;\n VLOG(1) << \"spatial_dimension_to_split \" << c.spatial_dimensions_to_split[0]\n << \" num_splits \" << ctrl_.number_of_splits\n << \" kernel_spatial_dim_size \" << c.kernel_spatial_dim_size;\n std::vector spatial_dimensions_to_split =\n c.spatial_dimensions_to_split;\n TF_ASSIGN_OR_RETURN(\n auto retval,\n SplitSpace(\n activations, dim_numbers, activations_batch_dim,\n c.inherent_high_padding + pad_size,\n c.base_dilation_factor == 1 ? c.inherent_low_padding\n : handle_low_pad_in_first_reshape ? 
low_pad_to_handle_base_dilation\n : 0,\n spatial_split_size, ctrl_.number_of_splits,\n &spatial_dimensions_to_split));\n HloInstruction* batch_increased_reshape = retval.first;\n convolution->SetupDerivedInstruction(batch_increased_reshape);\n VLOG(1) << \"First reshape done \" << batch_increased_reshape->ToString();\n TF_ASSIGN_OR_RETURN(\n activations,\n HaloDuplicateWithSlice(\n batch_increased_reshape, spatial_dimensions_to_split,\n activations_batch_dim,\n handle_low_pad_in_first_reshape ? 0 : low_pad_to_handle_base_dilation,\n c.halo_size));\n VLOG(1) << \"Batch merge done \" << activations->ToString();\n auto new_dim_numbers = dim_numbers;\n const int64_t rank = convolution->shape().rank();\n std::vector transpose_dims(rank);\n int dim_count = 0;\n std::map dim_translator;\n for (int j = 0; j < dim_numbers.output_spatial_dimensions_size(); ++j) {\n if (j == GetFirstChosenSpatialDim(convolution)) {\n dim_translator[dim_numbers.output_batch_dimension()] = dim_count;\n new_dim_numbers.set_output_batch_dimension(dim_count++);\n }\n dim_translator[dim_numbers.output_spatial_dimensions(j)] = dim_count;\n new_dim_numbers.set_output_spatial_dimensions(j, dim_count);\n dim_count++;\n }\n dim_translator[dim_numbers.output_feature_dimension()] = dim_count;\n new_dim_numbers.set_output_feature_dimension(dim_count);\n int p = 0;\n for (const auto& entry : dim_translator) {\n transpose_dims[p] = entry.second;\n p++;\n }\n VLOG(1) << \"New dim numbers \" << new_dim_numbers.DebugString()\n << \" batch dim \" << new_dim_numbers.input_batch_dimension();\n auto new_window = convolution->window();\n const int64_t first_dim = GetFirstChosenSpatialDim(convolution);\n for (int i = 0; i < ctrl_.count_of_dimensions_to_convert; ++i) {\n new_window.mutable_dimensions(first_dim + i)\n ->set_padding_high(c.high_padding_for_conv);\n new_window.mutable_dimensions(first_dim + i)\n ->set_padding_low(c.low_padding_for_conv);\n }\n TF_ASSIGN_OR_RETURN(\n HloInstruction * new_conv,\n 
MakeConvolveHlo(\n activations, convolution->mutable_operand(1),\n convolution->feature_group_count(), convolution->batch_group_count(),\n new_window, new_dim_numbers, convolution->precision_config(),\n convolution->shape().element_type(),\n &convolution->metadata(), &convolution->frontend_attributes()));\n convolution->SetupDerivedInstruction(new_conv);\n batch_to_space_map_[convolution->mutable_operand(0)] =\n convolution->mutable_operand(0);\n VLOG(1) << \"Space-to-batched convolution \" << new_conv->ToString();\n std::vector new_output_split_spatial_dims(\n ctrl_.count_of_dimensions_to_convert),\n old_output_split_spatial_dims(ctrl_.count_of_dimensions_to_convert);\n for (int i = 0; i < ctrl_.count_of_dimensions_to_convert; ++i) {\n old_output_split_spatial_dims[i] =\n dim_numbers.output_spatial_dimensions(first_dim + i);\n new_output_split_spatial_dims[i] =\n new_dim_numbers.output_spatial_dimensions(first_dim + i);\n }\n const int64_t output_batch_dim = new_dim_numbers.output_batch_dimension();\n auto select_val = computation_->AddInstruction(\n HloInstruction::CreateConstant(\n LiteralUtil::Zero(new_conv->shape().element_type())),\n &convolution->metadata(), &convolution->frontend_attributes());\n TF_ASSIGN_OR_RETURN(\n new_conv,\n SelectValidPortion(new_conv, original_conv, select_val, output_batch_dim,\n new_output_split_spatial_dims,\n dim_numbers.output_batch_dimension(),\n old_output_split_spatial_dims));\n old_to_new_instrs_[original_conv] = new_conv;\n std::vector dim_map(kNumMappedDims);\n dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] =\n dim_numbers.output_batch_dimension();\n dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] =\n dim_numbers.output_feature_dimension();\n dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] =\n dim_numbers.output_spatial_dimensions(\n GetFirstChosenSpatialDim(convolution));\n instr_to_dim_map_[original_conv] = dim_map;\n instr_to_dim_permute_map_[new_conv] = std::vector(transpose_dims);\n if 
(non_propagatable_instrs_.count(convolution) > 0) {\n non_propagatable_instrs_.erase(convolution);\n }\n TF_CHECK_OK(PropagateOnUsers(original_conv));\n return absl::OkStatus();\n}\n} \nabsl::StatusOr SpaceToBatchConverter::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n XLA_VLOG_LINES(\n 2, \"SpaceToBatchConverter::Run(), before:\\n\" + module->ToString());\n bool changed = false;\n for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {\n ConvolutionVisitor visitor(ctrl_, comp);\n if (visitor.Run().value()) {\n changed = true;\n }\n VLOG(1) << \"Done operating on computation\";\n }\n XLA_VLOG_LINES(2,\n \"SpaceToBatchConverter::Run(), after:\\n\" + module->ToString());\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/space_to_batch_converter.h\"\n#include \n#include \n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/test.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/types.h\"\nnamespace xla {\nnamespace {\nusing SpaceToBatchConverterTest = HloTestBase;\nnamespace op = testing::opcode_matchers;\nTEST_F(SpaceToBatchConverterTest, SimpleBatch1) {\n std::string hlo_string = R\"(\n HloModule module\nENTRY computation {\n %p0 = bf16[1,258,258,32] parameter(0)\n %p1 = bf16[3,3,32,32] parameter(1)\n ROOT %convolution = bf16[1,256,256,32] convolution(%p0, %p1), window={size=3x3}, \n dim_labels=b01f_01io->b01f\n}\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto computation = module->entry_computation();\n SpaceToBatchConverter converter(\n SpaceToBatchController{true, true, true, true, 8});\n ASSERT_TRUE(converter.Run(module.get()).value());\n HloInstruction* root = computation->root_instruction();\n EXPECT_THAT(root, op::Transpose());\n EXPECT_THAT(root->operand(0), 
op::Slice());\n auto reshape = root->operand(0)->operand(0);\n EXPECT_THAT(reshape, op::Reshape());\n auto previous_reshape = reshape->operand(0);\n EXPECT_THAT(previous_reshape, op::Reshape());\n EXPECT_THAT(previous_reshape->operand(0)->operand(1), op::Convolution());\n const int64_t batch_dim = previous_reshape->operand(0)\n ->operand(1)\n ->convolution_dimension_numbers()\n .output_batch_dimension();\n EXPECT_GT(previous_reshape->operand(0)->shape().dimensions(batch_dim), 1);\n}\nTEST_F(SpaceToBatchConverterTest, SimpleBatch1ConvXpose) {\n std::string hlo_string = R\"(\n HloModule module\nENTRY computation {\n %p0 = bf16[1,258,258,32] parameter(0)\n %p1 = bf16[3,3,32,32] parameter(1)\n %convolution = bf16[1,256,256,32] convolution(%p0, %p1), window={size=3x3}, \n dim_labels=b01f_01io->b01f\n ROOT tr = bf16[1,256,256,32] transpose(%convolution), dimensions={0,2,1,3}\n}\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto computation = module->entry_computation();\n SpaceToBatchConverter converter(\n SpaceToBatchController{true, true, true, true, 8});\n ASSERT_TRUE(converter.Run(module.get()).value());\n HloInstruction* root = computation->root_instruction();\n EXPECT_THAT(root, op::Transpose());\n EXPECT_THAT(root->operand(0), op::Slice());\n auto reshape = root->operand(0)->operand(0);\n EXPECT_THAT(reshape, op::Reshape());\n auto previous_reshape = reshape->operand(0);\n EXPECT_THAT(previous_reshape, op::Reshape());\n EXPECT_THAT(previous_reshape->operand(0), op::Select());\n EXPECT_THAT(previous_reshape->operand(0)->operand(1), op::Convolution());\n}\nTEST_F(SpaceToBatchConverterTest, SimpleBatch1WithReduceWindow) {\n std::string hlo_string = R\"(\n HloModule module \n adder (lhs: bf16[], rhs: bf16[]) -> bf16[] {\n lhs = bf16[] parameter(0)\n rhs = bf16[] parameter(1)\n ROOT add = bf16[] add(lhs, rhs)\n }\n ENTRY computation {\n %p0 = bf16[1,258,258,32] parameter(0)\n %p1 = bf16[3,3,32,32] parameter(1)\n 
%convolution = bf16[1,256,256,32] convolution(%p0, %p1), window={size=3x3},\n dim_labels=b01f_01io->b01f\n %constant = bf16[3] constant({1.0, 2.0, 3.0})\n %tuple = (bf16[1,256,256,32], bf16[3])tuple(%convolution, %constant)\n ROOT %gte = bf16[1,256,256,32] get-tuple-element(%tuple), index=0\n %gte2 = bf16[3]get-tuple-element(%tuple), index=1\n %init = bf16[] constant(1.0)\n %reduce-window = bf16[3] reduce-window(bf16[3] %gte2, bf16[] %init),\n window={size=1}, to_apply=%adder\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n SpaceToBatchConverter converter(\n SpaceToBatchController{true, true, true, true, 8});\n ASSERT_TRUE(converter.Run(module.get()).value());\n}\nTEST_F(SpaceToBatchConverterTest, SimpleBatch2) {\n std::string hlo_string = R\"(\n HloModule module\n ENTRY computation {\n %p0 = bf16[2,258,258,32] parameter(0)\n %p1 = bf16[3,3,32,32] parameter(1)\n ROOT %convolution = bf16[2,256,256,32] convolution(%p0, %p1), window={size=3x3},\n dim_labels=b01f_01io->b01f\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n SpaceToBatchConverter converter(\n SpaceToBatchController{true, true, true, true, 1});\n ASSERT_FALSE(converter.Run(module.get()).value());\n}\nTEST_F(SpaceToBatchConverterTest, UnpropagatableOp) {\n std::string hlo_string = R\"(\n HloModule module\n ENTRY comp {\n %reduce-window = bf16[1,76,76,64]{3,2,1,0} parameter(0)\n %convert.13 = bf16[3,3,64,64]{3,2,1,0} parameter(1)\n %convolution.1 = bf16[64,76,76,1]{0,2,1,3} convolution( \n %reduce-window, %convert.13), window={size=3x3 pad=1_1x1_1}, \n dim_labels=b01f_01io->f01b\n ROOT custom-call.5079 = bf16[64,152,152,1]{0,2,1,3} custom-call(%convolution.1),\n custom_call_target=\"ResizeNearest\"\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n SpaceToBatchConverter converter(\n SpaceToBatchController{true, true, true, true, 1});\n 
ASSERT_FALSE(converter.Run(module.get()).value());\n}\nTEST_F(SpaceToBatchConverterTest, Batch1WithStrideAndPad) {\n std::string hlo_string = R\"(\n HloModule module\n ENTRY computation {\n %p0 = bf16[1,224,224,3]{3,2,1,0} parameter(0)\n %p1 = bf16[7,7,3,64]{3,2,1,0} parameter(1)\n ROOT %convolution.3 = bf16[1,112,112,64]{3,2,1,0} convolution(%p0, %p1), \n window={size=7x7 stride=2x2 pad=3_3x3_3}, dim_labels=b01f_01io->b01f\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto computation = module->entry_computation();\n SpaceToBatchConverter converter(\n SpaceToBatchController{true, true, true, true, 4});\n ASSERT_TRUE(converter.Run(module.get()).value());\n HloInstruction* root = computation->root_instruction();\n EXPECT_THAT(root, op::Transpose());\n EXPECT_THAT(root->operand(0), op::Slice());\n auto reshape = root->operand(0)->operand(0);\n EXPECT_THAT(reshape, op::Reshape());\n auto previous_reshape = reshape->operand(0);\n EXPECT_THAT(previous_reshape, op::Reshape());\n EXPECT_THAT(previous_reshape->operand(0)->operand(1), op::Convolution());\n const int64_t batch_dim = previous_reshape->operand(0)\n ->operand(1)\n ->convolution_dimension_numbers()\n .output_batch_dimension();\n EXPECT_GT(previous_reshape->operand(0)->shape().dimensions(batch_dim), 4);\n}\nTEST_F(SpaceToBatchConverterTest, Batch1WithBaseDilation) {\n std::string hlo_string = R\"(\n HloModule module\nENTRY computation {\n %p2 = bf16[1,28,28,128]{3,0,2,1} parameter(0)\n %p3 = bf16[1,1,512,128]{3,2,1,0} parameter(1)\n ROOT %c = bf16[1,56,56,512]{3,0,2,1} convolution(%p2, %p3),\n window={size=1x1 pad=0_1x0_1 lhs_dilate=2x2 rhs_reversal=1x1},\n dim_labels=b01f_01oi->b01f\n}\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto computation = module->entry_computation();\n SpaceToBatchConverter converter(\n SpaceToBatchController{true, true, true, true, 8});\n 
ASSERT_TRUE(converter.Run(module.get()).value());\n HloInstruction* root = computation->root_instruction();\n EXPECT_THAT(root, op::Transpose());\n EXPECT_THAT(root->operand(0), op::Slice());\n auto reshape = root->operand(0)->operand(0);\n EXPECT_THAT(reshape, op::Reshape());\n auto previous_reshape = reshape->operand(0);\n EXPECT_THAT(previous_reshape, op::Reshape());\n EXPECT_THAT(previous_reshape->operand(0)->operand(1), op::Convolution());\n const int64_t batch_dim = previous_reshape->operand(0)\n ->operand(1)\n ->convolution_dimension_numbers()\n .output_batch_dimension();\n EXPECT_GT(previous_reshape->operand(0)->shape().dimensions(batch_dim), 4);\n}\nTEST_F(SpaceToBatchConverterTest, PropagateThroughDot) {\n std::string hlo_string = R\"(\n HloModule module\n ENTRY computation {\n %p0 = bf16[1,258,258,32] parameter(0)\n %p1 = bf16[3,3,32,32] parameter(1)\n %convolution = bf16[1,256,256,32] convolution(%p0, %p1), window={size=3x3},\n dim_labels=b01f_01io->b01f\n %p2 = bf16[32,32] parameter(2)\n ROOT %dot.5010 = bf16[1,256,256,32] dot(%convolution, %p2),\n lhs_contracting_dims={3},\n rhs_contracting_dims={0}\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n SpaceToBatchConverter converter(\n SpaceToBatchController{true, true, true, true, 8});\n ASSERT_TRUE(converter.Run(module.get()).value());\n}\nTEST_F(SpaceToBatchConverterTest, PropagateOnTrivialReduce) {\n std::string hlo_string = R\"(\n HloModule module\n %region_1.37 (Arg_0.38: f32[], Arg_1.39: f32[]) -> f32[] {\n %Arg_0.38 = f32[] parameter(0)\n %Arg_1.39 = f32[] parameter(1)\n ROOT %add.40 = f32[] add(f32[] %Arg_0.38, f32[] %Arg_1.39)\n }\n ENTRY computation {\n %p0 = bf16[7,320,800,3]{3,2,1,0} parameter(0)\n %p1 = bf16[3,3,3,32]{3,2,1,0} parameter(1)\n %c = f32[7,160,400,32]{3,2,1,0} convolution( %p0, %p1),\n window={size=3x3 stride=2x2 pad=0_1x0_1}, dim_labels=b01f_01io->b01f\n %constant.5 = f32[] constant(0)\n ROOT %reduce.41 = 
f32[7,160,400]{2,1,0} reduce(%c, %constant.5), dimensions={3}, to_apply=%region_1.37\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto computation = module->entry_computation();\n SpaceToBatchConverter converter(\n SpaceToBatchController{true, true, true, true, 8});\n ASSERT_TRUE(converter.Run(module.get()).value());\n HloInstruction* root = computation->root_instruction();\n EXPECT_THAT(root, op::Transpose());\n EXPECT_THAT(root->operand(0)->operand(0)->operand(0)->operand(0),\n op::Reduce());\n auto new_reduce = root->operand(0)->operand(0)->operand(0)->operand(0);\n EXPECT_EQ(new_reduce->shape().dimensions(1),\n 7 * 8);\n}\nTEST_F(SpaceToBatchConverterTest, DoNotPropagateOnTupleReduce) {\n std::string hlo_string = R\"(\n HloModule module\n%minmax_func.2717 {\n %lhs_value.2718 = f32[] parameter(0)\n %rhs_value.2720 = f32[] parameter(2)\n %compare.2722 = pred[] compare(f32[] %lhs_value.2718, f32[] %rhs_value.2720), direction=GE\n %select.2723 = f32[] select(pred[] %compare.2722, f32[] %lhs_value.2718, f32[] %rhs_value.2720)\n %compare.2725 = pred[] compare(f32[] %lhs_value.2718, f32[] %rhs_value.2720), direction=EQ\n %lhs_index.2719 = f32[] parameter(1)\n %rhs_index.2721 = f32[] parameter(3)\n %minimum.2726 = f32[] minimum(f32[] %lhs_index.2719, f32[] %rhs_index.2721)\n %select.2724 = f32[] select(pred[] %compare.2722, f32[] %lhs_index.2719, f32[] %rhs_index.2721)\n %select.2727 = f32[] select(pred[] %compare.2725, f32[] %minimum.2726, f32[] %select.2724)\n ROOT %tuple.4 = (f32[], f32[]) tuple(f32[] %select.2723, f32[] %select.2727)\n }\n ENTRY computation {\n %p0 = bf16[7,320,800,3]{3,2,1,0} parameter(0)\n %p1 = bf16[3,3,3,32]{3,2,1,0} parameter(1)\n %c = f32[7,160,400,32]{3,2,1,0} convolution( %p0, %p1),\n window={size=3x3 stride=2x2 pad=0_1x0_1}, dim_labels=b01f_01io->b01f\n %constant.5 = f32[] constant(0)\n %constant.6 = f32[] constant(1)\n ROOT %reduce.36 = (f32[7,160,400]{2,1,0}, 
f32[7,160,400]{2,1,0}) reduce(%c, %c,\n %constant.5, %constant.6), dimensions={3}, to_apply=%minmax_func.2717\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto computation = module->entry_computation();\n SpaceToBatchConverter converter(\n SpaceToBatchController{true, true, true, true, 8});\n ASSERT_TRUE(converter.Run(module.get()).value());\n HloInstruction* root = computation->root_instruction();\n EXPECT_THAT(root, op::Reduce());\n}\nTEST_F(SpaceToBatchConverterTest, ReduceDegenerateDim) {\n std::string hlo_string = R\"(\n HloModule module\n %region_42.4982 {\n %Arg_0.38 = f32[] parameter(0)\n %Arg_1.39 = f32[] parameter(1)\n ROOT %add.40 = f32[] add(f32[] %Arg_0.38, f32[] %Arg_1.39)\n }\n ENTRY computation {\n %p0 = f32[2,1,84,84,3]{4,3,2,1,0} parameter(0)\n %p1 = f32[3,3,3,3,32]{4,3,2,1,0} parameter(1)\n %constant.10559 = f32[] constant(0)\n %convolution.98 = f32[2,1,84,84,32]{4,3,2,1,0} convolution(%p0, %p1), \n window={size=3x3x3 pad=1_1x1_1x1_1}, dim_labels=b012f_012io->b012f\n ROOT %reduce.2606 = f32[2,84,84]{2,1,0} reduce(f32[2,1,84,84,32]{4,3,2,1,0} \n %convolution.98, f32[] %constant.10559), dimensions={1,4}, to_apply=%region_42.4982\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto computation = module->entry_computation();\n SpaceToBatchConverter converter(\n SpaceToBatchController{true, true, true, true, 8});\n ASSERT_TRUE(converter.Run(module.get()).value());\n HloInstruction* root = computation->root_instruction();\n EXPECT_THAT(root, op::Transpose());\n EXPECT_THAT(root->operand(0), op::Slice());\n}\nTEST_F(SpaceToBatchConverterTest, PropagateOnReduce) {\n std::string hlo_string = R\"(\nHloModule xla_computation_unknown.14\nregion_0.134 {\n Arg_0.135 = f32[] parameter(0)\n Arg_1.136 = f32[] parameter(1)\n ROOT add.137 = f32[] add(Arg_0.135, Arg_1.136)\n}\nENTRY main.140 {\n p0 = bf16[1,512,32,128]{3,2,1,0} parameter(0)\n p1 
= f32[3,3,128,128]{3,2,1,0} parameter(1)\n %convolution.755 = f32[1,512,32,128]{3,2,1,0}\n convolution(p0, p1),\n window={size=3x3 pad=1_1x1_1 rhs_reversal=1x1}, dim_labels=b01f_01oi->b01f\n %constant.19458 = f32[] constant(0)\n ROOT %reduce.1354 = f32[128]{0} reduce(%convolution.755, %constant.19458),\n dimensions={0,1,2}, to_apply=%region_0.134\n}\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto computation = module->entry_computation();\n SpaceToBatchConverter converter(\n SpaceToBatchController{true, true, true, true, 8});\n ASSERT_TRUE(converter.Run(module.get()).value());\n HloInstruction* root = computation->root_instruction();\n EXPECT_THAT(root, op::Reduce());\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/space_to_batch_converter.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/space_to_batch_converter_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1172,"cells":{"ID":{"kind":"string","value":"b5a9efe7-04e9-4757-ada9-15a588372915"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"host_offload_utils"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/profiler/utils/host_offload_utils.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/host_offload_utils_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/core/profiler/utils/host_offload_utils.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/log/log.h\"\n#include \"absl/strings/match.h\"\n#include 
\"absl/strings/str_cat.h\"\n#include \"absl/strings/str_format.h\"\n#include \"absl/strings/str_split.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/tsl/profiler/utils/timespan.h\"\n#include \"tensorflow/core/profiler/utils/trace_utils.h\"\n#include \"tensorflow/core/profiler/utils/xplane_builder.h\"\n#include \"tensorflow/core/profiler/utils/xplane_schema.h\"\n#include \"tensorflow/core/profiler/utils/xplane_visitor.h\"\nnamespace tensorflow {\nnamespace profiler {\nbool HostOffloadEventProcessor::IsHostOffloadOpName(\n const XEventVisitor& event) const {\n static constexpr absl::string_view keywords[] = {\"copy-start\",\n \"copy-done\",\n \"dynamic-slice-start\",\n \"dynamic-slice-done\",\n \"dynamic-update-slice-start\",\n \"dynamic-update-slice-done\"};\n for (const auto& keyword : keywords) {\n if (absl::StrContains(event.DisplayName(), keyword) &&\n absl::StrContains(event.Name(), host_memory_label_)) {\n return true;\n }\n }\n return false;\n}\nstd::string HostOffloadEventProcessor::GetOffloadInstructionID(\n absl::string_view op_name) const {\n std::vector op_name_vec = absl::StrSplit(op_name, '.');\n if (op_name_vec.size() < 2) {\n return \"0\";\n }\n return op_name_vec.back();\n}\nstd::string HostOffloadEventProcessor::GetOffloadInstructionName(\n absl::string_view op_name) const {\n std::string display_id = GetOffloadInstructionID(op_name);\n size_t startPos = op_name.find(\"-start\");\n size_t donePos = op_name.find(\"-done\");\n absl::string_view display_opname;\n if (startPos != absl::string_view::npos) {\n display_opname = op_name.substr(0, startPos);\n } else if (donePos != absl::string_view::npos) {\n display_opname = op_name.substr(0, donePos);\n } else {\n LOG(WARNING) << \"Invalid op name: \" << op_name;\n display_opname = op_name;\n }\n return absl::StrCat(\"offload-\", display_opname, \".\", display_id);\n}\nvoid HostOffloadEventProcessor::ProcessHostOffloadOpEvent(\n const XEventVisitor& event, std::optional group_id) {\n 
std::string display_opname = GetOffloadInstructionName(event.DisplayName());\n auto [iter, inserted] = seen_events_.try_emplace(display_opname);\n std::queue& events = iter->second;\n if (absl::StrContains(event.DisplayName(), \"-start\")) {\n events.push(&event);\n return;\n } else if (absl::StrContains(event.DisplayName(), \"-done\")) {\n if (events.empty()) {\n LOG(INFO) << \"No corresponding start event found for \"\n << event.DisplayName();\n return;\n }\n const XEventVisitor* start_event = events.front();\n events.pop();\n tsl::profiler::Timespan event_span = tsl::profiler::Timespan::FromEndPoints(\n start_event->GetTimespan().begin_ps(), event.GetTimespan().end_ps());\n int line_builder_index = -1;\n uint64_t minimum_end_time_frontier = event_span.begin_ps();\n for (int i = 0; i < host_offload_op_line_builders_.size(); ++i) {\n if (host_offload_op_line_builders_[i].event_end_time_frontier_ns <=\n minimum_end_time_frontier) {\n line_builder_index = i;\n minimum_end_time_frontier =\n host_offload_op_line_builders_[i].event_end_time_frontier_ns;\n }\n }\n constexpr int kMaxHostOffloadOpLinesSize =\n kThreadIdHostOffloadOpEnd - kThreadIdHostOffloadOpStart + 1;\n if (line_builder_index == -1) {\n if (host_offload_op_line_builders_.size() < kMaxHostOffloadOpLinesSize) {\n XLineBuilder lb = plane_builder_->GetOrCreateLine(\n kThreadIdHostOffloadOpStart +\n host_offload_op_line_builders_.size());\n lb.SetName(absl::StrFormat(\"%s row %d\", kHostOffloadOpLineName,\n host_offload_op_line_builders_.size()));\n lb.SetTimestampNs(start_timestamp_ns_);\n host_offload_op_line_builders_.push_back(\n {std::move(lb), event_span.end_ps()});\n }\n line_builder_index = host_offload_op_line_builders_.size() - 1;\n }\n host_offload_op_line_builders_[line_builder_index]\n .event_end_time_frontier_ns =\n std::max(host_offload_op_line_builders_[line_builder_index]\n .event_end_time_frontier_ns,\n event_span.end_ps());\n XEventMetadata* host_offload_copy_metadata =\n 
plane_builder_->CreateEventMetadata();\n host_offload_copy_metadata->set_display_name(display_opname);\n XEventBuilder event_builder =\n host_offload_op_line_builders_[line_builder_index]\n .line_builder.AddEvent(*host_offload_copy_metadata);\n event_builder.SetTimespan(event_span);\n const XStatMetadata& async_stat = *plane_builder_->GetOrCreateStatMetadata(\n GetStatTypeStr(StatType::kIsAsync));\n event_builder.AddStatValue(async_stat, 1);\n }\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/host_offload_utils.h\"\n#include \n#include \n#include \n#include \"xla/shape_util.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace host_offload_utils {\nnamespace {\nclass HostOffloadUtilsTest : public HloTestBase {};\nTEST_F(HostOffloadUtilsTest, SimpleGetSuccessorsGetPredecessorsTest) {\n const std::string& hlo_string = R\"(\nHloModule my_module\nENTRY main {\n data_param = f32[1,2048,2048] parameter(0)\n index_param = s32[] parameter(1)\n constant_f32_0 = f32[] constant(0)\n constant_s32_0 = s32[] constant(0)\n broadcast = f32[2,2048,2048] broadcast(constant_f32_0), dimensions={}\n offload_custom_call = f32[1,2048,2048] custom-call(data_param), custom_call_target=\"MoveToHost\"\n dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(broadcast, offload_custom_call, index_param, constant_s32_0, constant_s32_0)\n dynamic_slice = f32[1,2048,2048] dynamic-slice(dynamic_update_slice, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}\n ROOT load_custom_call = f32[1,2048,2048] custom-call(dynamic_slice), custom_call_target=\"MoveToDevice\"\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n HloInstruction* data_param = FindInstruction(module.get(), \"data_param\");\n ASSERT_NE(data_param, nullptr);\n HloInstruction* offload_custom_call =\n FindInstruction(module.get(), 
\"offload_custom_call\");\n ASSERT_NE(offload_custom_call, nullptr);\n TF_ASSERT_OK_AND_ASSIGN(\n std::vector succ,\n GetSuccessors(InstructionAndShapeIndex(data_param, {})));\n std::vector expected_succ = {\n InstructionAndShapeIndex(offload_custom_call, {})};\n EXPECT_EQ(succ, expected_succ);\n std::vector pred =\n GetPredecessors(InstructionAndShapeIndex(offload_custom_call, {}));\n std::vector expected_pred = {\n InstructionAndShapeIndex(data_param, {})};\n EXPECT_EQ(pred, expected_pred);\n}\nTEST_F(HostOffloadUtilsTest, ComputationGetSuccessorsGetPredecessorsTest) {\n const std::string& hlo_string = R\"(\nHloModule my_module\nother_computation {\n param_0 = f32[2048] parameter(0)\n param_1 = f32[2048] parameter(1)\n ROOT tuple = (f32[2048], f32[2048]) tuple(param_0, param_1)\n}\nENTRY main {\n data_param = f32[2048] parameter(0)\n other_param = f32[2048] parameter(1)\n offload_custom_call = f32[2048] custom-call(data_param), custom_call_target=\"MoveToHost\"\n call = (f32[2048], f32[2048]) call(offload_custom_call, other_param), to_apply=other_computation\n gte_0 = f32[2048] get-tuple-element(call), index=0\n gte_1 = f32[2048] get-tuple-element(call), index=1\n ROOT load_custom_call = f32[2048] custom-call(gte_0), custom_call_target=\"MoveToDevice\"\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n HloInstruction* call = FindInstruction(module.get(), \"call\");\n ASSERT_NE(call, nullptr);\n HloInstruction* gte_0 = FindInstruction(module.get(), \"gte_0\");\n ASSERT_NE(gte_0, nullptr);\n HloInstruction* tuple = FindInstruction(module.get(), \"tuple\");\n ASSERT_NE(tuple, nullptr);\n TF_ASSERT_OK_AND_ASSIGN(std::vector succ,\n GetSuccessors(InstructionAndShapeIndex(call, {0})));\n std::vector expected_succ = {\n InstructionAndShapeIndex(gte_0, {})};\n EXPECT_EQ(succ, expected_succ);\n std::vector pred =\n GetPredecessors(InstructionAndShapeIndex(call, {0}));\n std::vector expected_pred = {\n 
InstructionAndShapeIndex(tuple, {0})};\n EXPECT_EQ(pred, expected_pred);\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/utils/host_offload_utils.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_offload_utils_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1173,"cells":{"ID":{"kind":"string","value":"824e9a92-d73e-40d9-bb47-186cbd4b9da5"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"host_memory_transfer_asyncifier"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/host_memory_transfer_asyncifier.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/host_memory_transfer_asyncifier_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/host_memory_transfer_asyncifier.h\"\n#include \n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/str_format.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/dfs_hlo_visitor_with_default.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nclass HostMemoryTransferAsyncifierVisitor : public DfsHloVisitorWithDefault {\n public:\n explicit HostMemoryTransferAsyncifierVisitor(int64_t host_memory_space_color)\n : kHostMemorySpaceColor(host_memory_space_color) {}\n bool Changed() const { return changed_; }\n 
absl::Status DefaultAction(HloInstruction* hlo_instruction) override {\n return absl::OkStatus();\n }\n absl::Status HandleDynamicSlice(HloInstruction* dynamic_slice) override {\n HloInstruction* dynamic_slice_operand = dynamic_slice->mutable_operand(0);\n if (!dynamic_slice->shape().has_layout()) {\n return InternalStrCat(dynamic_slice->name(), \" does not have a layout.\");\n }\n if (!dynamic_slice_operand->shape().has_layout()) {\n return InternalStrCat(dynamic_slice->name(), \"'s operand, \",\n dynamic_slice_operand->name(),\n \", does not have a layout.\");\n }\n VLOG(3) << absl::StreamFormat(\n \"\\\"%s\\\" from S(%d) to S(%d)\", dynamic_slice->name(),\n dynamic_slice_operand->shape().layout().memory_space(),\n dynamic_slice->shape().layout().memory_space());\n if (dynamic_slice_operand->shape().layout().memory_space() !=\n kHostMemorySpaceColor) {\n return absl::OkStatus();\n }\n if (dynamic_slice->shape().layout().memory_space() !=\n xla::Layout::kDefaultMemorySpace) {\n return absl::OkStatus();\n }\n const Shape context_shape = ShapeUtil::MakeScalarShape(U32);\n const Shape transfer_bytes_shape = ShapeUtil::MakeScalarShape(S32);\n TF_ASSIGN_OR_RETURN(\n HloInstruction * async_done,\n dynamic_slice->parent()->CreateAsyncInstructions(\n dynamic_slice, {context_shape, transfer_bytes_shape}));\n VLOG(1) << \"DynamicSlice \\\"\" << dynamic_slice->ToString()\n << \"\\\" is slicing from host memory. 
Converting to async \"\n << async_done->ToString();\n MarkAsChanged();\n return absl::OkStatus();\n }\n absl::Status HandleDynamicUpdateSlice(\n HloInstruction* dynamic_update_slice) override {\n HloInstruction* dynamic_update_slice_operand =\n dynamic_update_slice->mutable_operand(0);\n HloInstruction* dynamic_update_slice_update =\n dynamic_update_slice->mutable_operand(1);\n if (!dynamic_update_slice->shape().has_layout()) {\n return InternalStrCat(dynamic_update_slice->name(),\n \" does not have a layout.\");\n }\n if (!dynamic_update_slice_operand->shape().has_layout()) {\n return InternalStrCat(dynamic_update_slice->name(), \"'s operand, \",\n dynamic_update_slice_operand->name(),\n \", does not have a layout.\");\n }\n if (!dynamic_update_slice_update->shape().has_layout()) {\n return InternalStrCat(dynamic_update_slice->name(), \"'s update, \",\n dynamic_update_slice_update->name(),\n \", does not have a layout.\");\n }\n if (dynamic_update_slice_update->shape().layout().memory_space() !=\n xla::Layout::kDefaultMemorySpace) {\n return absl::OkStatus();\n }\n if (dynamic_update_slice->shape().layout().memory_space() !=\n kHostMemorySpaceColor) {\n return absl::OkStatus();\n }\n if (dynamic_update_slice_operand->shape().layout().memory_space() !=\n dynamic_update_slice->shape().layout().memory_space()) {\n return InternalStrCat(\n \"Unexpected that \", dynamic_update_slice_operand->name(),\n \"'s memory space is not the same as the dynamic-update-slice.\");\n }\n const Shape context_shape = ShapeUtil::MakeScalarShape(U32);\n TF_ASSIGN_OR_RETURN(HloInstruction * async_done,\n dynamic_update_slice->parent()->CreateAsyncInstructions(\n dynamic_update_slice, {context_shape}));\n VLOG(1) << \"DynamicUpdateSlice \\\"\" << dynamic_update_slice->ToString()\n << \"\\\" is slicing into host memory space. 
Converting to async \"\n << async_done->ToString();\n MarkAsChanged();\n return absl::OkStatus();\n }\n absl::Status HandleCopy(HloInstruction* copy) override {\n HloInstruction* operand = copy->mutable_operand(0);\n if (!operand->shape().has_layout()) {\n return InternalStrCat(operand->name(), \" does not have a layout.\");\n }\n if (!copy->shape().has_layout()) {\n return InternalStrCat(copy->name(), \" does not have a layout.\");\n }\n const auto copy_src_memory_space = operand->shape().layout().memory_space();\n const auto copy_dst_memory_space = copy->shape().layout().memory_space();\n if (!((copy_src_memory_space == kHostMemorySpaceColor &&\n copy_dst_memory_space == xla::Layout::kDefaultMemorySpace) ||\n (copy_src_memory_space == xla::Layout::kDefaultMemorySpace &&\n copy_dst_memory_space == kHostMemorySpaceColor))) {\n VLOG(2)\n << \"Skipping copy because it is not a copy between device memory and \"\n \"host memory: \"\n << copy->ToString();\n return absl::OkStatus();\n }\n const Shape context_shape = ShapeUtil::MakeScalarShape(U32);\n TF_ASSIGN_OR_RETURN(\n HloInstruction * async_done,\n copy->parent()->CreateAsyncInstructions(copy, {context_shape}));\n VLOG(1)\n << \"Copy \\\"\" << copy->name()\n << \"\\\" is between device and host memory space. 
Converting to async \"\n << async_done->ToString();\n MarkAsChanged();\n return absl::OkStatus();\n }\n private:\n const int64_t kHostMemorySpaceColor;\n bool changed_ = false;\n void MarkAsChanged() { changed_ = true; }\n};\n} \nabsl::StatusOr HostMemoryTransferAsyncifier::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n HostMemoryTransferAsyncifierVisitor visitor(kHostMemorySpaceColor);\n for (HloComputation* computation : module->MakeNonfusionComputations()) {\n TF_RETURN_IF_ERROR(computation->Accept(&visitor));\n }\n return visitor.Changed();\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/host_memory_transfer_asyncifier.h\"\n#include \n#include \n#include \n#include \n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/pattern_matcher.h\"\n#include \"xla/service/pattern_matcher_gmock.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nnamespace m = ::xla::match;\nclass HostMemoryTransferAsyncifierTest : public HloTestBase {\n protected:\n absl::StatusOr RunAsyncifier(absl::string_view hlo_string) {\n TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSIGN_OR_RETURN(bool changed, RunAsyncifier(module.get()));\n return changed;\n }\n absl::StatusOr RunAsyncifier(HloModule* module) {\n TF_EXPECT_OK(verifier().Run(module).status());\n if (module->has_schedule()) {\n return absl::InternalError(\"Expected a non-scheduled module\");\n }\n HostMemoryTransferAsyncifier asyncifier(kHostMemorySpaceColor);\n return asyncifier.Run(module);\n }\n private:\n static constexpr int64_t 
kHostMemorySpaceColor{5};\n};\nTEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromHostToHost) {\n const std::string& hlo_string = R\"(\nHloModule MyModule\nENTRY main {\n host_operand = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)\n host_update = f32[1,1,1]{2,1,0:T(2,128)S(5)} parameter(1)\n constant_0 = s32[] constant(0)\n ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)S(5)} dynamic-update-slice(host_operand, host_update, constant_0, constant_0, constant_0)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));\n EXPECT_FALSE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::DynamicUpdateSlice()));\n}\nTEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromDeviceToDevice) {\n const std::string& hlo_string = R\"(\nHloModule MyModule\nENTRY main {\n operand = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)\n update = f32[1,1,1]{2,1,0:T(2,128)} parameter(1)\n constant_0 = s32[] constant(0)\n ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)} dynamic-update-slice(operand, update, constant_0, constant_0, constant_0)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));\n EXPECT_FALSE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::DynamicUpdateSlice()));\n}\nTEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromHostToDevice) {\n const std::string& hlo_string = R\"(\nHloModule MyModule\nENTRY main {\n operand = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)\n host_update = f32[1,1,1]{2,1,0:T(2,128)S(5)} parameter(1)\n constant_0 = s32[] constant(0)\n ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)} dynamic-update-slice(operand, host_update, constant_0, constant_0, constant_0)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n 
ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));\n EXPECT_FALSE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::DynamicUpdateSlice()));\n}\nTEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromDeviceToHost) {\n const std::string& hlo_string = R\"(\nHloModule MyModule\nENTRY main {\n host_operand = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)\n update = f32[1,1,1]{2,1,0:T(2,128)} parameter(1)\n constant_0 = s32[] constant(0)\n ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)S(5)} dynamic-update-slice(host_operand, update, constant_0, constant_0, constant_0)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* dynamic_update_slice_start;\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n GmockMatch(m::Op()\n .WithOpcode(HloOpcode::kAsyncDone)\n .WithOperand(0, m::Op(&dynamic_update_slice_start)\n .WithOpcode(HloOpcode::kAsyncStart))));\n ASSERT_EQ(dynamic_update_slice_start->called_computations().size(), 1);\n HloComputation* async_dynamic_slice_computation =\n dynamic_update_slice_start->called_computations().at(0);\n EXPECT_THAT(async_dynamic_slice_computation->root_instruction(),\n GmockMatch(m::DynamicUpdateSlice()));\n}\nTEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromHostToHost) {\n const std::string& hlo_string = R\"(\nHloModule MyModule\nENTRY main {\n host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)\n constant_0 = s32[] constant(0)\n ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)S(5)} dynamic-slice(host_memory, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));\n 
EXPECT_FALSE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::DynamicSlice()));\n}\nTEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromDeviceToDevice) {\n const std::string& hlo_string = R\"(\nHloModule MyModule\nENTRY main {\n device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)\n constant_0 = s32[] constant(0)\n ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)} dynamic-slice(device, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));\n EXPECT_FALSE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::DynamicSlice()));\n}\nTEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromDeviceToHost) {\n const std::string& hlo_string = R\"(\nHloModule MyModule\nENTRY main {\n device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)\n constant_0 = s32[] constant(0)\n ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)S(5)} dynamic-slice(device, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));\n EXPECT_FALSE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::DynamicSlice()));\n}\nTEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromHostToDevice) {\n const std::string& hlo_string = R\"(\nHloModule MyModule\nENTRY main {\n host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)\n constant_0 = s32[] constant(0)\n ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)} dynamic-slice(host_memory, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));\n 
EXPECT_TRUE(changed);\n HloInstruction* dynamic_slice_start;\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n GmockMatch(m::Op()\n .WithOpcode(HloOpcode::kAsyncDone)\n .WithOperand(0, m::Op(&dynamic_slice_start)\n .WithOpcode(HloOpcode::kAsyncStart))));\n ASSERT_EQ(dynamic_slice_start->called_computations().size(), 1);\n HloComputation* async_dynamic_slice_computation =\n dynamic_slice_start->called_computations().at(0);\n EXPECT_THAT(async_dynamic_slice_computation->root_instruction(),\n GmockMatch(m::DynamicSlice()));\n}\nTEST_F(HostMemoryTransferAsyncifierTest, CopyFromHostToHost) {\n const std::string& hlo_string = R\"(\nHloModule MyModule\nENTRY main {\n host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)\n ROOT copy = f32[32,1,1]{2,1,0:T(2,128)S(5)} copy(host_memory)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));\n EXPECT_FALSE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::Copy()));\n}\nTEST_F(HostMemoryTransferAsyncifierTest, CopyFromDeviceToDevice) {\n const std::string& hlo_string = R\"(\nHloModule MyModule\nENTRY main {\n device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)\n ROOT copy = f32[32,1,1]{2,1,0:T(2,128)} copy(device)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));\n EXPECT_FALSE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::Copy()));\n}\nTEST_F(HostMemoryTransferAsyncifierTest, DISABLED_CopyFromDeviceToHost) {\n const std::string& hlo_string = R\"(\nHloModule MyModule\nENTRY main {\n device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)\n ROOT copy = f32[32,1,1]{2,1,0:T(2,128)S(5)} copy(device)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n 
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* copy_start;\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n GmockMatch(\n m::Op()\n .WithOpcode(HloOpcode::kAsyncDone)\n .WithOperand(\n 0, m::Op(&copy_start).WithOpcode(HloOpcode::kAsyncStart))));\n ASSERT_EQ(copy_start->called_computations().size(), 1);\n HloComputation* async_copy_computation =\n copy_start->called_computations().at(0);\n EXPECT_THAT(async_copy_computation->root_instruction(),\n GmockMatch(m::Copy()));\n}\nTEST_F(HostMemoryTransferAsyncifierTest, OldCopyFromDeviceToHost) {\n const std::string& hlo_string = R\"(\nHloModule MyModule\nENTRY main {\n device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)\n ROOT copy = f32[32,1,1]{2,1,0:T(2,128)S(5)} copy(device)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* copy_start;\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n GmockMatch(\n m::Op()\n .WithOpcode(HloOpcode::kCopyDone)\n .WithOperand(\n 0, m::Op(&copy_start).WithOpcode(HloOpcode::kCopyStart))));\n}\nTEST_F(HostMemoryTransferAsyncifierTest, DISABLED_CopyFromHostToDevice) {\n const std::string& hlo_string = R\"(\nHloModule MyModule\nENTRY main {\n host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)\n ROOT copy = f32[32,1,1]{2,1,0:T(2,128)} copy(host_memory)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* copy_start;\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n GmockMatch(\n m::Op()\n .WithOpcode(HloOpcode::kAsyncDone)\n .WithOperand(\n 0, m::Op(&copy_start).WithOpcode(HloOpcode::kAsyncStart))));\n ASSERT_EQ(copy_start->called_computations().size(), 1);\n 
HloComputation* async_copy_computation =\n copy_start->called_computations().at(0);\n EXPECT_THAT(async_copy_computation->root_instruction(),\n GmockMatch(m::Copy()));\n}\nTEST_F(HostMemoryTransferAsyncifierTest, OldCopyFromHostToDevice) {\n const std::string& hlo_string = R\"(\nHloModule MyModule\nENTRY main {\n host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)\n ROOT copy = f32[32,1,1]{2,1,0:T(2,128)} copy(host_memory)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* copy_start;\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n GmockMatch(\n m::Op()\n .WithOpcode(HloOpcode::kCopyDone)\n .WithOperand(\n 0, m::Op(&copy_start).WithOpcode(HloOpcode::kCopyStart))));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_memory_transfer_asyncifier.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_memory_transfer_asyncifier_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1174,"cells":{"ID":{"kind":"string","value":"8d39f7fb-274f-498c-acfb-4154cac83495"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"stream_pool"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/stream_pool.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/stream_pool_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/stream_pool.h\"\n#include \n#include \n#include \"absl/strings/str_format.h\"\nnamespace xla {\nStreamPool::Ptr 
StreamPool::BorrowStream(se::StreamPriority priority) {\n std::unique_ptr stream;\n {\n absl::MutexLock lock(&mu_);\n if (streams_with_pri_.find(priority) == streams_with_pri_.end()) {\n stream = nullptr;\n } else {\n while (!streams_with_pri_[priority].empty() && !stream) {\n stream = std::move(streams_with_pri_[priority].back());\n streams_with_pri_[priority].pop_back();\n if (stream->ok()) {\n VLOG(1) << absl::StrFormat(\n \"StreamPool reusing existing stream (%p) with priority: %s\",\n stream.get(), se::StreamPriorityToString(priority));\n } else {\n VLOG(1) << absl::StrFormat(\n \"Stream (%p) was not ok, deleting with : %s\", stream.get(),\n se::StreamPriorityToString(priority));\n stream = nullptr;\n }\n }\n }\n }\n if (!stream) {\n stream = executor_->CreateStream(priority).value();\n stream->set_name(absl::StrFormat(\"%s pool stream\",\n se::StreamPriorityToString(priority)));\n VLOG(1) << absl::StrFormat(\"Created new stream (%p) with priority = %s\",\n stream.get(),\n se::StreamPriorityToString(priority));\n }\n PtrDeleter deleter = {this};\n return Ptr(stream.release(), deleter);\n}\nvoid StreamPool::ReturnStream(se::Stream* stream) {\n if (stream->ok()) {\n VLOG(1) << absl::StrFormat(\"StreamPool returning ok stream (%p)\", stream);\n absl::MutexLock lock(&mu_);\n auto priority = std::get(stream->priority());\n streams_with_pri_[priority].emplace_back(stream);\n } else {\n VLOG(1) << absl::StrFormat(\"StreamPool deleting !ok stream (%p)\", stream);\n delete stream;\n }\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/stream_pool.h\"\n#include \n#include \"xla/stream_executor/platform_manager.h\"\n#include \"xla/stream_executor/stream_executor.h\"\n#include \"xla/test_helpers.h\"\nnamespace xla {\nnamespace {\nclass StreamPoolTest : public ::testing::Test {\n protected:\n se::StreamExecutor* NewStreamExecutor() {\n se::Platform* platform =\n se::PlatformManager::PlatformWithName(\"Host\").value();\n return 
platform->ExecutorForDevice(0).value();\n }\n};\nTEST_F(StreamPoolTest, EmptyPool) {\n se::StreamExecutor* executor = NewStreamExecutor();\n StreamPool pool(executor);\n}\nTEST_F(StreamPoolTest, OneStreamPool) {\n se::StreamExecutor* executor = NewStreamExecutor();\n StreamPool pool(executor);\n StreamPool::Ptr stream1 = pool.BorrowStream();\n se::Stream* stream1_ptr = stream1.get();\n EXPECT_TRUE(stream1->ok());\n stream1 = nullptr;\n StreamPool::Ptr stream2 = pool.BorrowStream();\n se::Stream* stream2_ptr = stream2.get();\n EXPECT_TRUE(stream2->ok());\n stream2 = nullptr;\n EXPECT_EQ(stream1_ptr, stream2_ptr);\n}\nTEST_F(StreamPoolTest, TwoStreamPool) {\n se::StreamExecutor* executor = NewStreamExecutor();\n StreamPool pool(executor);\n StreamPool::Ptr stream1 = pool.BorrowStream();\n se::Stream* stream1_ptr = stream1.get();\n EXPECT_TRUE(stream1->ok());\n StreamPool::Ptr stream2 = pool.BorrowStream();\n se::Stream* stream2_ptr = stream2.get();\n EXPECT_TRUE(stream2->ok());\n EXPECT_NE(stream1_ptr, stream2_ptr);\n stream1 = nullptr;\n StreamPool::Ptr stream3 = pool.BorrowStream();\n se::Stream* stream3_ptr = stream3.get();\n EXPECT_TRUE(stream3->ok());\n EXPECT_EQ(stream1_ptr, stream3_ptr);\n EXPECT_NE(stream2_ptr, stream3_ptr);\n stream2 = nullptr;\n StreamPool::Ptr stream4 = pool.BorrowStream();\n se::Stream* stream4_ptr = stream4.get();\n EXPECT_TRUE(stream4->ok());\n EXPECT_EQ(stream2_ptr, stream4_ptr);\n EXPECT_NE(stream3_ptr, stream4_ptr);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/stream_pool.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/stream_pool_test.cc"},"Commit 
Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1175,"cells":{"ID":{"kind":"string","value":"3896e059-52e8-44fa-aa37-6d162956acdf"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"hlo_rematerialization"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/hlo_rematerialization.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/hlo_rematerialization_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/hlo_rematerialization.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/container/inlined_vector.h\"\n#include \"absl/functional/function_ref.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/status.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_format.h\"\n#include \"absl/strings/str_join.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/span.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_clone_context.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/ir/hlo_schedule.h\"\n#include \"xla/hlo/utils/hlo_query.h\"\n#include \"xla/layout_util.h\"\n#include \"xla/map_util.h\"\n#include \"xla/service/call_graph.h\"\n#include \"xla/service/hlo_cost_analysis.h\"\n#include \"xla/service/hlo_dataflow_analysis.h\"\n#include \"xla/service/hlo_dce.h\"\n#include \"xla/service/logical_buffer.h\"\n#include 
\"xla/service/tuple_points_to_analysis.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/status_macros.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/numbers.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nusing ::tsl::strings::HumanReadableNumBytes;\nbool IsRematerializable(const HloInstruction* instruction) {\n if (instruction->opcode() == HloOpcode::kCopy) {\n if (LayoutUtil::Equal(instruction->shape().layout(),\n instruction->operand(0)->shape().layout())) {\n return false;\n }\n }\n if (auto collective = DynCast(instruction)) {\n return !collective->constrain_layout();\n }\n switch (instruction->opcode()) {\n case HloOpcode::kCall:\n case HloOpcode::kConstant:\n case HloOpcode::kConditional:\n case HloOpcode::kCustomCall:\n case HloOpcode::kParameter:\n case HloOpcode::kWhile:\n return false;\n default:\n return !instruction->HasSideEffect();\n }\n}\nbool CanBeRematerialized(\n const HloInstruction* instruction,\n absl::flat_hash_map* rematerializable_map) {\n auto it = rematerializable_map->find(instruction);\n if (it != rematerializable_map->end()) {\n return it->second;\n }\n bool rematerializable = IsRematerializable(instruction);\n (*rematerializable_map)[instruction] = rematerializable;\n return rematerializable;\n}\nbool IsSupportedIndirectUser(const HloInstruction* instruction) {\n return instruction->opcode() == HloOpcode::kBitcast ||\n instruction->opcode() == HloOpcode::kGetTupleElement;\n}\nusing BufferId = int64_t;\nusing BufferIdList = absl::InlinedVector;\nstruct RematStrategy {\n enum {\n kRecompute,\n kCompress,\n kHostOffload,\n } kind;\n Shape compact_shape;\n};\nstruct Item {\n HloInstruction* instruction;\n bool placed = false;\n bool denylisted = false;\n BufferIdList buffers_defined;\n BufferIdList buffers_output;\n BufferIdList buffers_used;\n bool is_skip_node = false;\n private:\n friend class InstructionList;\n Item* next = nullptr;\n Item* 
prev = nullptr;\n Item* prev_skip_node = nullptr;\n Item* next_skip_node = nullptr;\n int64_t position;\n};\nstruct ItemUse {\n Item* user;\n int64_t operand_number;\n std::optional index;\n ItemUse(Item* user, int64_t op_num, std::optional index)\n : user(user), operand_number(op_num), index(index) {}\n bool operator==(const ItemUse& other) const {\n return user == other.user && operand_number == other.operand_number &&\n index == other.index;\n }\n};\nusing ItemList = absl::InlinedVector;\nusing UsesList = absl::InlinedVector;\nclass InstructionList {\n public:\n explicit InstructionList(const HloInstructionSequence& order) {\n int64_t position = 0;\n Item* last = nullptr;\n last_skip_node_ = nullptr;\n first_skip_node_ = nullptr;\n for (HloInstruction* inst : order.instructions()) {\n Item* item = new Item;\n item->next = nullptr;\n item->prev = last;\n if (last == nullptr) {\n first_ = item;\n } else {\n last->next = item;\n }\n last = item;\n item->instruction = inst;\n item->position = position;\n position++;\n item_map_[inst] = item;\n }\n }\n ~InstructionList() {\n for (Item* item = first_; item != nullptr;) {\n Item* next = item->next;\n delete item;\n item = next;\n }\n }\n size_t size() const { return item_map_.size(); }\n Item* first() const { return first_; }\n Item* next(Item* item) const { return item->next; }\n const Item* next(const Item* item) const { return item->next; }\n Item* prev(Item* item) const { return item->prev; }\n const Item* prev(const Item* item) const { return item->prev; }\n Item* first_skip_node() const { return first_skip_node_; }\n Item* next_skip_node(Item* item) const { return item->next_skip_node; }\n Item* CreateItem(HloInstruction* inst) {\n Item* item = new Item;\n item->instruction = inst;\n CHECK(item_map_.insert({inst, item}).second)\n << \"inserting inst twice \" << inst->name();\n return item;\n }\n Item* GetItem(const HloInstruction* inst) const {\n auto iter = item_map_.find(inst);\n CHECK(iter != item_map_.end()) 
<< \"Did not find \" << inst->name();\n return iter->second;\n }\n void InsertBeforeInstructions(Item* to_insert,\n absl::Span before_instructions) {\n VLOG(3) << \"InsertBeforeInstructions: \" << to_insert->instruction->name()\n << \" before {\"\n << absl::StrJoin(before_instructions, \", \",\n [](std::string* out, Item* item) {\n absl::StrAppend(out, item->instruction->name());\n })\n << \"}\";\n CHECK(!before_instructions.empty());\n Item* min_position_item = nullptr;\n for (Item* item : before_instructions) {\n if (min_position_item == nullptr ||\n item->position < min_position_item->position) {\n min_position_item = item;\n }\n }\n while (min_position_item->prev != nullptr &&\n min_position_item->position == min_position_item->prev->position) {\n min_position_item = min_position_item->prev;\n }\n while (!absl::c_linear_search(before_instructions, min_position_item)) {\n min_position_item = min_position_item->next;\n }\n return InsertBefore(to_insert, min_position_item);\n }\n void PromoteNodesToSkip(absl::FunctionRef should_promote) {\n int64_t count = 0;\n for (auto* item = first(); item != nullptr; item = next(item)) {\n if (should_promote(item)) {\n count += 1;\n if (first_skip_node_ == nullptr) {\n first_skip_node_ = item;\n }\n item->is_skip_node = true;\n item->prev_skip_node = last_skip_node_;\n if (last_skip_node_ != nullptr) {\n last_skip_node_->next_skip_node = item;\n }\n last_skip_node_ = item;\n }\n }\n VLOG(1) << \" Rematerialization has \" << count << \" items in express lane\";\n }\n void InsertAfterInstructions(Item* to_insert,\n absl::Span after_instructions) {\n VLOG(3) << \"InsertAfterInstructions: \" << to_insert->instruction->name()\n << \" after {\"\n << absl::StrJoin(after_instructions, \", \",\n [](std::string* out, Item* item) {\n absl::StrAppend(out, item->instruction->name());\n })\n << \"}\";\n CHECK(!after_instructions.empty());\n Item* max_position_item = nullptr;\n for (Item* item : after_instructions) {\n if (max_position_item 
== nullptr ||\n item->position > max_position_item->position) {\n max_position_item = item;\n }\n }\n CHECK(max_position_item->next != nullptr);\n InsertBeforeInstructions(to_insert, {max_position_item->next});\n }\n void Denylist(const HloInstruction* inst) {\n GetItem(inst)->denylisted = true;\n }\n private:\n void InsertBefore(Item* item, Item* before) {\n VLOG(3) << \"InsertBefore: \" << item->instruction->name() << \" before \"\n << before->instruction->name();\n item->is_skip_node = true;\n Item* cursor = before;\n while (cursor != nullptr && !cursor->is_skip_node) {\n cursor = cursor->next;\n }\n CHECK(cursor == nullptr || cursor->is_skip_node);\n if (cursor == nullptr) {\n item->prev_skip_node = last_skip_node_;\n item->next_skip_node = nullptr;\n last_skip_node_ = item;\n } else {\n CHECK(cursor->is_skip_node);\n item->prev_skip_node = cursor->prev_skip_node;\n if (item->prev_skip_node != nullptr) {\n item->prev_skip_node->next_skip_node = item;\n }\n item->next_skip_node = cursor;\n cursor->prev_skip_node = item;\n }\n if (first_skip_node_ == cursor) {\n first_skip_node_ = item;\n }\n item->prev = before->prev;\n item->next = before;\n before->prev = item;\n if (item->prev != nullptr) {\n item->prev->next = item;\n } else {\n first_ = item;\n }\n item->position = before->position;\n }\n Item* first_;\n Item* first_skip_node_;\n Item* last_skip_node_;\n absl::flat_hash_map item_map_;\n};\nUsesList GetUsers(const InstructionList& instruction_list,\n const LogicalBuffer* logical_buffer,\n const TuplePointsToAnalysis& points_to_analysis,\n bool* has_indirect_users) {\n UsesList users;\n *has_indirect_users = false;\n for (const BufferAlias& buffer_alias :\n points_to_analysis.GetBufferAliases(*logical_buffer)) {\n for (const HloInstruction* user : buffer_alias.instruction()->users()) {\n if (points_to_analysis.DoesNotUseOperandBuffer(\n buffer_alias.instruction(), buffer_alias.index(), user)) {\n continue;\n }\n if (buffer_alias.instruction() != 
logical_buffer->instruction() &&\n !IsSupportedIndirectUser(buffer_alias.instruction())) {\n *has_indirect_users = true;\n }\n Item* user_item = instruction_list.GetItem(user);\n std::optional user_index =\n logical_buffer->index().size() != 1\n ? std::nullopt\n : std::make_optional(logical_buffer->index().back());\n for (int64_t op_idx : user->OperandIndices(buffer_alias.instruction())) {\n if (!absl::c_linear_search(\n users,\n ItemUse{user_item, static_cast(op_idx), user_index})) {\n users.push_back(\n ItemUse{user_item, static_cast(op_idx), user_index});\n }\n }\n }\n }\n return users;\n}\nclass MemoryUsageTracker {\n public:\n MemoryUsageTracker(const HloRematerialization::Options& options,\n const HloComputation* computation,\n const TuplePointsToAnalysis& points_to_analysis,\n const InstructionList& instruction_list);\n absl::Status BeginInstruction(Item* item);\n int64_t RematerializationCost(const std::vector& items,\n int64_t memory_reduced,\n int64_t memory_limit_bytes) const {\n bool zero_cost_move = true;\n for (auto* item : items) {\n auto* instruction = item->instruction;\n if (absl::c_any_of(\n instruction->users(),\n [this](const HloInstruction* inst) { return IsPlaced(inst); })) {\n zero_cost_move = false;\n break;\n }\n }\n if (zero_cost_move) {\n return 0;\n }\n CHECK_GT(memory_reduced, 0);\n return memory_limit_bytes / memory_reduced;\n }\n absl::Status EndInstruction();\n int64_t MemoryReducedIfCompressed(const Item* item,\n const Shape& compact_shape) const;\n int64_t MemoryReducedIfRematerialized(\n absl::Span items) const;\n absl::Status AddCompressInstructions(Item* original_item,\n Item* compressed_item,\n Item* uncompressed_item);\n absl::Status AddRematerializedInstruction(Item* original_item,\n Item* remat_item,\n absl::Span indirect_users);\n std::tuple GetPlacedAndUnplacedUsers(\n const UsesList& uses) const;\n public:\n absl::Status AddHostOffloadCopyInstructions(Item* original_item,\n Item* copy_start_to_host_item,\n Item* 
copy_done_to_host_item,\n Item* copy_start_to_device_item,\n Item* copy_done_to_device_item);\n int64_t BytesUsedByBuffers(const Item* item,\n bool only_count_unplaced_users) const;\n std::optional GetCostOfCompression(const Item* candidate_item,\n int64_t memory_limit_bytes,\n int64_t peak_memory_bytes);\n std::optional GetCostOfHostOffload(const Item* candidate_item,\n int64_t memory_limit_bytes) const;\n std::optional GetCostOfRecompute(\n const std::vector& candidate_items,\n int64_t memory_limit_bytes) const;\n std::tuple, RematStrategy, int>\n PickRematerializationCandidates(\n const InstructionList& instruction_list, int64_t memory_limit_bytes,\n absl::flat_hash_map* rematerializable_map,\n int min_block_size, int max_block_size, int64_t peak_memory_bytes);\n bool IsPlaced(const HloInstruction* instruction) const {\n return instruction_list_.GetItem(instruction)->placed;\n }\n bool HasUnplacedUsers(Item* item) const;\n UsesList GetItemUses(Item* item) const;\n bool IsInProgressItem(Item* item) const { return item == in_progress_item_; }\n int64_t memory_usage() const { return memory_usage_; }\n int64_t AllocatedSize(Item* item) const {\n int64_t size = 0;\n for (auto buffer_id : item->buffers_defined) {\n size += AllocatedSize(buffer_id);\n }\n return size;\n }\n const HloComputation* computation() const { return computation_; }\n const HloRematerialization::Options& options() const { return options_; }\n bool Check() const;\n std::string ToString() const;\n private:\n struct Buffer {\n const BufferId id;\n Item* defining_instruction;\n const int64_t size;\n Shape shape;\n bool live_out;\n bool has_indirect_uses;\n ShapeIndex index;\n UsesList users;\n int64_t unfinished_user_count;\n std::string ToString() const {\n return absl::StrCat(\"Buffer \", id, \" (defined by \",\n defining_instruction->instruction->name(), \", size \",\n size, \" bytes)\");\n }\n };\n void CountAllocatedMemory(Item* item);\n absl::Status CountFreedMemory(Item* item);\n void 
ReplaceUsesInUsersOfBuffer(Buffer& buffer, BufferId old_id) const;\n absl::StatusOr GetCompactShape(const HloInstruction* hlo);\n Buffer& CreateBufferFromLogicalBuffer(\n const LogicalBuffer* logical_buffer,\n const TuplePointsToAnalysis& points_to_analysis, bool live_out) {\n bool has_indirect_uses = false;\n UsesList users = GetUsers(instruction_list_, logical_buffer,\n points_to_analysis, &has_indirect_uses);\n return NewBuffer(instruction_list_.GetItem(logical_buffer->instruction()),\n logical_buffer->shape(), logical_buffer->index(),\n std::move(users), live_out, has_indirect_uses);\n }\n Buffer& RematerializeBuffer(const Buffer& original_buffer, Item* remat_item,\n UsesList&& rematerialized_uses) {\n CHECK(original_buffer.defining_instruction->placed)\n << original_buffer.defining_instruction->instruction->name();\n CHECK(!original_buffer.has_indirect_uses) << original_buffer.ToString();\n CHECK(!original_buffer.live_out) << original_buffer.ToString();\n for (ItemUse& use : rematerialized_uses) {\n CHECK(!use.user->placed) << use.user->instruction->name();\n }\n return NewBuffer(remat_item, original_buffer.shape, original_buffer.index,\n std::move(rematerialized_uses), false,\n false);\n }\n int64_t AllocatedSize(BufferId buffer_id) const {\n const Buffer& buffer = buffers_.at(buffer_id);\n HloInstruction* inst = buffer.defining_instruction->instruction;\n HloOpcode def_opcode = inst->opcode();\n if (buffer.live_out || def_opcode == HloOpcode::kParameter) {\n return 0;\n } else {\n if (options_.host_memory_offload_config && buffer.shape.has_layout() &&\n buffer.shape.layout().memory_space() ==\n options_.host_memory_offload_config->host_memory_space) {\n return 0;\n }\n return buffer.size;\n }\n }\n bool IsFinished(Item* item) const {\n return item->placed && item != in_progress_item_;\n }\n bool IsInUse(BufferId buffer_id) const {\n if (in_progress_item_ == nullptr) {\n return false;\n }\n const BufferIdList& in_progress_uses = 
in_progress_item_->buffers_used;\n return absl::c_linear_search(in_progress_uses, buffer_id);\n }\n bool IsCurrentlyLive(BufferId buffer_id) const {\n const Buffer& buffer = buffers_[buffer_id];\n return (buffer.defining_instruction->placed &&\n buffer.unfinished_user_count > 0);\n }\n bool IsInstructionCurrentlyLive(const Item* instruction) const {\n if (!IsPlaced(instruction->instruction)) {\n return false;\n }\n for (const HloInstruction* user : instruction->instruction->users()) {\n if (!IsPlaced(user)) {\n return true;\n }\n }\n return false;\n }\n Buffer& NewBuffer(Item* defining_instruction, const Shape& shape,\n const ShapeIndex& index, UsesList&& uses, bool live_out,\n bool has_indirect_uses) {\n int buffer_id = buffers_.size();\n auto get_num_of_unique_users = [](const UsesList& uses) -> int64_t {\n absl::flat_hash_set users_set;\n for (const ItemUse& use : uses) {\n users_set.insert(use.user);\n }\n return users_set.size();\n };\n buffers_.push_back(Buffer{buffer_id, defining_instruction,\n options_.hlo_cost_analysis.GetShapeSize(shape),\n shape, live_out, has_indirect_uses, index, uses,\n get_num_of_unique_users(uses)});\n return buffers_.back();\n }\n const HloRematerialization::Options& options_;\n const HloComputation* computation_;\n const InstructionList& instruction_list_;\n absl::flat_hash_map compact_shape_;\n int64_t memory_usage_ = 0;\n Item* in_progress_item_ = nullptr;\n std::vector buffers_;\n};\nMemoryUsageTracker::MemoryUsageTracker(\n const HloRematerialization::Options& options,\n const HloComputation* computation,\n const TuplePointsToAnalysis& points_to_analysis,\n const InstructionList& instruction_list)\n : options_(options),\n computation_(computation),\n instruction_list_(instruction_list) {\n PointsToSet::BufferSet live_out_set =\n points_to_analysis.GetPointsToSet(computation_->root_instruction())\n .CreateFlattenedSet();\n absl::flat_hash_map\n logical_buffer_to_buffer_id;\n for (auto* item = instruction_list_.first(); item != 
nullptr;\n item = instruction_list_.next(item)) {\n const HloInstruction* const instruction = item->instruction;\n for (const LogicalBuffer* logical_buffer :\n points_to_analysis.GetBuffersDefinedByInstruction(instruction)) {\n Buffer* buffer;\n if (instruction->opcode() == HloOpcode::kWhile) {\n const PointsToSet& operand_points_to =\n points_to_analysis.GetPointsToSet(instruction->operand(0));\n CHECK_EQ(operand_points_to.element(logical_buffer->index()).size(), 1);\n const LogicalBuffer* source_logical_buffer =\n operand_points_to.element(logical_buffer->index())[0];\n buffer =\n &buffers_.at(logical_buffer_to_buffer_id.at(source_logical_buffer));\n buffer->has_indirect_uses = true;\n buffer->live_out =\n buffer->live_out || ContainsKey(live_out_set, logical_buffer);\n bool unused;\n for (ItemUse& user_item : GetUsers(instruction_list_, logical_buffer,\n points_to_analysis, &unused)) {\n auto existing_user_it = absl::c_find_if(\n buffer->users,\n [&](const ItemUse& use) { return user_item.user == use.user; });\n if (existing_user_it == buffer->users.end()) {\n buffer->unfinished_user_count++;\n user_item.user->buffers_used.push_back(buffer->id);\n buffer->users.push_back(user_item);\n }\n }\n } else {\n buffer = &CreateBufferFromLogicalBuffer(\n logical_buffer, points_to_analysis,\n ContainsKey(live_out_set, logical_buffer));\n item->buffers_defined.push_back(buffer->id);\n for (ItemUse& user : buffer->users) {\n if (!absl::c_linear_search(user.user->buffers_used, buffer->id)) {\n user.user->buffers_used.push_back(buffer->id);\n }\n }\n }\n logical_buffer_to_buffer_id[logical_buffer] = buffer->id;\n }\n for (const LogicalBuffer* logical_buffer :\n points_to_analysis.GetPointsToSet(instruction).CreateFlattenedSet()) {\n item->buffers_output.push_back(\n logical_buffer_to_buffer_id[logical_buffer]);\n }\n }\n XLA_VLOG_LINES(10, ToString());\n DCHECK(Check());\n}\nvoid MemoryUsageTracker::CountAllocatedMemory(Item* item) {\n for (BufferId buffer_id : 
item->buffers_defined) {\n VLOG(3) << \" Buffer \" << buffers_.at(buffer_id).ToString()\n << \" is now live.\";\n memory_usage_ += AllocatedSize(buffer_id);\n }\n}\nabsl::Status MemoryUsageTracker::CountFreedMemory(Item* item) {\n for (BufferId buffer_id : item->buffers_used) {\n Buffer& buffer = buffers_.at(buffer_id);\n buffer.unfinished_user_count--;\n TF_RET_CHECK(buffer.unfinished_user_count >= 0)\n << buffer.ToString() << \" has negative unfinished user count.\";\n if (buffer.unfinished_user_count == 0) {\n VLOG(3) << \" \" << buffer.ToString() << \" is now dead.\";\n memory_usage_ -= AllocatedSize(buffer_id);\n }\n }\n for (BufferId buffer_id : item->buffers_defined) {\n const Buffer& buffer = buffers_.at(buffer_id);\n if (buffer.unfinished_user_count == 0) {\n VLOG(3) << \" \" << buffer.ToString() << \" is immediately dead.\";\n memory_usage_ -= AllocatedSize(buffer_id);\n }\n }\n return absl::OkStatus();\n}\nabsl::Status MemoryUsageTracker::BeginInstruction(Item* item) {\n const HloInstruction* instruction = item->instruction;\n VLOG(3) << \"BeginInstruction \" << instruction->name();\n TF_RET_CHECK(in_progress_item_ == nullptr);\n in_progress_item_ = item;\n item->placed = true;\n CountAllocatedMemory(item);\n VLOG(3) << \" memory usage = \" << memory_usage_;\n VLOG(10) << ToString();\n if (VLOG_IS_ON(1)) {\n DCHECK(Check());\n }\n return absl::OkStatus();\n}\nabsl::Status MemoryUsageTracker::EndInstruction() {\n TF_RET_CHECK(in_progress_item_ != nullptr);\n VLOG(3) << \"EndInstruction \" << in_progress_item_->instruction->name();\n TF_RETURN_IF_ERROR(CountFreedMemory(in_progress_item_));\n in_progress_item_ = nullptr;\n VLOG(3) << \" memory usage = \" << memory_usage_;\n VLOG(10) << ToString();\n if (VLOG_IS_ON(1)) {\n DCHECK(Check());\n }\n return absl::OkStatus();\n}\nint64_t MemoryUsageTracker::MemoryReducedIfCompressed(\n const Item* item, const Shape& compact_shape) const {\n CHECK_NE(in_progress_item_, nullptr);\n if (!item->placed || item == 
in_progress_item_) {\n return 0;\n }\n int64_t memory_reduced = 0;\n CHECK_EQ(item->buffers_output.size(), 1);\n BufferId buffer_id = item->buffers_output[0];\n if (IsCurrentlyLive(buffer_id) && !IsInUse(buffer_id) &&\n IsInstructionCurrentlyLive(item)) {\n const Buffer& buffer = buffers_.at(buffer_id);\n memory_reduced += buffer.size;\n int64_t compact_shape_size =\n options_.hlo_cost_analysis.GetShapeSize(compact_shape);\n memory_reduced -= compact_shape_size;\n }\n return memory_reduced;\n}\nint64_t MemoryUsageTracker::MemoryReducedIfRematerialized(\n absl::Span items) const {\n CHECK_NE(in_progress_item_, nullptr);\n int64_t memory_reduced = 0;\n absl::flat_hash_set remat_candidates;\n for (const Item* item : items) {\n if (!item->placed || item == in_progress_item_) {\n LOG(WARNING) << \"Unplaced item or in progress item being checked for \"\n \"rematerialization.\";\n return 0;\n }\n for (BufferId buffer_id : item->buffers_defined) {\n const Buffer& buffer = buffers_.at(buffer_id);\n if (buffer.has_indirect_uses || buffer.live_out ||\n buffer.index.size() > 1) {\n return 0;\n }\n if (IsInUse(buffer_id)) {\n return 0;\n }\n if (IsCurrentlyLive(buffer_id)) {\n memory_reduced += AllocatedSize(buffer_id);\n }\n }\n for (BufferId buffer_id : item->buffers_used) {\n if (!IsCurrentlyLive(buffer_id)) {\n Item* defining_instruction =\n buffers_.at(buffer_id).defining_instruction;\n if (!remat_candidates.contains(defining_instruction)) {\n memory_reduced -= AllocatedSize(buffer_id);\n }\n }\n }\n remat_candidates.insert(item);\n }\n return memory_reduced;\n}\nstd::tuple MemoryUsageTracker::GetPlacedAndUnplacedUsers(\n const UsesList& uses) const {\n UsesList placed_users, unplaced_users;\n for (const ItemUse& use : uses) {\n if (use.user->placed) {\n DCHECK(IsFinished(use.user)) << use.user->instruction->name();\n placed_users.push_back(use);\n } else {\n unplaced_users.push_back(use);\n }\n }\n return {placed_users, unplaced_users};\n}\nvoid 
MemoryUsageTracker::ReplaceUsesInUsersOfBuffer(Buffer& buffer,\n BufferId old_id) const {\n for (ItemUse& use : buffer.users) {\n BufferIdList& buffers_used = use.user->buffers_used;\n absl::c_replace(buffers_used, old_id, buffer.id);\n }\n}\nabsl::Status MemoryUsageTracker::AddCompressInstructions(\n Item* original_item, Item* compressed_item, Item* uncompressed_item) {\n CHECK(original_item->placed)\n << \"Compressing instruction, but the original is not yet placed.\";\n CHECK_EQ(original_item->buffers_output.size(), 1)\n << \"Only compressing items which have a single output buffer\";\n memory_usage_ -= options_.hlo_cost_analysis.GetShapeSize(\n original_item->instruction->shape());\n memory_usage_ += options_.hlo_cost_analysis.GetShapeSize(\n compressed_item->instruction->shape());\n BufferId original_buffer_id = original_item->buffers_output[0];\n Buffer& original_buffer = buffers_.at(original_buffer_id);\n auto [placed_users, unplaced_users] =\n GetPlacedAndUnplacedUsers(original_buffer.users);\n original_buffer.users = std::move(placed_users);\n original_buffer.unfinished_user_count = 0;\n original_buffer.users.push_back(ItemUse{compressed_item, 0, std::nullopt});\n ShapeIndex copied_index = original_buffer.index;\n Buffer& compressed_buffer =\n NewBuffer(compressed_item, compressed_item->instruction->shape(),\n copied_index, {ItemUse{uncompressed_item, 0, std::nullopt}},\n false,\n false);\n compressed_item->buffers_used = original_item->buffers_output;\n compressed_item->buffers_output = {compressed_buffer.id};\n compressed_item->buffers_defined.push_back(compressed_buffer.id);\n Buffer& uncompressed_buffer =\n NewBuffer(uncompressed_item, uncompressed_item->instruction->shape(),\n copied_index, std::move(unplaced_users), false,\n false);\n uncompressed_item->buffers_used = {compressed_item->buffers_output[0]};\n uncompressed_item->buffers_output = {uncompressed_buffer.id};\n uncompressed_item->buffers_defined = {uncompressed_buffer.id};\n 
ReplaceUsesInUsersOfBuffer(uncompressed_buffer, original_buffer_id);\n return absl::OkStatus();\n}\nabsl::Status MemoryUsageTracker::AddRematerializedInstruction(\n Item* original_item, Item* remat_item, absl::Span indirect_users) {\n VLOG(3) << \"AddRematerializedInstruction: original_instruction = \"\n << original_item->instruction->name()\n << \", remat_instruction = \" << remat_item->instruction->name();\n TF_RET_CHECK(in_progress_item_ != nullptr);\n TF_RET_CHECK(original_item->placed) << original_item->instruction->name();\n TF_RET_CHECK(!remat_item->placed) << remat_item->instruction->name();\n remat_item->buffers_used = original_item->buffers_used;\n for (BufferId buffer_id : original_item->buffers_used) {\n Buffer& buffer = buffers_.at(buffer_id);\n if (buffer.unfinished_user_count == 0) {\n memory_usage_ += AllocatedSize(buffer.id);\n }\n buffer.unfinished_user_count++;\n absl::InlinedVector filtered_users;\n std::copy_if(buffer.users.begin(), buffer.users.end(),\n std::back_inserter(filtered_users),\n [&](const ItemUse& iu) { return iu.user == original_item; });\n for (ItemUse& u : filtered_users) {\n buffer.users.push_back(ItemUse{remat_item, u.operand_number, u.index});\n }\n }\n const absl::flat_hash_set indirect_users_set(indirect_users.begin(),\n indirect_users.end());\n for (BufferId old_buffer_id : original_item->buffers_defined) {\n Buffer& old_buffer = buffers_.at(old_buffer_id);\n UsesList placed_users;\n UsesList unplaced_users;\n for (ItemUse& user : old_buffer.users) {\n if (user.user->placed) {\n placed_users.push_back(user);\n } else {\n if (!IsSupportedIndirectUser(user.user->instruction) ||\n indirect_users_set.contains(user.user)) {\n unplaced_users.push_back(user);\n } else {\n CHECK(user.user->buffers_defined.empty())\n << \"Buffers defined expected to be empty for use passthrough \"\n \"instructions\";\n user.user->buffers_output.clear();\n user.user->buffers_used.clear();\n }\n }\n }\n old_buffer.users = std::move(placed_users);\n 
old_buffer.unfinished_user_count = 0;\n memory_usage_ -= AllocatedSize(old_buffer.id);\n Buffer& new_buffer =\n RematerializeBuffer(old_buffer, remat_item, std::move(unplaced_users));\n remat_item->buffers_defined.push_back(new_buffer.id);\n remat_item->buffers_output.push_back(new_buffer.id);\n auto update_buffers = [old_buffer_id, new_buffer_id = new_buffer.id](\n BufferIdList& to_update) {\n std::replace(to_update.begin(), to_update.end(), old_buffer_id,\n new_buffer_id);\n };\n for (ItemUse& user : new_buffer.users) {\n update_buffers(user.user->buffers_used);\n update_buffers(user.user->buffers_output);\n }\n }\n for (Item* indirect_user : indirect_users) {\n const Item* source_item =\n instruction_list_.GetItem(indirect_user->instruction->operand(0));\n switch (indirect_user->instruction->opcode()) {\n case HloOpcode::kBitcast: {\n if (IsSupportedIndirectUser(source_item->instruction)) {\n indirect_user->buffers_used = source_item->buffers_output;\n indirect_user->buffers_output = source_item->buffers_output;\n } else {\n indirect_user->buffers_used = source_item->buffers_defined;\n indirect_user->buffers_output = source_item->buffers_defined;\n }\n break;\n }\n case HloOpcode::kGetTupleElement: {\n const HloGetTupleElementInstruction* gte =\n Cast(indirect_user->instruction);\n for (BufferId buffer_id : source_item->buffers_defined) {\n const Buffer& def_buffer = buffers_.at(buffer_id);\n if (def_buffer.index == ShapeIndex{gte->tuple_index()}) {\n indirect_user->buffers_output.push_back(buffer_id);\n }\n if (def_buffer.index.empty()) {\n indirect_user->buffers_used.push_back(buffer_id);\n }\n }\n break;\n }\n default: {\n LOG(FATAL) << \"Unsupported indirect instruction with opcode \"\n << indirect_user->instruction->opcode();\n break;\n }\n }\n for (BufferId buffer_id : indirect_user->buffers_used) {\n Buffer& buffer = buffers_.at(buffer_id);\n buffer.unfinished_user_count++;\n buffer.users.push_back(ItemUse{indirect_user, 0, std::nullopt});\n }\n }\n 
VLOG(3) << \" memory usage = \" << memory_usage_;\n XLA_VLOG_LINES(10, ToString());\n DCHECK(Check());\n return absl::OkStatus();\n}\nabsl::Status MemoryUsageTracker::AddHostOffloadCopyInstructions(\n Item* original_item, Item* copy_start_to_host_item,\n Item* copy_done_to_host_item, Item* copy_start_to_device_item,\n Item* copy_done_to_device_item) {\n CHECK_EQ(original_item->buffers_defined.size(), 1);\n CHECK_EQ(original_item->buffers_output.size(), 1);\n BufferId original_buffer_id = original_item->buffers_output[0];\n Buffer& original_buffer = buffers_.at(original_buffer_id);\n auto [placed_users, unplaced_users] =\n GetPlacedAndUnplacedUsers(original_buffer.users);\n original_buffer.users = std::move(placed_users);\n original_buffer.users.emplace_back(copy_start_to_host_item, 0, std::nullopt);\n original_buffer.unfinished_user_count = 1;\n CHECK_EQ(copy_start_to_host_item->instruction->shape().tuple_shapes_size(), 3)\n << \"copy_start_to_host_item's shape is \"\n << copy_start_to_host_item->instruction->shape().ToString();\n CHECK_EQ(copy_start_to_device_item->instruction->shape().tuple_shapes_size(),\n 3)\n << \"copy_start_to_device_item's shape is \"\n << copy_start_to_device_item->instruction->shape().ToString();\n BufferId copy_start_to_host_device_buffer_id =\n NewBuffer(copy_start_to_host_item,\n copy_start_to_host_item->instruction->shape().tuple_shapes(1),\n ShapeIndex(),\n UsesList{ItemUse{copy_done_to_host_item, 0, std::nullopt}},\n false, false)\n .id;\n BufferId copy_start_to_host_context_buffer_id =\n NewBuffer(copy_start_to_host_item,\n copy_start_to_host_item->instruction->shape().tuple_shapes(2),\n ShapeIndex(),\n UsesList{ItemUse{copy_done_to_host_item, 0, std::nullopt}},\n false, false)\n .id;\n BufferId copy_start_to_device_device_buffer_id =\n NewBuffer(copy_start_to_device_item,\n copy_start_to_device_item->instruction->shape().tuple_shapes(0),\n ShapeIndex(),\n UsesList{ItemUse{copy_done_to_device_item, 0, std::nullopt}},\n false, 
false)\n .id;\n BufferId copy_start_to_device_context_buffer_id =\n NewBuffer(copy_start_to_device_item,\n copy_start_to_device_item->instruction->shape().tuple_shapes(2),\n ShapeIndex(),\n UsesList{ItemUse{copy_done_to_device_item, 0, std::nullopt}},\n false, false)\n .id;\n BufferId copy_done_to_device_buffer_id =\n NewBuffer(copy_done_to_device_item,\n copy_done_to_device_item->instruction->shape(), ShapeIndex(),\n std::move(unplaced_users), false,\n false)\n .id;\n copy_start_to_host_item->buffers_used = original_item->buffers_output;\n copy_start_to_host_item->buffers_output = {\n copy_start_to_host_device_buffer_id,\n copy_start_to_host_context_buffer_id};\n copy_start_to_host_item->buffers_defined = {\n copy_start_to_host_device_buffer_id,\n copy_start_to_host_context_buffer_id};\n copy_done_to_host_item->buffers_used =\n copy_start_to_host_item->buffers_output;\n copy_done_to_host_item->buffers_output = {};\n copy_done_to_host_item->buffers_defined = {};\n copy_start_to_device_item->buffers_used =\n copy_done_to_host_item->buffers_output;\n copy_start_to_device_item->buffers_output = {\n copy_start_to_device_device_buffer_id,\n copy_start_to_device_context_buffer_id};\n copy_start_to_device_item->buffers_defined = {\n copy_start_to_device_device_buffer_id,\n copy_start_to_device_context_buffer_id};\n copy_done_to_device_item->buffers_used =\n copy_start_to_device_item->buffers_output;\n copy_done_to_device_item->buffers_output = {copy_done_to_device_buffer_id};\n copy_done_to_device_item->buffers_defined = {copy_done_to_device_buffer_id};\n Buffer& copy_done_to_device_buffer =\n buffers_.at(copy_done_to_device_buffer_id);\n ReplaceUsesInUsersOfBuffer(copy_done_to_device_buffer, original_buffer_id);\n if (copy_start_to_host_item->placed) {\n CountAllocatedMemory(copy_start_to_host_item);\n TF_RETURN_IF_ERROR(CountFreedMemory(copy_start_to_host_item));\n if (copy_done_to_host_item->placed) {\n CountAllocatedMemory(copy_done_to_host_item);\n 
TF_RETURN_IF_ERROR(CountFreedMemory(copy_done_to_host_item));\n if (copy_start_to_device_item->placed) {\n CountAllocatedMemory(copy_start_to_device_item);\n TF_RETURN_IF_ERROR(CountFreedMemory(copy_start_to_device_item));\n if (copy_done_to_device_item->placed) {\n CountAllocatedMemory(copy_done_to_device_item);\n TF_RETURN_IF_ERROR(CountFreedMemory(copy_done_to_device_item));\n }\n }\n }\n }\n return absl::OkStatus();\n}\nstd::string MemoryUsageTracker::ToString() const {\n std::string output =\n absl::StrCat(\"MemoryUsageTracker for \", computation_->name(), \"\\n\");\n absl::StrAppend(&output,\n \"Memory usage: \", HumanReadableNumBytes(memory_usage()), \" (\",\n memory_usage(), \" bytes)\");\n for (auto* item = instruction_list_.first(); item != nullptr;\n item = instruction_list_.next(item)) {\n const HloInstruction* instruction = item->instruction;\n absl::string_view inprogress =\n item == in_progress_item_ ? \" in-progress\" : \"\";\n absl::string_view placed = item->placed ? \" placed\" : \"\";\n absl::StrAppend(&output, \" \", instruction->name(), inprogress, placed,\n \"\\n Defines:\\n\");\n for (BufferId buffer_id : item->buffers_defined) {\n const Buffer& buffer = buffers_[buffer_id];\n absl::string_view live = IsCurrentlyLive(buffer_id) ? 
\" live\" : \"\";\n absl::StrAppend(&output, \" \", buffer.ToString(), live, \", \",\n buffer.unfinished_user_count, \" unfinished uses\\n\");\n }\n absl::StrAppend(&output, \" Outputs:\\n\");\n for (BufferId buffer_id : item->buffers_output) {\n absl::StrAppend(&output, \" \", buffers_[buffer_id].ToString(), \"\\n\");\n }\n absl::StrAppend(&output, \" Uses:\\n\");\n for (BufferId buffer_id : item->buffers_used) {\n absl::StrAppend(&output, \" \", buffers_[buffer_id].ToString(), \"\\n\");\n }\n }\n return output;\n}\nabsl::StatusOr MemoryUsageTracker::GetCompactShape(\n const HloInstruction* hlo) {\n auto it = compact_shape_.find(hlo);\n if (it != compact_shape_.end()) {\n return &it->second;\n }\n const Shape& original_shape = hlo->shape();\n TF_ASSIGN_OR_RETURN(Shape min_shape,\n options_.compact_shape_function(original_shape));\n return &compact_shape_.emplace(hlo, min_shape).first->second;\n}\nbool MemoryUsageTracker::Check() const {\n auto elements_are_unique = [](const BufferIdList& vec) {\n return vec.size() == std::set(vec.begin(), vec.end()).size();\n };\n for (auto* instruction : computation_->instructions()) {\n const BufferIdList& defined_buffers =\n instruction_list_.GetItem(instruction)->buffers_defined;\n CHECK(elements_are_unique(defined_buffers))\n << \"Instruction \" << instruction->name()\n << \" does not have unique defined buffers: \"\n << absl::StrJoin(defined_buffers, \", \",\n [this](std::string* out, BufferId buffer_id) {\n absl::StrAppend(out,\n buffers_.at(buffer_id).ToString());\n });\n for (const Buffer& buffer : buffers_) {\n if (buffer.defining_instruction->instruction == instruction) {\n CHECK(absl::c_linear_search(defined_buffers, buffer.id))\n << \"Instruction \" << instruction->name()\n << \" defined buffers is missing: \" << buffer.ToString();\n }\n }\n }\n for (auto* instruction : computation_->instructions()) {\n const BufferIdList& used_buffers =\n instruction_list_.GetItem(instruction)->buffers_used;\n 
CHECK(elements_are_unique(used_buffers))\n << \"Instruction \" << instruction->name()\n << \" does not have unique used buffers: \"\n << absl::StrJoin(used_buffers, \", \",\n [this](std::string* out, BufferId buffer_id) {\n absl::StrAppend(out,\n buffers_.at(buffer_id).ToString());\n });\n }\n for (const Buffer& buffer : buffers_) {\n int64_t unfinished_uses = 0;\n absl::flat_hash_set already_counted_user;\n for (const ItemUse& user : buffer.users) {\n const BufferIdList& used_buffers = user.user->buffers_used;\n CHECK(absl::c_linear_search(used_buffers, buffer.id))\n << \"Instruction \" << user.user->instruction->name()\n << \" used buffers is missing \" << buffer.ToString();\n if (!IsFinished(user.user) &&\n already_counted_user.insert(user.user).second) {\n unfinished_uses++;\n }\n }\n CHECK_EQ(buffer.unfinished_user_count, unfinished_uses)\n << \"Incorrect unplaced use count for \" << buffer.ToString();\n }\n return true;\n}\nstd::vector GetInitialBlock(const InstructionList& instruction_list,\n const MemoryUsageTracker& tracker,\n Item* start_item, int min_block_size) {\n std::vector item_block;\n Item* curr_item = start_item;\n for (int i = 0; i < min_block_size; ++i) {\n if (curr_item == nullptr || !curr_item->placed ||\n tracker.IsInProgressItem(curr_item)) {\n break;\n }\n item_block.push_back(curr_item);\n curr_item = instruction_list.next(curr_item);\n }\n return item_block;\n}\nbool AnyDenylistedOrNonRematerializable(\n const std::vector& block,\n absl::flat_hash_map* rematerializable_map) {\n for (auto* item : block) {\n if (item->denylisted) {\n return true;\n }\n if (!CanBeRematerialized(item->instruction, rematerializable_map)) {\n return true;\n }\n }\n return false;\n}\nint64_t MemoryUsageTracker::BytesUsedByBuffers(\n const Item* item, bool only_count_unplaced_users) const {\n int64_t bytes_used_by_buffers = 0;\n for (const auto& buffer_id : item->buffers_defined) {\n VLOG(3) << \" buffer \" << buffer_id << \"'s users are \"\n << 
absl::StrJoin(buffers_.at(buffer_id).users, \", \",\n [](std::string* str, const auto& use) {\n str->append(use.user->instruction->name());\n });\n for (const auto& use : buffers_.at(buffer_id).users) {\n if (!only_count_unplaced_users || !use.user->placed) {\n bytes_used_by_buffers += AllocatedSize(buffer_id);\n break;\n }\n }\n }\n return bytes_used_by_buffers;\n}\nstd::optional MemoryUsageTracker::GetCostOfCompression(\n const Item* candidate_item, int64_t memory_limit_bytes,\n int64_t peak_memory_bytes) {\n CHECK(candidate_item != nullptr);\n if (candidate_item->buffers_output.size() != 1) {\n HloInstruction* candidate_instruction = candidate_item->instruction;\n VLOG(2) << \" \" << candidate_instruction->name()\n << \" has more than one output buffer; cannot offload to host.\";\n return {};\n }\n const Buffer& output_buffer = buffers_.at(candidate_item->buffers_output[0]);\n if (!candidate_item->placed || candidate_item == in_progress_item_ ||\n output_buffer.live_out) {\n return {};\n }\n const Shape& original_shape = candidate_item->instruction->shape();\n if (!original_shape.IsArray()) {\n return {};\n }\n const Shape* compact_shape =\n GetCompactShape(candidate_item->instruction).value();\n const int64_t memory_reduced =\n MemoryReducedIfCompressed(candidate_item, *compact_shape);\n const int64_t size = options_.hlo_cost_analysis.GetShapeSize(\n candidate_item->instruction->shape());\n const int64_t reduced_size =\n options_.hlo_cost_analysis.GetShapeSize(*compact_shape);\n if (memory_reduced > 0 && size + reduced_size < peak_memory_bytes) {\n return memory_limit_bytes / memory_reduced;\n } else {\n return {};\n }\n}\nstd::optional MemoryUsageTracker::GetCostOfHostOffload(\n const Item* candidate_item, int64_t memory_limit_bytes) const {\n CHECK(candidate_item != nullptr);\n HloInstruction* candidate_instruction = candidate_item->instruction;\n VLOG(2)\n << \"Considering host offload as an option for remat. 
looking at instr \"\n << candidate_instruction->name();\n if (candidate_item->buffers_output.size() != 1) {\n VLOG(2) << \" \" << candidate_instruction->name()\n << \" has more than one output buffer; cannot offload to host.\";\n return {};\n }\n for (auto buffer_id : candidate_item->buffers_defined) {\n for (auto use : buffers_.at(buffer_id).users) {\n if (use.user->instruction->opcode() == HloOpcode::kBitcast) {\n VLOG(3) << \" \" << candidate_item->instruction->name()\n << \" has a user which is a bitcast instruction(\"\n << use.user->instruction->name()\n << \"); cannot offload \"\n \"to host.\";\n return {};\n } else if (use.user->instruction->opcode() == HloOpcode::kTuple) {\n VLOG(3) << \" \" << candidate_item->instruction->name()\n << \" has a user which is a tuple instruction(\"\n << use.user->instruction->name()\n << \"); cannot offload \"\n \"to host.\";\n return {};\n }\n }\n }\n const Buffer& output_buffer = buffers_.at(candidate_item->buffers_output[0]);\n if (!candidate_item->placed || candidate_item == in_progress_item_ ||\n output_buffer.live_out) {\n VLOG(2) << \" \" << candidate_instruction->name()\n << \" is not yet placed, is in progress, or is \\\"live_out\\\"; cannot \"\n \"offload to host.\";\n return {};\n }\n const bool current_instruction_uses_this_item = [&]() {\n if (in_progress_item_ == nullptr) {\n return false;\n }\n const auto& output_buffer_ids = candidate_item->buffers_output;\n for (const auto& output_buffer_id : output_buffer_ids) {\n const Buffer& output_buffer = buffers_.at(output_buffer_id);\n for (const auto& use : output_buffer.users) {\n if (use.user == in_progress_item_) {\n return true;\n }\n }\n }\n return false;\n }();\n if (current_instruction_uses_this_item) {\n VLOG(2) << \" \" << candidate_instruction->name()\n << \" is used by the current instruction in mem tracker (\"\n << in_progress_item_->instruction->name()\n << \"); cannot offload to host.\";\n return {};\n }\n const int64_t bytes_used_by_buffers =\n 
BytesUsedByBuffers(candidate_item, true);\n if (bytes_used_by_buffers == 0) {\n VLOG(2) << \" \" << candidate_instruction->name()\n << \" consumes no memory; no point in offloading.\";\n return {};\n }\n const auto [placed_uses, unplaced_uses] =\n GetPlacedAndUnplacedUsers(output_buffer.users);\n const Item* last_placed_user = nullptr;\n const Item* first_unplaced_user = nullptr;\n for (const auto* item = instruction_list_.first(); item != nullptr;\n item = instruction_list_.next(item)) {\n if (absl::c_find_if(placed_uses, [&](const auto& use) {\n return use.user == item;\n }) != placed_uses.end()) {\n last_placed_user = item;\n }\n if (first_unplaced_user == nullptr &&\n absl::c_find_if(unplaced_uses, [&](const auto& use) {\n return use.user == item;\n }) != unplaced_uses.end()) {\n first_unplaced_user = item;\n break;\n }\n }\n if (last_placed_user == nullptr) {\n VLOG(3) << \" \" << candidate_instruction->name()\n << \" has no placed users, starting search at self.\";\n last_placed_user = candidate_item;\n }\n CHECK(first_unplaced_user != nullptr)\n << \"Didn't find any unplaced user for instruction \\\"\"\n << candidate_instruction->name()\n << \"\\\". 
There must be a \"\n \"bug in how we calculate how much memory this item uses.\";\n float time_spent_before_next_use = 0.0;\n for (auto* item = last_placed_user; item != first_unplaced_user;\n item = instruction_list_.next(item)) {\n time_spent_before_next_use += std::max(\n 0.0f, options_.hlo_cost_analysis.optimal_seconds(*item->instruction));\n }\n if (time_spent_before_next_use <= 0.0) {\n return {};\n }\n const float time_spent_on_copies =\n bytes_used_by_buffers / options_.host_memory_offload_config\n ->bandwidth_to_host_bytes_per_second +\n bytes_used_by_buffers / options_.host_memory_offload_config\n ->bandwidth_from_host_bytes_per_second;\n if (time_spent_before_next_use < time_spent_on_copies) {\n return {};\n }\n VLOG(3) << \" \" << candidate_instruction->name() << \" has enough time (\"\n << time_spent_before_next_use\n << \") between itself and next use. The memcpy out and back will take \"\n << time_spent_on_copies << \"s\";\n return memory_limit_bytes / bytes_used_by_buffers;\n}\nstd::optional MemoryUsageTracker::GetCostOfRecompute(\n const std::vector& candidate_items,\n int64_t memory_limit_bytes) const {\n for (auto* item : candidate_items) {\n HloInstruction* candidate = item->instruction;\n if (std::any_of(\n candidate->control_successors().begin(),\n candidate->control_successors().end(),\n [this](const HloInstruction* inst) { return IsPlaced(inst); })) {\n return {};\n }\n }\n VLOG(5) << \"Block contains:\";\n for (auto* hlo : candidate_items) {\n VLOG(5) << hlo->instruction->name();\n }\n const int64_t memory_reduced = MemoryReducedIfRematerialized(candidate_items);\n if (memory_reduced <= 0) {\n return {};\n }\n return RematerializationCost(candidate_items, memory_reduced,\n memory_limit_bytes);\n}\nstd::tuple, RematStrategy, int>\nMemoryUsageTracker::PickRematerializationCandidates(\n const InstructionList& instruction_list, int64_t memory_limit_bytes,\n absl::flat_hash_map* rematerializable_map,\n int min_block_size, int max_block_size, 
int64_t peak_memory_bytes) {\n std::vector best_items;\n int64_t best_cost = std::numeric_limits::max();\n RematStrategy best_strategy;\n int effort = 0;\n VLOG(5) << \"Picking candidate block with size in [\" << min_block_size << \", \"\n << max_block_size << \"]\";\n for (auto* start_item = instruction_list.first_skip_node();\n start_item != nullptr;\n start_item = instruction_list.next_skip_node(start_item)) {\n std::vector block =\n GetInitialBlock(instruction_list, *this, start_item, min_block_size);\n if (block.size() < min_block_size) {\n break;\n }\n if (AnyDenylistedOrNonRematerializable(block, rematerializable_map)) {\n continue;\n }\n if (options_.remat_mode_config.compress && block.size() == 1) {\n auto cost =\n GetCostOfCompression(block[0], memory_limit_bytes, peak_memory_bytes);\n ++effort;\n if (cost && *cost < best_cost) {\n VLOG(1) << \"Found new best cost; from \" << best_cost << \" to \" << *cost\n << \" with strategy kCompress on block of size \" << block.size();\n best_strategy.kind = RematStrategy::kCompress;\n best_strategy.compact_shape =\n *GetCompactShape(block[0]->instruction).value();\n best_items = block;\n best_cost = *cost;\n }\n }\n if (options_.remat_mode_config.host_offload && block.size() == 1) {\n auto cost = GetCostOfHostOffload(block[0], memory_limit_bytes);\n ++effort;\n if (cost && *cost < best_cost) {\n VLOG(1) << \"Found new best cost; from \" << best_cost << \" to \" << *cost\n << \" with strategy kHostOffload on block of size \"\n << block.size();\n best_strategy.kind = RematStrategy::kHostOffload;\n best_items = block;\n best_cost = *cost;\n }\n }\n if (!options_.remat_mode_config.recompute) {\n continue;\n }\n while (block.size() <= max_block_size) {\n auto cost = GetCostOfRecompute(block, memory_limit_bytes);\n ++effort;\n if (cost && *cost < best_cost) {\n VLOG(1) << \"Found new best cost; from \" << best_cost << \" to \" << *cost\n << \" with strategy kRecompute on block of size \"\n << block.size();\n 
best_strategy.kind = RematStrategy::kRecompute;\n best_items = block;\n best_cost = *cost;\n }\n auto* last_item = block[block.size() - 1];\n auto* next_item = instruction_list.next(last_item);\n if (next_item == nullptr || next_item->denylisted || !next_item->placed ||\n next_item == in_progress_item_ ||\n !CanBeRematerialized(next_item->instruction, rematerializable_map)) {\n break;\n }\n block.push_back(next_item);\n }\n }\n return {best_items, best_strategy, effort};\n}\nbool MemoryUsageTracker::HasUnplacedUsers(Item* item) const {\n for (BufferId buffer_id : item->buffers_defined) {\n const Buffer& buffer = buffers_.at(buffer_id);\n for (const ItemUse& user : buffer.users) {\n if (!user.user->placed) {\n return true;\n }\n }\n }\n return false;\n}\nUsesList MemoryUsageTracker::GetItemUses(Item* item) const {\n UsesList combined_users;\n for (BufferId buffer_id : item->buffers_defined) {\n const Buffer& buffer = buffers_.at(buffer_id);\n for (const ItemUse& user : buffer.users) {\n combined_users.push_back(user);\n }\n }\n return combined_users;\n}\nabsl::StatusOr RematerializeInstructions(\n MemoryUsageTracker* memory_tracker, std::vector* best_items,\n absl::flat_hash_set* remat_move_instructions,\n InstructionList* instruction_list, HloSchedule* schedule,\n HloRematerialization* rematerialization) {\n int64_t net_instructions_added = 0;\n std::vector instruction_names(best_items->size());\n for (int i = best_items->size() - 1; i >= 0; --i) {\n Item* best_item = (*best_items)[i];\n HloInstruction* best = best_item->instruction;\n instruction_names[i] = best->name();\n HloComputation* computation = best->parent();\n if (!memory_tracker->HasUnplacedUsers(best_item)) {\n continue;\n }\n HloCloneContext context(computation->parent());\n HloInstruction* remat =\n computation->AddInstruction(best->Clone(\"remat\", &context));\n for (auto& cloned_computation_pair : context.cloned_computations()) {\n if 
(!schedule->is_computation_scheduled(cloned_computation_pair.first)) {\n continue;\n }\n HloInstructionSequence& sequence =\n schedule->GetOrCreateSequence(cloned_computation_pair.second);\n HloInstructionSequence& old_sequence =\n schedule->GetOrCreateSequence(cloned_computation_pair.first);\n for (HloInstruction* instr : old_sequence.instructions()) {\n sequence.push_back(instr);\n }\n }\n if (DynCast(best) &&\n DynCast(best)->channel_id()) {\n remat->set_channel_id(rematerialization->NextChannelId());\n }\n TF_RETURN_IF_ERROR(remat->CopyAllControlDepsFrom(best));\n Item* remat_item = instruction_list->CreateItem(remat);\n absl::InlinedVector indirect_users;\n absl::flat_hash_map gte_cache;\n for (auto& user : memory_tracker->GetItemUses(best_item)) {\n if (!memory_tracker->IsPlaced(user.user->instruction)) {\n VLOG(2) << \" Replacing use of \" << best->name() << \" in \"\n << user.user->instruction->name() << \" with \" << remat->name();\n HloInstruction* remat_use = remat;\n HloInstruction* const user_operand =\n user.user->instruction->mutable_operand(user.operand_number);\n if (remat_use == user_operand) {\n continue;\n }\n if (user.index && remat_use->shape() != user_operand->shape()) {\n auto cached_gte = gte_cache.find(*user.index);\n if (cached_gte == gte_cache.end()) {\n remat_use = computation->AddInstruction(\n HloInstruction::CreateGetTupleElement(\n ShapeUtil::GetTupleElementShape(remat_use->shape(),\n *user.index),\n remat_use, *user.index),\n \"gte.remat\");\n indirect_users.push_back(instruction_list->CreateItem(remat_use));\n gte_cache[*user.index] = remat_use;\n } else {\n remat_use = cached_gte->second;\n }\n }\n if (user_operand->shape() != remat_use->shape()) {\n remat_use = computation->AddInstruction(\n HloInstruction::CreateBitcast(user_operand->shape(), remat_use),\n \"bitcast.remat\");\n indirect_users.push_back(instruction_list->CreateItem(remat_use));\n }\n TF_RETURN_IF_ERROR(user.user->instruction->ReplaceOperandWith(\n 
user.operand_number, remat_use));\n }\n }\n TF_RETURN_IF_ERROR(memory_tracker->AddRematerializedInstruction(\n best_item, remat_item, absl::MakeSpan(indirect_users)));\n ItemList place_before;\n const absl::flat_hash_set indirect_users_set(indirect_users.begin(),\n indirect_users.end());\n for (auto user : remat->users()) {\n if (!indirect_users_set.contains(instruction_list->GetItem(user))) {\n place_before.push_back(instruction_list->GetItem(user));\n }\n }\n for (auto* indirect_user : indirect_users) {\n for (auto user : indirect_user->instruction->users()) {\n if (!indirect_users_set.contains(instruction_list->GetItem(user))) {\n place_before.push_back(instruction_list->GetItem(user));\n }\n }\n }\n for (auto* operand : remat->operands()) {\n for (auto* operand_user : operand->users()) {\n if (operand_user != remat) {\n Item* operand_user_item = instruction_list->GetItem(operand_user);\n if (!operand_user_item->placed) {\n place_before.push_back(operand_user_item);\n }\n }\n }\n }\n for (auto successor : remat->control_successors()) {\n Item* successor_item = instruction_list->GetItem(successor);\n CHECK(!successor_item->placed) << successor_item->instruction->name();\n place_before.push_back(successor_item);\n }\n instruction_list->InsertBeforeInstructions(remat_item, place_before);\n for (auto* bitcast : indirect_users) {\n instruction_list->InsertBeforeInstructions(bitcast, place_before);\n }\n std::function uses_empty = [&](HloInstruction* i) {\n for (auto* u : i->users()) {\n if (!IsSupportedIndirectUser(u) || !uses_empty(u)) {\n return false;\n }\n }\n return true;\n };\n if (uses_empty(best)) {\n VLOG(2) << best->name() << \" is now dead\";\n if (ContainsKey(*remat_move_instructions, best)) {\n instruction_list->Denylist(remat);\n }\n remat_move_instructions->insert(remat);\n net_instructions_added += indirect_users.size();\n } else {\n net_instructions_added += indirect_users.size() + 1;\n }\n for (auto* indirect_user : indirect_users) {\n 
instruction_list->Denylist(indirect_user->instruction);\n }\n if (HloDataflowAnalysis::IsAsynchronousOperationStart(best->opcode()) ||\n HloDataflowAnalysis::IsAsynchronousOperationDone(best->opcode())) {\n VLOG(2) << \"The old instruction \" << best->name()\n << \" is an async op. Removing to maintain one start to one done \"\n \"invariant to keep the HLO valid.\";\n TF_RETURN_IF_ERROR(best->DropAllControlDeps());\n TF_RETURN_IF_ERROR(computation->RemoveInstruction(best));\n }\n }\n return net_instructions_added;\n}\nabsl::StatusOr CompressInstruction(MemoryUsageTracker* memory_tracker,\n Item* best_item,\n const Shape& compact_shape,\n InstructionList* instruction_list) {\n HloInstruction* best = best_item->instruction;\n VLOG(5) << \"Transposing instruction \" << best->name() << \" (saving \"\n << HumanReadableNumBytes(memory_tracker->MemoryReducedIfCompressed(\n best_item, compact_shape))\n << \") to\" << compact_shape.ToString(true);\n HloComputation* computation = best->parent();\n HloInstruction* compressed = computation->AddInstruction(\n HloInstruction::CreateUnary(compact_shape, HloOpcode::kCopy, best),\n absl::StrCat(best->name(), \".remat_compressed\"));\n HloInstruction* uncompressed = computation->AddInstruction(\n HloInstruction::CreateUnary(best->shape(), HloOpcode::kCopy, compressed),\n absl::StrCat(best->name(), \".remat_uncompressed\"));\n Item* compressed_item = instruction_list->CreateItem(compressed);\n compressed_item->placed = true;\n Item* uncompressed_item = instruction_list->CreateItem(uncompressed);\n std::vector best_users_copy = best->users();\n for (HloInstruction* user : best_users_copy) {\n if (!memory_tracker->IsPlaced(user)) {\n VLOG(5) << \" Replacing use of \" << best->name() << \" in \" << user->name()\n << \" with \" << uncompressed->name();\n TF_RETURN_IF_ERROR(best->ReplaceUseWith(user, uncompressed));\n }\n }\n TF_RETURN_IF_ERROR(memory_tracker->AddCompressInstructions(\n best_item, compressed_item, uncompressed_item));\n 
ItemList place_before;\n for (auto user : uncompressed->users()) {\n place_before.push_back(instruction_list->GetItem(user));\n }\n instruction_list->Denylist(compressed_item->instruction);\n instruction_list->Denylist(uncompressed_item->instruction);\n instruction_list->InsertBeforeInstructions(uncompressed_item, place_before);\n instruction_list->InsertAfterInstructions(compressed_item, {best_item});\n return 2;\n}\nabsl::StatusOr OffloadInstruction(MemoryUsageTracker* memory_tracker,\n Item* best_item,\n InstructionList* instruction_list) {\n HloInstruction* best_instruction = best_item->instruction;\n HloComputation* computation = best_instruction->parent();\n VLOG(2) << \"Best_instruction's users: \"\n << absl::StrJoin(best_instruction->users(), \", \",\n [](std::string* str, const auto* x) {\n return str->append(x->name());\n });\n Shape instruction_shape_device = best_instruction->shape();\n Shape instruction_shape_host = best_instruction->shape();\n instruction_shape_host.mutable_layout()->set_memory_space(\n memory_tracker->options().host_memory_offload_config->host_memory_space);\n Shape context_shape = ShapeUtil::MakeShape(U32, {});\n HloInstruction* copy_start_to_host =\n computation->AddInstruction(HloInstruction::CreateCopyStart(\n ShapeUtil::MakeTupleShape({instruction_shape_host,\n instruction_shape_device, context_shape}),\n best_instruction));\n HloInstruction* copy_done_to_host =\n computation->AddInstruction(HloInstruction::CreateUnary(\n instruction_shape_host, HloOpcode::kCopyDone, copy_start_to_host));\n HloInstruction* copy_start_to_device =\n computation->AddInstruction(HloInstruction::CreateCopyStart(\n ShapeUtil::MakeTupleShape({instruction_shape_device,\n instruction_shape_host, context_shape}),\n copy_done_to_host));\n HloInstruction* copy_done_to_device = computation->AddInstruction(\n HloInstruction::CreateUnary(instruction_shape_device,\n HloOpcode::kCopyDone, copy_start_to_device));\n VLOG(3) << \"Created copy_start_to_host instr: 
\"\n << copy_start_to_host->ToString();\n VLOG(3) << \"Created copy_done_to_host instr: \"\n << copy_done_to_host->ToString();\n VLOG(3) << \"Created copy_start_to_device instr: \"\n << copy_start_to_device->ToString();\n VLOG(3) << \"Created copy_done_to_device instr: \"\n << copy_done_to_device->ToString();\n TF_RETURN_IF_ERROR(\n copy_start_to_host->Visit(&memory_tracker->options().hlo_cost_analysis));\n TF_RETURN_IF_ERROR(\n copy_done_to_host->Visit(&memory_tracker->options().hlo_cost_analysis));\n TF_RETURN_IF_ERROR(copy_start_to_device->Visit(\n &memory_tracker->options().hlo_cost_analysis));\n TF_RETURN_IF_ERROR(\n copy_done_to_device->Visit(&memory_tracker->options().hlo_cost_analysis));\n Item* copy_start_to_host_item =\n instruction_list->CreateItem(copy_start_to_host);\n Item* copy_done_to_host_item =\n instruction_list->CreateItem(copy_done_to_host);\n Item* copy_start_to_device_item =\n instruction_list->CreateItem(copy_start_to_device);\n Item* copy_done_to_device_item =\n instruction_list->CreateItem(copy_done_to_device);\n instruction_list->Denylist(copy_start_to_host);\n instruction_list->Denylist(copy_done_to_host);\n instruction_list->Denylist(copy_start_to_device);\n instruction_list->Denylist(copy_done_to_device);\n Item* place_before{nullptr};\n {\n ItemList place_before_list;\n for (auto user : best_instruction->users()) {\n if (user == copy_start_to_host) {\n continue;\n }\n auto item_of_user = instruction_list->GetItem(user);\n if (item_of_user->placed) {\n continue;\n }\n place_before_list.push_back(item_of_user);\n }\n CHECK(!place_before_list.empty()) << \"Have nothing to place this before!\";\n for (auto* item = instruction_list->first(); item != nullptr;\n item = instruction_list->next(item)) {\n if (absl::c_linear_search(place_before_list, item)) {\n place_before = item;\n break;\n }\n }\n }\n CHECK_NE(place_before, nullptr)\n << \"Could not find an item to place this before.\";\n auto get_first_item_after_compute_time = [&](Item* 
start_item, Item* end_item,\n auto successor_func,\n float time_spent_on_copy) {\n float time_so_far = 0.0;\n auto* current_item = start_item;\n while (time_so_far < time_spent_on_copy) {\n auto next_item = successor_func(current_item);\n if (next_item == end_item) {\n LOG(WARNING) << \"Didn't find enough computation before end of window\";\n break;\n }\n current_item = next_item;\n CHECK_NE(current_item, nullptr) << \"current_item is null\";\n CHECK_NE(current_item->instruction, nullptr)\n << \"current_item's instruction is null\";\n time_so_far += std::max(\n 0.0f, memory_tracker->options().hlo_cost_analysis.optimal_seconds(\n *current_item->instruction));\n }\n return current_item;\n };\n const int64_t bytes_used_by_buffers = memory_tracker->BytesUsedByBuffers(\n best_item, false);\n const float copy_to_host_time_seconds =\n bytes_used_by_buffers /\n memory_tracker->options()\n .host_memory_offload_config->bandwidth_to_host_bytes_per_second;\n const float copy_from_host_time_seconds =\n bytes_used_by_buffers /\n memory_tracker->options()\n .host_memory_offload_config->bandwidth_from_host_bytes_per_second;\n VLOG(2) << \"Item uses \" << bytes_used_by_buffers << \"B and will take \"\n << copy_to_host_time_seconds << \"s to copy to host and \"\n << copy_from_host_time_seconds << \"s to copy from host.\";\n VLOG(2) << \"Inserting \" << copy_start_to_host_item->instruction->name()\n << \" immediately after \" << best_item->instruction->name();\n instruction_list->InsertAfterInstructions(copy_start_to_host_item,\n {best_item});\n VLOG(2) << \"Inserting \" << copy_done_to_device_item->instruction->name()\n << \" immediately before \" << place_before->instruction->name();\n instruction_list->InsertBeforeInstructions(copy_done_to_device_item,\n {place_before});\n auto first_item_after_to_host_copy = get_first_item_after_compute_time(\n copy_start_to_host_item, copy_done_to_device_item,\n [&instruction_list](Item* item) { return instruction_list->next(item); },\n 
copy_to_host_time_seconds);\n VLOG(2) << \"Inserting \" << copy_done_to_host_item->instruction->name()\n << \" immediately after \"\n << first_item_after_to_host_copy->instruction->name();\n instruction_list->InsertAfterInstructions(copy_done_to_host_item,\n {first_item_after_to_host_copy});\n auto first_item_before_from_host_copy = get_first_item_after_compute_time(\n copy_done_to_device_item, copy_done_to_host_item,\n [&instruction_list](Item* item) { return instruction_list->prev(item); },\n copy_from_host_time_seconds);\n VLOG(2) << \"Inserting \" << copy_start_to_device_item->instruction->name()\n << \" immediately before \"\n << first_item_before_from_host_copy->instruction->name();\n instruction_list->InsertBeforeInstructions(\n copy_start_to_device_item, {first_item_before_from_host_copy});\n {\n auto item = instruction_list->first();\n while (item != nullptr) {\n if (item == copy_start_to_host_item || item == copy_done_to_host_item ||\n item == copy_start_to_device_item ||\n item == copy_done_to_device_item) {\n item->placed = true;\n } else if (memory_tracker->IsInProgressItem(item)) {\n break;\n }\n item = instruction_list->next(item);\n }\n }\n std::vector best_users_copy = best_instruction->users();\n for (HloInstruction* user : best_users_copy) {\n if (!memory_tracker->IsPlaced(user)) {\n VLOG(3) << \" Replacing use of \" << best_instruction->name() << \" in \"\n << user->name() << \" with \" << copy_done_to_device->name();\n TF_RETURN_IF_ERROR(\n best_instruction->ReplaceUseWith(user, copy_done_to_device));\n } else {\n VLOG(3) << user->name() << \" is placed, not going to update\";\n }\n }\n TF_RETURN_IF_ERROR(memory_tracker->AddHostOffloadCopyInstructions(\n best_item, copy_start_to_host_item, copy_done_to_host_item,\n copy_start_to_device_item, copy_done_to_device_item));\n return 4;\n}\nstruct InstructionsAdded {\n int remat_count;\n int net_instructions_added;\n int effort;\n};\nabsl::StatusOr RematerializeBestBlock(\n int min_block_size, int 
max_block_size, MemoryUsageTracker* memory_tracker,\n InstructionList* instruction_list, HloSchedule* schedule,\n int64_t memory_limit_bytes,\n absl::flat_hash_map* rematerializable_map,\n absl::flat_hash_set* remat_move_instructions,\n HloRematerialization* rematerialization) {\n CHECK(min_block_size > 0) << \"Negative block size.\";\n std::vector best_items;\n RematStrategy best_strategy;\n int effort;\n std::tie(best_items, best_strategy, effort) =\n memory_tracker->PickRematerializationCandidates(\n *instruction_list, memory_limit_bytes, rematerializable_map,\n min_block_size, max_block_size,\n rematerialization->ComputationPeakMemory(\n memory_tracker->computation()));\n InstructionsAdded num_instructions_added;\n num_instructions_added.remat_count = best_items.size();\n num_instructions_added.effort = effort;\n if (best_items.empty()) {\n num_instructions_added.net_instructions_added = 0;\n return num_instructions_added;\n }\n if (best_strategy.kind == RematStrategy::kCompress) {\n CHECK(best_items.size() == 1)\n << \"More than one instruction compressed simultaneously.\";\n HloInstruction* best = best_items[0]->instruction;\n VLOG(1) << \"Remat via compression: \" << best->name() << \" (saving \"\n << HumanReadableNumBytes(memory_tracker->MemoryReducedIfCompressed(\n best_items[0], best_strategy.compact_shape))\n << \")\";\n TF_ASSIGN_OR_RETURN(\n num_instructions_added.net_instructions_added,\n CompressInstruction(memory_tracker, best_items[0],\n best_strategy.compact_shape, instruction_list));\n } else if (best_strategy.kind == RematStrategy::kHostOffload) {\n CHECK_EQ(best_items.size(), 1)\n << \"More than one buffer offloaded simultaneously.\";\n VLOG(1) << \"Remat via offload: \" << best_items[0]->instruction->name();\n TF_ASSIGN_OR_RETURN(\n num_instructions_added.net_instructions_added,\n OffloadInstruction(memory_tracker, best_items[0], instruction_list));\n VLOG(4) << \"Offload done, hlo computation:\\n\"\n << 
memory_tracker->computation()->ToString();\n VLOG(6) << \"Memory tracker:\\n\" << memory_tracker->ToString();\n } else {\n CHECK_EQ(best_strategy.kind, RematStrategy::kRecompute)\n << \"Expecting strategy to be Recompute\";\n VLOG(1) << \"Remat via recomputation: {\"\n << absl::StrJoin(best_items, \", \",\n [](std::string* out, Item* item) {\n absl::StrAppend(out, item->instruction->name());\n })\n << '}';\n TF_ASSIGN_OR_RETURN(\n num_instructions_added.net_instructions_added,\n RematerializeInstructions(memory_tracker, &best_items,\n remat_move_instructions, instruction_list,\n schedule, rematerialization));\n }\n return num_instructions_added;\n}\n} \nabsl::StatusOr HloRematerialization::ComputePeakMemory(\n const HloComputation* computation, const HloInstructionSequence& order,\n const absl::flat_hash_set& execution_threads) const {\n InstructionList instruction_list(order);\n MemoryUsageTracker tracker(options_, computation, *points_to_analysis_,\n instruction_list);\n int64_t peak_memory = tracker.memory_usage();\n for (auto* item = instruction_list.first(); item != nullptr;\n item = instruction_list.next(item)) {\n const HloInstruction* instruction = item->instruction;\n TF_RETURN_IF_ERROR(tracker.BeginInstruction(item));\n TF_ASSIGN_OR_RETURN(\n int64_t callee_usage,\n CalledComputationsMemoryUsage(instruction, execution_threads));\n peak_memory =\n std::max(peak_memory, tracker.memory_usage() + callee_usage);\n TF_RETURN_IF_ERROR(tracker.EndInstruction());\n }\n VLOG(1) << \"Peak memory for \" << computation->name() << \": \"\n << HumanReadableNumBytes(peak_memory);\n return peak_memory;\n}\nabsl::StatusOr HloRematerialization::CalledComputationsMemoryUsage(\n const HloInstruction* instruction,\n const absl::flat_hash_set& execution_threads) const {\n const CallSite* callsite =\n call_graph_->GetNode(instruction->parent()).GetCallSite(instruction);\n if (callsite == nullptr || callsite->context() == CallContext::kEmbedded) {\n return 0;\n }\n int64_t 
callee_usage = 0;\n for (const HloComputation* computation : callsite->called_computations()) {\n if (!HloInstruction::IsThreadIncluded(computation->execution_thread(),\n execution_threads)) {\n continue;\n }\n TF_RET_CHECK(ContainsKey(computation_peak_memory_, computation));\n callee_usage += computation_peak_memory_.at(computation);\n }\n return callee_usage;\n}\nabsl::StatusOr HloRematerialization::RematerializeComputation(\n HloComputation* computation, HloSchedule* schedule,\n int64_t memory_limit_bytes, int64_t min_remat_size,\n const absl::flat_hash_set& execution_threads) {\n const auto peak_memory_usage = computation_peak_memory_.at(computation);\n if (peak_memory_usage <= memory_limit_bytes) {\n VLOG(1) << \"Asked to rematerialize computation of size \"\n << peak_memory_usage\n << \" but it already fits within the given memory limit (\"\n << memory_limit_bytes << \")\";\n return false;\n }\n VLOG(1) << \"Rematerializing computation \" << computation->name()\n << \" with limit \" << HumanReadableNumBytes(memory_limit_bytes);\n VLOG(1) << \"peak memory usage is \"\n << HumanReadableNumBytes(peak_memory_usage);\n CHECK(!ContainsKey(rematerialized_computations_, computation));\n InstructionList instruction_list(schedule->sequence(computation));\n MemoryUsageTracker memory_tracker(options_, computation, *points_to_analysis_,\n instruction_list);\n instruction_list.PromoteNodesToSkip([&](Item* item) {\n return memory_tracker.AllocatedSize(item) >= min_remat_size;\n });\n bool changed = false;\n absl::flat_hash_set remat_move_instructions;\n absl::flat_hash_map rematerializable_map;\n int64_t peak_memory = memory_tracker.memory_usage();\n int64_t remat_count = 0;\n int64_t net_instructions_added = 0;\n const CallGraphNode& call_graph_node = call_graph_->GetNode(computation);\n int64_t instruction_index = 0;\n for (auto* item = instruction_list.first(); item != nullptr;\n item = instruction_list.next(item)) {\n const HloInstruction* instruction = 
item->instruction;\n TF_ASSIGN_OR_RETURN(\n int64_t callee_usage,\n CalledComputationsMemoryUsage(instruction, execution_threads));\n TF_RETURN_IF_ERROR(memory_tracker.BeginInstruction(item));\n VLOG(2) << \"Program point at \" << instruction->name()\n << \", memory usage = \" << memory_tracker.memory_usage()\n << \", callee usage = \" << callee_usage << \", [\" << instruction_index\n << \"/\" << instruction_list.size() << \"]\";\n instruction_index++;\n int min_block_size = 1;\n int max_block_size = 1;\n if (memory_tracker.AllocatedSize(item) + callee_usage > 0) {\n bool is_first_phase = true;\n int64_t first_phase_effort = 0;\n int64_t second_phase_effort = 0;\n while (memory_tracker.memory_usage() + callee_usage >\n memory_limit_bytes) {\n VLOG(2) << \"Over memory limit at instruction \" << instruction->name()\n << \", using \"\n << HumanReadableNumBytes(memory_tracker.memory_usage() +\n callee_usage)\n << \", limit is \" << HumanReadableNumBytes(memory_limit_bytes);\n TF_ASSIGN_OR_RETURN(\n InstructionsAdded instructions_added,\n RematerializeBestBlock(min_block_size, max_block_size,\n &memory_tracker, &instruction_list, schedule,\n memory_limit_bytes, &rematerializable_map,\n &remat_move_instructions, this));\n net_instructions_added += instructions_added.net_instructions_added;\n remat_count += instructions_added.remat_count;\n if (is_first_phase) {\n first_phase_effort += instructions_added.effort;\n } else {\n second_phase_effort += instructions_added.effort;\n }\n if (instructions_added.net_instructions_added > 0) {\n VLOG(1) << \"memory_usage after rematerialization = \"\n << HumanReadableNumBytes(memory_tracker.memory_usage());\n }\n if (instructions_added.remat_count == 0) {\n min_block_size = max_block_size + 1;\n max_block_size = 2 * max_block_size;\n is_first_phase = false;\n } else {\n max_rematerialized_block_size_ =\n std::max(max_rematerialized_block_size_, max_block_size);\n changed = true;\n min_block_size = 1;\n max_block_size = 1;\n }\n if 
(max_block_size > options_.block_size_limit ||\n second_phase_effort >\n options_.block_rematerialization_factor * first_phase_effort) {\n break;\n }\n }\n }\n const CallSite* callsite = call_graph_node.GetCallSite(instruction);\n if (callsite != nullptr &&\n callsite->context() == CallContext::kControlFlow &&\n memory_tracker.memory_usage() + callee_usage > memory_limit_bytes) {\n VLOG(1) << \"Memory usage still over the limit (\"\n << (memory_tracker.memory_usage() + callee_usage) << \" > \"\n << memory_limit_bytes\n << \"). Rematerializing computations called by \"\n << instruction->name();\n for (HloComputation* called_computation :\n callsite->called_computations()) {\n if (!ContainsKey(rematerialized_computations_, called_computation) &&\n HloInstruction::IsThreadIncluded(\n called_computation->execution_thread(), execution_threads)) {\n int64_t subcomputation_memory_limit_bytes = std::max(\n 0, memory_limit_bytes - memory_tracker.memory_usage());\n TF_ASSIGN_OR_RETURN(\n bool subcomputation_changed,\n RematerializeComputation(called_computation, schedule,\n subcomputation_memory_limit_bytes,\n min_remat_size, execution_threads));\n changed |= subcomputation_changed;\n }\n }\n TF_ASSIGN_OR_RETURN(callee_usage, CalledComputationsMemoryUsage(\n instruction, execution_threads));\n }\n peak_memory = std::max(\n peak_memory, memory_tracker.memory_usage() + callee_usage);\n VLOG(3) << \"peak memory usage = \" << HumanReadableNumBytes(peak_memory);\n TF_RETURN_IF_ERROR(memory_tracker.EndInstruction());\n }\n for (auto* instruction : computation->instructions()) {\n CHECK(memory_tracker.IsPlaced(instruction)) << instruction->name();\n }\n VLOG(1) << \"In computation \" << computation->name() << \" rematerialized \"\n << remat_count << \" instructions; \" << net_instructions_added\n << \" net instructions added\";\n VLOG(1) << \" peak memory usage now \" << HumanReadableNumBytes(peak_memory)\n << \" (was \"\n << 
HumanReadableNumBytes(computation_peak_memory_.at(computation))\n << \")\";\n computation_peak_memory_.at(computation) = peak_memory;\n HloInstructionSequence& sequence = schedule->GetOrCreateSequence(computation);\n sequence.clear();\n for (auto* item = instruction_list.first(); item != nullptr;\n item = instruction_list.next(item)) {\n HloInstruction* instruction = item->instruction;\n sequence.push_back(instruction);\n }\n rematerialized_computations_.insert(computation);\n instructions_rematerialized_ += remat_count;\n net_instructions_added_ += net_instructions_added;\n return changed;\n}\nabsl::StatusOr HloRematerialization::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n if (options_.remat_mode_config.host_offload) {\n CHECK(options_.host_memory_offload_config.has_value())\n << \"Host memory config is required when host memory offload strategy \"\n \"is specified\";\n }\n VLOG(1) << \"HloRematerialization() with memory limit of \"\n << HumanReadableNumBytes(options_.memory_limit_bytes);\n if (!options_.remat_mode_config.compress &&\n !options_.remat_mode_config.recompute &&\n !options_.remat_mode_config.host_offload) {\n VLOG(1) << \"All rematerialization strategies are disabled. 
Skipping.\";\n return false;\n }\n VLOG(2) << \"HloRemat mode: compress: \" << options_.remat_mode_config.compress\n << \", host_offload: \" << options_.remat_mode_config.host_offload\n << \", recompute: \" << options_.remat_mode_config.recompute;\n XLA_VLOG_LINES(3, \"Before HloRematerialization:\\n\" + module->ToString());\n computation_peak_memory_.clear();\n rematerialized_computations_.clear();\n instructions_rematerialized_ = 0;\n net_instructions_added_ = 0;\n TF_RET_CHECK(module->has_schedule());\n TF_ASSIGN_OR_RETURN(points_to_analysis_, TuplePointsToAnalysis::Run(module));\n next_channel_id_ = hlo_query::NextChannelId(*module);\n int64_t module_output_size = 0;\n ShapeUtil::ForEachSubshape(\n module->result_shape(),\n [&module_output_size, this](const Shape& subshape,\n const ShapeIndex& output_index) {\n module_output_size += options_.hlo_cost_analysis.GetShapeSize(subshape);\n });\n int64_t adjusted_memory_limit_bytes =\n std::max(0, options_.memory_limit_bytes - module_output_size);\n VLOG(1) << \"Adjusted memory limit accounting for output (\"\n << HumanReadableNumBytes(module_output_size)\n << \"): \" << HumanReadableNumBytes(adjusted_memory_limit_bytes);\n call_graph_ = CallGraph::Build(module);\n int64_t total_async_peak_memory = 0;\n if (!options_.async_computation_parallelism.empty()) {\n absl::flat_hash_set async_threads;\n for (const auto& [computation, _] :\n options_.async_computation_parallelism) {\n async_threads.insert(computation->execution_thread());\n }\n TF_RETURN_IF_ERROR(call_graph_->VisitNodes(\n [this, module,\n &async_threads](const CallGraphNode& node) -> absl::Status {\n auto callee_thread = node.computation()->execution_thread();\n if (node.context() == CallContext::kControlFlow &&\n HloInstruction::IsThreadIncluded(callee_thread, async_threads)) {\n TF_ASSIGN_OR_RETURN(computation_peak_memory_[node.computation()],\n ComputePeakMemory(node.computation(),\n module->schedule().sequence(\n node.computation()),\n 
{callee_thread}));\n }\n return absl::OkStatus();\n },\n false));\n int64_t async_peak_memory = 0;\n for (const auto [entry_computation, parallel_threads] :\n options_.async_computation_parallelism) {\n const int64_t peak_memory =\n computation_peak_memory_.at(entry_computation);\n const int64_t parallel_peak_memory = peak_memory * parallel_threads;\n async_peak_memory = std::max(async_peak_memory, parallel_peak_memory);\n }\n adjusted_memory_limit_bytes =\n std::max(0, adjusted_memory_limit_bytes - async_peak_memory);\n total_async_peak_memory += async_peak_memory;\n VLOG(1) << \"Adjusted memory limit accounting for async computations (\"\n << HumanReadableNumBytes(async_peak_memory)\n << \"): \" << HumanReadableNumBytes(adjusted_memory_limit_bytes);\n computation_peak_memory_.clear();\n }\n TF_RETURN_IF_ERROR(call_graph_->VisitNodes(\n [this, module,\n &execution_threads](const CallGraphNode& node) -> absl::Status {\n if (node.context() == CallContext::kControlFlow &&\n HloInstruction::IsThreadIncluded(\n node.computation()->execution_thread(), execution_threads)) {\n TF_ASSIGN_OR_RETURN(\n computation_peak_memory_[node.computation()],\n ComputePeakMemory(node.computation(),\n module->schedule().sequence(node.computation()),\n execution_threads));\n }\n return absl::OkStatus();\n },\n false));\n const int64_t before_peak_memory =\n computation_peak_memory_.at(module->entry_computation()) +\n module_output_size + total_async_peak_memory;\n VLOG(1) << \"Peak memory usage of module (before): \"\n << HumanReadableNumBytes(before_peak_memory);\n for (auto* computation :\n module->MakeComputationPostOrder(execution_threads)) {\n TF_RETURN_IF_ERROR(computation->Accept(&options_.hlo_cost_analysis));\n }\n TF_ASSIGN_OR_RETURN(\n bool changed,\n RematerializeComputation(module->entry_computation(), &module->schedule(),\n adjusted_memory_limit_bytes,\n options_.min_remat_size, execution_threads));\n HloSchedule saved_schedule = module->schedule();\n 
module->clear_schedule();\n TF_ASSIGN_OR_RETURN(bool dead_code_removed, HloDCE().Run(module));\n changed |= dead_code_removed;\n TF_RETURN_IF_ERROR(saved_schedule.Update(execution_threads));\n TF_RETURN_IF_ERROR(module->set_schedule(std::move(saved_schedule)));\n VLOG(1) << \"Rematerialized \" << instructions_rematerialized_\n << \" instructions in module \" << module->name() << \"; \"\n << net_instructions_added_ << \" net instructions added\";\n const int64_t current_peak_memory =\n computation_peak_memory_.at(module->entry_computation()) +\n module_output_size + total_async_peak_memory;\n VLOG(1) << \"Peak memory usage of module now \"\n << HumanReadableNumBytes(current_peak_memory) << \" (\"\n << current_peak_memory << \" bytes), was \"\n << HumanReadableNumBytes(before_peak_memory) << \" (\"\n << before_peak_memory << \" bytes)\";\n const int64_t reduced_peak_memory = before_peak_memory - current_peak_memory;\n VLOG(1) << \"Reduced peak memory by \"\n << HumanReadableNumBytes(reduced_peak_memory) << \" (\"\n << reduced_peak_memory << \" bytes)\";\n sizes_.before_bytes = before_peak_memory;\n sizes_.after_bytes = current_peak_memory;\n XLA_VLOG_LINES(5, \"After HloRematerialization:\\n\" + module->ToString());\n if (current_peak_memory > options_.memory_limit_bytes) {\n LOG(WARNING) << absl::StrFormat(\n \"Can't reduce memory use below %s (%d bytes) by rematerialization; \"\n \"only reduced to %s (%d bytes), down from %s (%d bytes) originally\",\n HumanReadableNumBytes(options_.memory_limit_bytes),\n options_.memory_limit_bytes, HumanReadableNumBytes(current_peak_memory),\n current_peak_memory, HumanReadableNumBytes(before_peak_memory),\n before_peak_memory);\n }\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/hlo_rematerialization.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/strings/match.h\"\n#include 
\"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/layout.h\"\n#include \"xla/service/hlo_cost_analysis.h\"\n#include \"xla/service/hlo_memory_scheduler.h\"\n#include \"xla/service/hlo_rematerialization_test_utils.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/statusor.h\"\n#include \"tsl/platform/test.h\"\nnamespace xla {\nnamespace {\nnamespace op = xla::testing::opcode_matchers;\nusing ::testing::_;\nclass AsyncRematerializationTest : public RematerializationTestBase {\n protected:\n absl::StatusOr RunHloRematerialization(\n int64_t memory_limit_bytes, HloModule* module,\n const absl::flat_hash_map&\n async_computation_parallelism,\n int64_t min_remat_size = 0) {\n TF_EXPECT_OK(verifier().Run(module).status());\n if (!module->has_schedule()) {\n HloMemoryScheduler scheduler(\n [](const BufferValue& buffer) { return ByteSizeOf(buffer.shape()); },\n ComputationSchedulerToModuleScheduler(DefaultMemoryScheduler));\n TF_EXPECT_OK(scheduler.Run(module).status());\n }\n HloRematerialization::RematerializationModeConfig config(\n true, true, false);\n auto shape_size_func = [](const Shape& shape) { return ByteSizeOf(shape); };\n HloCostAnalysis cost_analysis(shape_size_func);\n HloRematerialization::Options options(\n cost_analysis, config, memory_limit_bytes,\n 1, 1,\n min_remat_size, nullptr,\n std::nullopt,\n async_computation_parallelism);\n HloRematerialization::RematerializationSizes sizes;\n HloRematerialization remat(options, sizes);\n return remat.Run(module, {HloInstruction::kMainExecutionThread});\n }\n static constexpr int64_t kNumParallelThreads = 16;\n};\nTEST_F(AsyncRematerializationTest, AsyncComputation) {\n constexpr std::string_view hlo = R\"(\nHloModule async, 
is_scheduled=true\n%offload_computation {\n %param = f32[1]{0} parameter(0)\n %reshape = f32[] reshape(f32[1]{0} %param)\n %broadcast = f32[1024]{0} broadcast(f32[] %reshape), dimensions={}\n %negate = f32[1024]{0} negate(f32[1024]{0} %broadcast)\n %concatenate = f32[2048]{0} concatenate(f32[1024]{0} %negate, f32[1024]{0} %negate), dimensions={0}\n %slice = f32[1]{0} slice(f32[2048]{0} %concatenate), slice={[0:1]}\n %concatenate.1 = f32[1025]{0} concatenate(f32[1024]{0} %broadcast, f32[1]{0} %slice), dimensions={0}\n ROOT %slice.1 = f32[1]{0} slice(f32[1025]{0} %concatenate.1), slice={[0:1]}\n}\n%main_computation {\n %param = f32[1]{0} parameter(0)\n %reshape = f32[] reshape(f32[1]{0} %param)\n %broadcast = f32[1024]{0} broadcast(f32[] %reshape), dimensions={}\n %negate = f32[1024]{0} negate(f32[1024]{0} %broadcast)\n %concatenate = f32[2048]{0} concatenate(f32[1024]{0} %negate, f32[1024]{0} %negate), dimensions={0}\n %slice = f32[1]{0} slice(f32[2048]{0} %concatenate), slice={[0:1]}\n %concatenate.1 = f32[1025]{0} concatenate(f32[1024]{0} %broadcast, f32[1]{0} %slice), dimensions={0}\n ROOT %slice.1 = f32[1]{0} slice(f32[1025]{0} %concatenate.1), slice={[0:1]}\n}\nENTRY %main {\n %param = f32[1]{0} parameter(0)\n %call-start = ((f32[1]{0}), f32[1]{0}, s32[]) call-start(f32[1]{0} %param), to_apply=%offload_computation, async_execution_thread=\"offload\"\n %call-done = f32[1]{0} call-done(((f32[1]{0}), f32[1]{0}, s32[]) %call-start)\n ROOT %call = f32[1]{0} call(f32[1]{0} %call-done), to_apply=%main_computation\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n HloInstruction* call_start = FindInstruction(module.get(), \"call-start\");\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n RunHloRematerialization(\n kNumParallelThreads * 16 * 1024 + 14 * 1024,\n module.get(),\n {{call_start->async_wrapped_computation(), kNumParallelThreads}}));\n EXPECT_TRUE(changed);\n}\nclass RecomputeAndCompressHloRematerializationTest\n : public 
RematerializationTestBase {\n protected:\n absl::StatusOr RunHloRematerialization(int64_t memory_limit_bytes,\n HloModule* module,\n int64_t min_remat_size = 0) {\n TF_EXPECT_OK(verifier().Run(module).status());\n if (!module->has_schedule()) {\n HloMemoryScheduler scheduler(\n [](const BufferValue& buffer) { return ByteSizeOf(buffer.shape()); },\n ComputationSchedulerToModuleScheduler(DefaultMemoryScheduler));\n TF_EXPECT_OK(scheduler.Run(module).status());\n }\n for (const HloComputation* computation : module->computations()) {\n before_computation_names_.insert(computation->name());\n for (const HloInstruction* instruction : computation->instructions()) {\n before_instruction_names_.insert(instruction->name());\n }\n }\n HloRematerialization::RematerializationModeConfig config(\n true, true, false);\n auto shape_size_func = [](const Shape& shape) { return ByteSizeOf(shape); };\n HloCostAnalysis cost_analysis(shape_size_func);\n HloRematerialization::Options options(\n cost_analysis, config, memory_limit_bytes,\n 1, 1,\n min_remat_size, nullptr,\n std::nullopt,\n {});\n HloRematerialization::RematerializationSizes sizes;\n HloRematerialization remat(options, sizes);\n absl::StatusOr result = remat.Run(module);\n for (const HloComputation* computation : module->computations()) {\n if (!before_computation_names_.contains(computation->name())) {\n continue;\n }\n for (const HloInstruction* instruction : computation->instructions()) {\n after_instruction_names_.insert(instruction->name());\n }\n }\n return result;\n }\n void CheckForRematInInstructionNames(absl::string_view test_case_name) {\n constexpr const absl::string_view kRematInstructionNameMustContain =\n \".remat\";\n for (const auto& instruction_name : after_instruction_names_) {\n if (!before_instruction_names_.contains(instruction_name)) {\n EXPECT_TRUE(absl::StrContains(instruction_name,\n kRematInstructionNameMustContain))\n << \"[\" << test_case_name << \"] Instruction \\\"\" << instruction_name\n << 
\"\\\" must contain \\\"\" << kRematInstructionNameMustContain << \"\\\"\";\n }\n }\n }\n private:\n absl::flat_hash_set before_computation_names_;\n absl::flat_hash_set before_instruction_names_;\n absl::flat_hash_set after_instruction_names_;\n};\nTEST_F(RecomputeAndCompressHloRematerializationTest, SingleComputation) {\n auto module = CreateNewVerifiedModule();\n HloComputation* computation =\n module->AddEntryComputation(MakeRematerializableComputation());\n const HloInstruction* slice = computation->root_instruction();\n ASSERT_THAT(slice, op::Slice(op::Concatenate(op::Broadcast(_), _)));\n const HloInstruction* concat = slice->operand(0);\n const HloInstruction* bcast = concat->operand(0);\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunHloRematerialization(\n 14 * 1024, module.get()));\n EXPECT_TRUE(changed);\n EXPECT_EQ(computation->root_instruction(), slice);\n const HloInstruction* remat_bcast = concat->operand(0);\n EXPECT_THAT(remat_bcast, op::Broadcast(::testing::Ne(bcast)));\n EXPECT_EQ(module->schedule()\n .sequence(computation)\n .instructions()[computation->instruction_count() - 2],\n concat);\n EXPECT_EQ(module->schedule()\n .sequence(computation)\n .instructions()[computation->instruction_count() - 3],\n remat_bcast);\n CheckForRematInInstructionNames(\n ::testing::UnitTest::GetInstance()->current_test_info()->name());\n}\nTEST_F(RecomputeAndCompressHloRematerializationTest,\n SingleComputationNoWorthRemat) {\n auto module = CreateNewVerifiedModule();\n HloComputation* computation =\n module->AddEntryComputation(MakeRematerializableComputation());\n const HloInstruction* slice = computation->root_instruction();\n ASSERT_THAT(slice, op::Slice(op::Concatenate(op::Broadcast(_), _)));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunHloRematerialization(\n 14 * 1024, module.get(),\n 14 * 1024));\n EXPECT_FALSE(changed);\n}\nTEST_F(RecomputeAndCompressHloRematerializationTest,\n SingleComputationNoRematerialization) {\n auto module = 
CreateNewVerifiedModule();\n HloComputation* computation =\n module->AddEntryComputation(MakeRematerializableComputation());\n EXPECT_EQ(computation->instruction_count(), 8);\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunHloRematerialization(\n 20 * 1024, module.get()));\n EXPECT_FALSE(changed);\n EXPECT_EQ(computation->instruction_count(), 8);\n}\nTEST_F(RecomputeAndCompressHloRematerializationTest, RematerializeAroundWhile) {\n auto module = CreateNewVerifiedModule();\n auto cond_builder = HloComputation::Builder(TestName() + \".cond\");\n cond_builder.AddInstruction(\n HloInstruction::CreateParameter(0, vec1_shape_, \"param\"));\n cond_builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(true)));\n HloComputation* while_cond =\n module->AddEmbeddedComputation(cond_builder.Build());\n HloComputation* body_computation = module->AddEmbeddedComputation(\n MakeRematerializableComputation(\".body\"));\n HloComputation* entry_computation =\n module->AddEntryComputation(MakeRematerializableWhileComputation(\n while_cond, body_computation));\n EXPECT_EQ(entry_computation->instruction_count(), 7);\n EXPECT_EQ(body_computation->instruction_count(), 8);\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunHloRematerialization(\n 17 * 1024, module.get()));\n EXPECT_TRUE(changed);\n EXPECT_EQ(entry_computation->instruction_count(), 8);\n EXPECT_EQ(body_computation->instruction_count(), 8);\n CheckForRematInInstructionNames(\n ::testing::UnitTest::GetInstance()->current_test_info()->name());\n}\nTEST_F(RecomputeAndCompressHloRematerializationTest,\n RematerializeEntryAndWhileBody) {\n auto module = CreateNewVerifiedModule();\n auto cond_builder = HloComputation::Builder(TestName() + \".cond\");\n cond_builder.AddInstruction(\n HloInstruction::CreateParameter(0, vec1_shape_, \"param\"));\n cond_builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(true)));\n HloComputation* while_cond =\n 
module->AddEmbeddedComputation(cond_builder.Build());\n HloComputation* body_computation = module->AddEmbeddedComputation(\n MakeRematerializableComputation(\".body\"));\n HloComputation* entry_computation =\n module->AddEntryComputation(MakeRematerializableWhileComputation(\n while_cond, body_computation));\n EXPECT_EQ(entry_computation->instruction_count(), 7);\n EXPECT_EQ(body_computation->instruction_count(), 8);\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunHloRematerialization(\n 15 * 1024, module.get()));\n EXPECT_TRUE(changed);\n EXPECT_EQ(entry_computation->instruction_count(), 9);\n EXPECT_EQ(body_computation->instruction_count(), 9);\n CheckForRematInInstructionNames(\n ::testing::UnitTest::GetInstance()->current_test_info()->name());\n}\nTEST_F(RecomputeAndCompressHloRematerializationTest,\n RematerializeNestedComputations) {\n auto module = CreateNewVerifiedModule();\n auto cond_builder = HloComputation::Builder(TestName() + \".cond\");\n cond_builder.AddInstruction(\n HloInstruction::CreateParameter(0, vec1_shape_, \"param\"));\n cond_builder.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(true)));\n HloComputation* while_cond =\n module->AddEmbeddedComputation(cond_builder.Build());\n HloComputation* while_cond_copy =\n module->AddEmbeddedComputation(while_cond->Clone());\n HloComputation* inner_computation = module->AddEmbeddedComputation(\n MakeRematerializableComputation(\".inner\"));\n HloComputation* middle_computation =\n module->AddEmbeddedComputation(MakeRematerializableWhileComputation(\n while_cond, inner_computation,\n \".middle\"));\n HloComputation* entry_computation =\n module->AddEntryComputation(MakeRematerializableWhileComputation(\n while_cond_copy, middle_computation));\n EXPECT_EQ(entry_computation->instruction_count(), 7);\n EXPECT_EQ(middle_computation->instruction_count(), 7);\n EXPECT_EQ(inner_computation->instruction_count(), 8);\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunHloRematerialization(\n 13 * 
1024, module.get()));\n EXPECT_TRUE(changed);\n EXPECT_EQ(entry_computation->instruction_count(), 9);\n EXPECT_EQ(middle_computation->instruction_count(), 9);\n EXPECT_EQ(inner_computation->instruction_count(), 9);\n CheckForRematInInstructionNames(\n ::testing::UnitTest::GetInstance()->current_test_info()->name());\n}\nTEST_F(RecomputeAndCompressHloRematerializationTest, RngNotRematerialized) {\n auto module = CreateNewVerifiedModule();\n auto builder = HloComputation::Builder(TestName());\n auto param = builder.AddInstruction(\n HloInstruction::CreateParameter(0, scalar_shape_, \"param\"));\n auto rng = builder.AddInstruction(HloInstruction::CreateRng(\n vec1024_shape_, RandomDistribution::RNG_UNIFORM, {param, param}));\n auto tanh = builder.AddInstruction(\n HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kTanh, rng));\n auto exp = builder.AddInstruction(\n HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kExp, rng));\n auto add_0 = builder.AddInstruction(\n HloInstruction::CreateBinary(vec1024_shape_, HloOpcode::kAdd, rng, tanh));\n auto add_1 = builder.AddInstruction(HloInstruction::CreateBinary(\n vec1024_shape_, HloOpcode::kAdd, rng,\n builder.AddInstruction(HloInstruction::CreateBinary(\n vec1024_shape_, HloOpcode::kAdd, exp, add_0))));\n builder.AddInstruction(HloInstruction::CreateBinary(\n vec1024_shape_, HloOpcode::kAdd, rng,\n builder.AddInstruction(HloInstruction::CreateBinary(\n vec1024_shape_, HloOpcode::kAdd, tanh, add_1))));\n HloComputation* entry_computation =\n module->AddEntryComputation(builder.Build());\n auto count_rngs = [](const HloComputation* computation) {\n int64_t rng_count = 0;\n for (auto* instruction : computation->instructions()) {\n if (instruction->opcode() == HloOpcode::kRng) {\n ++rng_count;\n }\n }\n return rng_count;\n };\n ASSERT_EQ(count_rngs(entry_computation), 1);\n const int64_t original_instruction_count =\n entry_computation->instruction_count();\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n 
RunHloRematerialization(\n 4 * ByteSizeOf(vec1024_shape_), module.get()));\n EXPECT_TRUE(changed);\n EXPECT_EQ(count_rngs(entry_computation), 1);\n EXPECT_GT(entry_computation->instruction_count(), original_instruction_count);\n CheckForRematInInstructionNames(\n ::testing::UnitTest::GetInstance()->current_test_info()->name());\n}\nTEST_F(RecomputeAndCompressHloRematerializationTest,\n InstructionRematerializedMultipleTimes) {\n auto module = CreateNewVerifiedModule();\n HloComputation* subcomputation = nullptr;\n {\n auto builder = HloComputation::Builder(TestName() + \".subcomputation\");\n auto param = builder.AddInstruction(\n HloInstruction::CreateParameter(0, vec1024_shape_, \"param\"));\n auto concat = builder.AddInstruction(HloInstruction::CreateConcatenate(\n ShapeUtil::MakeShape(xla::F32, {2048}), {param, param},\n 0));\n builder.AddInstruction(HloInstruction::CreateSlice(\n vec1024_shape_, concat, {0},\n {1024}, {1}));\n subcomputation = module->AddEmbeddedComputation(builder.Build());\n }\n auto builder = HloComputation::Builder(TestName());\n auto param = builder.AddInstruction(\n HloInstruction::CreateParameter(0, scalar_shape_, \"param\"));\n auto bcast = builder.AddInstruction(\n HloInstruction::CreateBroadcast(vec1024_shape_, param, {}));\n auto add_1 = builder.AddInstruction(HloInstruction::CreateBinary(\n vec1024_shape_, HloOpcode::kAdd, bcast, bcast));\n auto call_1 = builder.AddInstruction(\n HloInstruction::CreateCall(vec1024_shape_, {add_1}, subcomputation));\n auto add_2 = builder.AddInstruction(HloInstruction::CreateBinary(\n vec1024_shape_, HloOpcode::kAdd, bcast, call_1));\n auto call_2 = builder.AddInstruction(\n HloInstruction::CreateCall(vec1024_shape_, {add_2}, subcomputation));\n auto add_3 = builder.AddInstruction(HloInstruction::CreateBinary(\n vec1024_shape_, HloOpcode::kAdd, bcast, call_2));\n auto call_3 = builder.AddInstruction(\n HloInstruction::CreateCall(vec1024_shape_, {add_3}, subcomputation));\n auto add_4 = 
builder.AddInstruction(HloInstruction::CreateBinary(\n vec1024_shape_, HloOpcode::kAdd, bcast, call_3));\n HloComputation* entry_computation =\n module->AddEntryComputation(builder.Build());\n auto count_broadcasts = [](const HloComputation* computation) {\n int64_t bcast_count = 0;\n for (auto* instruction : computation->instructions()) {\n if (instruction->opcode() == HloOpcode::kBroadcast) {\n bcast_count++;\n }\n }\n return bcast_count;\n };\n EXPECT_EQ(count_broadcasts(entry_computation), 1);\n EXPECT_EQ(entry_computation->instruction_count(), 9);\n EXPECT_EQ(add_2->operand(0), bcast);\n EXPECT_EQ(add_3->operand(0), bcast);\n EXPECT_EQ(add_4->operand(0), bcast);\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunHloRematerialization(\n 22 * 1024, module.get()));\n EXPECT_TRUE(changed);\n EXPECT_EQ(count_broadcasts(entry_computation), 4);\n EXPECT_EQ(entry_computation->instruction_count(), 12);\n EXPECT_NE(add_2->operand(0), bcast);\n EXPECT_THAT(add_2->operand(0), op::Broadcast(param));\n EXPECT_NE(add_3->operand(0), bcast);\n EXPECT_THAT(add_3->operand(0), op::Broadcast(param));\n EXPECT_NE(add_4->operand(0), bcast);\n EXPECT_THAT(add_4->operand(0), op::Broadcast(param));\n CheckForRematInInstructionNames(\n ::testing::UnitTest::GetInstance()->current_test_info()->name());\n}\nTEST_F(RecomputeAndCompressHloRematerializationTest, CopyNotRematerialized) {\n auto module = CreateNewVerifiedModule();\n auto builder = HloComputation::Builder(TestName());\n auto param = builder.AddInstruction(\n HloInstruction::CreateParameter(0, vec1024_shape_, \"param\"));\n auto copy = builder.AddInstruction(\n HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kCopy, param));\n auto negate_a_1 = builder.AddInstruction(\n HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kNegate, copy));\n auto negate_a_2 = builder.AddInstruction(HloInstruction::CreateUnary(\n vec1024_shape_, HloOpcode::kNegate, negate_a_1));\n auto negate_b_1 = builder.AddInstruction(\n 
HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kNegate, copy));\n auto negate_b_2 = builder.AddInstruction(HloInstruction::CreateUnary(\n vec1024_shape_, HloOpcode::kNegate, negate_b_1));\n builder.AddInstruction(HloInstruction::CreateTuple({negate_a_2, negate_b_2}));\n HloComputation* entry_computation =\n module->AddEntryComputation(builder.Build());\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunHloRematerialization(\n 1 * 1024, module.get()));\n auto count_copies = [](const HloComputation* computation) {\n int64_t copy_count = 0;\n for (auto* instruction : computation->instructions()) {\n if (instruction->opcode() == HloOpcode::kCopy) {\n copy_count++;\n }\n }\n return copy_count;\n };\n EXPECT_TRUE(changed);\n EXPECT_EQ(count_copies(entry_computation), 1);\n CheckForRematInInstructionNames(\n ::testing::UnitTest::GetInstance()->current_test_info()->name());\n}\nTEST_F(RecomputeAndCompressHloRematerializationTest, ThroughBitcastRemat) {\n const std::string& hlo_string = R\"(\nHloModule fusion, is_scheduled=true\nENTRY %mycomp (param: f32[1]) -> f32[1] {\n %param = f32[1]{0} parameter(0)\n %reshape = f32[] reshape(f32[1]{0} %param)\n %broadcast = f32[1024,1]{1,0} broadcast(f32[] %reshape), dimensions={}\n %bitcast = f32[1024]{0} bitcast(f32[1024,1]{1,0} %broadcast)\n %negate = f32[1024,1]{1,0} negate(f32[1024,1]{1,0} %broadcast)\n %concatenate = f32[2048,1]{1,0} concatenate(f32[1024,1]{1,0} %negate, f32[1024,1]{1,0} %negate), dimensions={0}\n %slice = f32[1,1]{1,0} slice(f32[2048,1]{1,0} %concatenate), slice={[0:1], [0:1]}\n %bitcast.1 = f32[1]{0} bitcast(f32[1,1]{1,0} %slice)\n %concatenate.1 = f32[1025]{0} concatenate(f32[1024]{0} %bitcast, f32[1]{0} %bitcast.1), dimensions={0}\n ROOT %slice.1 = f32[1]{0} slice(f32[1025]{0} %concatenate.1), slice={[0:1]}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto* computation = module->entry_computation();\n const HloInstruction* slice = 
computation->root_instruction();\n ASSERT_THAT(slice,\n op::Slice(op::Concatenate(op::Bitcast(op::Broadcast(_)), _)));\n const HloInstruction* concat = slice->operand(0);\n const HloInstruction* bcast = concat->operand(0)->operand(0);\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunHloRematerialization(\n 14 * 1024, module.get()));\n EXPECT_TRUE(changed);\n EXPECT_EQ(computation->root_instruction(), slice);\n const HloInstruction* remat_bitcast = concat->operand(0);\n const HloInstruction* remat_broadcast = remat_bitcast->operand(0);\n EXPECT_THAT(remat_broadcast, op::Broadcast(::testing::Ne(bcast)));\n EXPECT_EQ(module->schedule()\n .sequence(computation)\n .instructions()[computation->instruction_count() - 2],\n concat);\n EXPECT_EQ(module->schedule()\n .sequence(computation)\n .instructions()[computation->instruction_count() - 3],\n remat_bitcast);\n EXPECT_EQ(module->schedule()\n .sequence(computation)\n .instructions()[computation->instruction_count() - 4],\n remat_broadcast);\n CheckForRematInInstructionNames(\n ::testing::UnitTest::GetInstance()->current_test_info()->name());\n}\nTEST_F(RecomputeAndCompressHloRematerializationTest,\n ThroughBitcastRematInfiniteLoop) {\n const std::string& hlo_string = R\"(\nHloModule fusion, is_scheduled=true\nENTRY %mycomp (param: f32[1]) -> f32[1024] {\n %param = f32[1]{0} parameter(0)\n %reshape = f32[] reshape(f32[1]{0} %param)\n %broadcast = f32[1024,1]{1,0} broadcast(f32[] %reshape), dimensions={}\n %bitcast = f32[1024]{0} bitcast(f32[1024,1]{1,0} %broadcast)\n %broadcast2 = f32[1024,1]{1,0} broadcast(f32[] %reshape), dimensions={}\n %bitcast2 = f32[1024]{0} bitcast(f32[1024,1]{1,0} %broadcast2)\n ROOT %add = f32[1024]{0} add(f32[1024]{0} %bitcast, f32[1024]{0} %bitcast2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto* computation = module->entry_computation();\n const HloInstruction* add = computation->root_instruction();\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n 
RunHloRematerialization(\n 1024, module.get()));\n ASSERT_THAT(add, op::Add(op::Bitcast(op::Broadcast(_)),\n op::Bitcast(op::Broadcast(_))));\n EXPECT_TRUE(changed);\n CheckForRematInInstructionNames(\n ::testing::UnitTest::GetInstance()->current_test_info()->name());\n}\nTEST_F(RecomputeAndCompressHloRematerializationTest, RematTupleShape) {\n const std::string& hlo_string = R\"(\nHloModule fusion, is_scheduled=true\n%add_mul_comp {\n %p0 = f32[] parameter(0)\n %p1 = f32[] parameter(1)\n %x = f32[1024]{0} broadcast(f32[] %p0), dimensions={}\n %y = f32[1024]{0} broadcast(f32[] %p1), dimensions={}\n %add = f32[1024] add(%x, %y)\n %mul = f32[1024] multiply(%x, %y)\n ROOT %out = (f32[1024], f32[1024]) tuple(%add, %mul)\n}\nENTRY %entry {\n %param.0 = f32[] parameter(0)\n %param.1 = f32[] parameter(1)\n %fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop,\n calls=%add_mul_comp\n %gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0\n %add = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.1)\n %broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={}\n %mul = f32[1024]{0} multiply(f32[1024]{0} %add, f32[1024]{0} %broadcast.1)\n %gte.2 = f32[1024]{0} get-tuple-element(%fus), index=1\n ROOT %add.2 = f32[1024]{0} add(f32[1024]{0} %mul, f32[1024]{0} %gte.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n const HloComputation* computation = module->entry_computation();\n const HloInstruction* add = computation->root_instruction();\n ASSERT_THAT(add, op::Add(op::Multiply(), op::GetTupleElement(op::Fusion())));\n const HloInstruction* fusion = add->operand(0)->operand(0);\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunHloRematerialization(\n 11 * 1024, module.get()));\n EXPECT_TRUE(changed);\n ASSERT_THAT(\n add, op::Add(op::Multiply(), AllOf(op::Fusion(), ::testing::Ne(fusion))));\n CheckForRematInInstructionNames(\n 
::testing::UnitTest::GetInstance()->current_test_info()->name());\n}\nTEST_F(RecomputeAndCompressHloRematerializationTest, RematTupleShapeDoubleUse) {\n const std::string& hlo_string = R\"(\nHloModule fusion, is_scheduled=true\n%add_mul_comp {\n %p0 = f32[] parameter(0)\n %p1 = f32[] parameter(1)\n %x = f32[1024]{0} broadcast(f32[] %p0), dimensions={}\n %y = f32[1024]{0} broadcast(f32[] %p1), dimensions={}\n %add = f32[1024] add(%x, %y)\n %mul = f32[1024] multiply(%x, %y)\n ROOT %out = (f32[1024], f32[1024]) tuple(%add, %mul)\n}\nENTRY %entry {\n %param.0 = f32[] parameter(0)\n %param.1 = f32[] parameter(1)\n %fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop,\n calls=%add_mul_comp\n %gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0\n %add = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.1)\n %broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={}\n %mul = f32[1024]{0} multiply(f32[1024]{0} %add, f32[1024]{0} %broadcast.1)\n %gte.2 = f32[1024]{0} get-tuple-element(%fus), index=1\n %gte.3 = f32[1024]{0} get-tuple-element(%fus), index=0\n %add.2 = f32[1024]{0} add(f32[1024]{0} %mul, f32[1024]{0} %gte.2)\n ROOT %mul.2 = f32[1024]{0} multiply(f32[1024]{0} %add.2, f32[1024]{0} %gte.3)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n const HloComputation* computation = module->entry_computation();\n const HloInstruction* add = computation->root_instruction();\n ASSERT_THAT(add, op::Multiply(op::Add(op::Multiply(),\n op::GetTupleElement(op::Fusion())),\n op::GetTupleElement(op::Fusion())));\n const HloInstruction* fusion = add->operand(0)->operand(0);\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunHloRematerialization(\n 11 * 1024, module.get()));\n EXPECT_TRUE(changed);\n ASSERT_THAT(\n add,\n op::Multiply(\n op::Add(op::Multiply(), op::GetTupleElement(AllOf(\n op::Fusion(), ::testing::Ne(fusion)))),\n op::GetTupleElement(AllOf(op::Fusion(), ::testing::Ne(fusion)))));\n 
EXPECT_EQ(add->operand(0)->operand(1)->operand(0),\n add->operand(1)->operand(0));\n CheckForRematInInstructionNames(\n ::testing::UnitTest::GetInstance()->current_test_info()->name());\n}\nTEST_F(RecomputeAndCompressHloRematerializationTest,\n RematTupleShapeThroughBitcasts) {\n const std::string& hlo_string = R\"(\nHloModule fusion, is_scheduled=true\n%add_mul_comp {\n %p0 = f32[] parameter(0)\n %p1 = f32[] parameter(1)\n %x = f32[1024]{0} broadcast(f32[] %p0), dimensions={}\n %y = f32[1024]{0} broadcast(f32[] %p1), dimensions={}\n %add = f32[1024] add(%x, %y)\n %mul = f32[1024] multiply(%x, %y)\n ROOT %out = (f32[1024], f32[1024]) tuple(%add, %mul)\n}\nENTRY %entry {\n %param.0 = f32[] parameter(0)\n %param.1 = f32[] parameter(1)\n %fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop,\n calls=%add_mul_comp\n %gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0\n %add = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.1)\n %broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={}\n %mul = f32[1024]{0} multiply(f32[1024]{0} %add, f32[1024]{0} %broadcast.1)\n %gte.2 = f32[1024]{0} get-tuple-element(%fus), index=1\n %bc.1 = f32[1024,1]{0,1} bitcast(%mul)\n %bc.2 = f32[1024,1]{0,1} bitcast(%gte.2)\n ROOT %add.2 = f32[1024,1]{0,1} add(f32[1024,1]{0,1} %bc.1,\n f32[1024,1]{0,1} %bc.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n const HloComputation* computation = module->entry_computation();\n const HloInstruction* add = computation->root_instruction();\n ASSERT_THAT(add, op::Add(op::Bitcast(op::Multiply()),\n op::Bitcast(op::GetTupleElement(op::Fusion()))));\n const HloInstruction* fusion = add->operand(0)->operand(0)->operand(0);\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunHloRematerialization(\n 11 * 1024, module.get()));\n EXPECT_TRUE(changed);\n ASSERT_THAT(add,\n op::Add(op::Bitcast(op::Multiply()),\n op::Bitcast(AllOf(op::Fusion(), ::testing::Ne(fusion)))));\n 
CheckForRematInInstructionNames(\n ::testing::UnitTest::GetInstance()->current_test_info()->name());\n}\nTEST_F(RecomputeAndCompressHloRematerializationTest, RematThroughTuple) {\n const std::string& hlo_string = R\"(\nHloModule fusion, is_scheduled=true\n%add_mul_comp {\n %p0 = f32[] parameter(0)\n %p1 = f32[] parameter(1)\n %x = f32[1024]{0} broadcast(f32[] %p0), dimensions={}\n %y = f32[1024]{0} broadcast(f32[] %p1), dimensions={}\n %add = f32[1024] add(%x, %y)\n %mul = f32[1024] multiply(%x, %y)\n ROOT %out = (f32[1024], f32[1024]) tuple(%add, %mul)\n}\nENTRY %entry {\n %param.0 = f32[] parameter(0)\n %param.1 = f32[] parameter(1)\n %fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop,\n calls=%add_mul_comp\n %gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0\n %gte.3 = f32[1024]{0} get-tuple-element(%fus), index=1\n %add = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.3)\n %broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={}\n %mul = f32[1024]{0} multiply(f32[1024]{0} %add, f32[1024]{0} %broadcast.1)\n %tpl = (f32[1024]{0}, f32[1024]{0}) tuple(%gte.1, %add)\n %bc.1 = f32[1024,1]{0,1} bitcast(%mul)\n %gte.2 = f32[1024]{0} get-tuple-element(%tpl), index=0\n ROOT %add.2 = f32[1024]{0} add(f32[1024]{0} %gte.2, f32[1024]{0} %add)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n const HloComputation* computation = module->entry_computation();\n const HloInstruction* add = computation->root_instruction();\n ASSERT_THAT(add, op::Add(op::GetTupleElement(\n op::Tuple(op::GetTupleElement(op::Fusion()), _)),\n op::Add()));\n const HloInstruction* tuple = add->operand(0)->operand(0);\n const HloInstruction* fusion = tuple->operand(0)->operand(0);\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunHloRematerialization(\n 11 * 1024, module.get()));\n EXPECT_TRUE(changed);\n ASSERT_THAT(add, op::Add(AllOf(op::Fusion(), ::testing::Ne(tuple),\n ::testing::Ne(fusion)),\n op::Add()));\n 
CheckForRematInInstructionNames(\n ::testing::UnitTest::GetInstance()->current_test_info()->name());\n}\nTEST_F(RecomputeAndCompressHloRematerializationTest, AllGatherChannelId) {\n const std::string& hlo_string = R\"(\nHloModule fusion, is_scheduled=true\nENTRY %mycomp (param: f32[1]) -> f32[1] {\n %param = f32[1]{0} parameter(0)\n %reshape = f32[] reshape(f32[1]{0} %param)\n %broadcast = f32[256,1]{1,0} broadcast(f32[] %reshape), dimensions={}\n %ag = f32[1024,1]{1,0} all-gather(f32[256,1]{1,0} %broadcast), dimensions={0},\n channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true\n %bitcast = f32[1024]{0} bitcast(f32[1024,1]{1,0} %ag)\n %negate = f32[1024,1]{1,0} negate(f32[1024,1]{1,0} %ag)\n %concatenate = f32[2048,1]{1,0} concatenate(f32[1024,1]{1,0} %negate,\n f32[1024,1]{1,0} %negate), dimensions={0}\n %slice = f32[1,1]{1,0} slice(f32[2048,1]{1,0} %concatenate),\n slice={[0:1], [0:1]}\n %bitcast.1 = f32[1]{0} bitcast(f32[1,1]{1,0} %slice)\n %concatenate.1 = f32[1025]{0} concatenate(f32[1024]{0} %bitcast,\n f32[1]{0} %bitcast.1), dimensions={0}\n ROOT %slice.1 = f32[1]{0} slice(f32[1025]{0} %concatenate.1), slice={[0:1]}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n auto* computation = module->entry_computation();\n const HloInstruction* slice = computation->root_instruction();\n ASSERT_THAT(slice, op::Slice(op::Concatenate(\n op::Bitcast(op::AllGather(op::Broadcast(_))), _)));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunHloRematerialization(\n 14 * 1024, module.get()));\n EXPECT_TRUE(changed);\n EXPECT_EQ(computation->root_instruction(), slice);\n const HloInstruction* original_ag = FindInstruction(module.get(), \"ag\");\n const HloInstruction* remat_ag = FindInstruction(module.get(), \"ag.remat\");\n EXPECT_NE(remat_ag, nullptr);\n EXPECT_TRUE(original_ag->channel_id().has_value());\n EXPECT_TRUE(remat_ag->channel_id().has_value());\n EXPECT_EQ(*remat_ag->channel_id(), *original_ag->channel_id() 
+ 1);\n CheckForRematInInstructionNames(\n ::testing::UnitTest::GetInstance()->current_test_info()->name());\n}\nTEST_F(RecomputeAndCompressHloRematerializationTest, RematTupleArgFusion) {\n const std::string& hlo_string = R\"(\nHloModule fusion, is_scheduled=true\n%add_mul_comp {\n %p0 = f32[] parameter(0)\n %p1 = f32[] parameter(1)\n %x = f32[1024]{0} broadcast(f32[] %p0), dimensions={}\n %y = f32[1024]{0} broadcast(f32[] %p1), dimensions={}\n %add = f32[1024] add(%x, %y)\n %mul = f32[1024] multiply(%x, %y)\n ROOT %out = (f32[1024], f32[1024]) tuple(%add, %mul)\n}\n%add_comp {\n %p0 = f32[] parameter(0)\n %p1 = f32[] parameter(1)\n ROOT %add = add(%p0, %p1)\n}\n%add_tuple_comp {\n %p = (f32[1024]{0}, f32[1024]{0}) parameter(0)\n %p0 = get-tuple-element(%p), index=0\n %p1 = get-tuple-element(%p), index=1\n ROOT %add = add(%p0, %p1)\n}\nENTRY %entry {\n %param.0 = f32[] parameter(0)\n %param.1 = f32[] parameter(1)\n %fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop,\n calls=%add_mul_comp\n %gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0\n %gte.3 = f32[1024]{0} get-tuple-element(%fus), index=1\n %add.0 = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.3)\n %broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={}\n %add.1 = f32[1024]{0} add(f32[1024]{0} %add.0, f32[1024]{0} %broadcast.1)\n %c = f32[] constant(0)\n %reduce = f32[] reduce(%add.1, %c), dimensions={0}, to_apply=add_comp\n %fus.1 = f32[1024]{0} fusion(%fus), kind=kLoop, calls=%add_tuple_comp\n ROOT %tuple = tuple(%reduce, %fus.1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n const HloComputation* computation = module->entry_computation();\n const HloInstruction* root = computation->root_instruction();\n ASSERT_THAT(root, op::Tuple(op::Reduce(), op::Fusion(op::Fusion())));\n const HloInstruction* fusion1 = root->operand(1);\n const HloInstruction* fusion0 = fusion1->operand(0);\n 
TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunHloRematerialization(\n 11 * 1024, module.get()));\n EXPECT_TRUE(changed);\n ASSERT_THAT(\n root, op::Tuple(op::Reduce(),\n op::Fusion(AllOf(op::Fusion(), ::testing::Ne(fusion0)))));\n CheckForRematInInstructionNames(\n ::testing::UnitTest::GetInstance()->current_test_info()->name());\n}\nTEST_F(RecomputeAndCompressHloRematerializationTest,\n RematFusionUpdateSchedule) {\n const std::string& hlo_string = R\"(\nHloModule fusion, is_scheduled=true\n%custom_call_comp {\n %p = f32[1024]{0} parameter(0)\n ROOT %n = f32[1024]{0} negate(p)\n}\n%add_mul_comp {\n %p0 = f32[] parameter(0)\n %p1 = f32[] parameter(1)\n %x = f32[1024]{0} broadcast(f32[] %p0), dimensions={}\n %y = f32[1024]{0} broadcast(f32[] %p1), dimensions={}\n %add = f32[1024] add(%x, %y)\n %mul = f32[1024] multiply(%x, %y)\n %c = f32[1024] custom-call(%mul), custom_call_target=\"SomeCall\", called_computations={custom_call_comp}\n ROOT %out = (f32[1024], f32[1024]) tuple(%add, %c)\n}\nENTRY %entry {\n %param.0 = f32[] parameter(0)\n %param.1 = f32[] parameter(1)\n %fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop,\n calls=%add_mul_comp\n %gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0\n %add = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.1)\n %broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={}\n %mul = f32[1024]{0} multiply(f32[1024]{0} %add, f32[1024]{0} %broadcast.1)\n %gte.2 = f32[1024]{0} get-tuple-element(%fus), index=1\n %gte.3 = f32[1024]{0} get-tuple-element(%fus), index=0\n %add.2 = f32[1024]{0} add(f32[1024]{0} %mul, f32[1024]{0} %gte.2)\n ROOT %mul.2 = f32[1024]{0} multiply(f32[1024]{0} %add.2, f32[1024]{0} %gte.3)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n const HloComputation* computation = module->entry_computation();\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunHloRematerialization(\n 11 * 1024, module.get()));\n EXPECT_TRUE(changed);\n 
XLA_VLOG_LINES(1, module->ToString());\n const HloInstruction* add = computation->root_instruction();\n const HloInstruction* fusion = add->operand(0)->operand(0);\n ASSERT_THAT(\n add,\n op::Multiply(\n op::Add(op::Multiply(), op::GetTupleElement(AllOf(\n op::Fusion(), ::testing::Ne(fusion)))),\n op::GetTupleElement(AllOf(op::Fusion(), ::testing::Ne(fusion)))));\n const HloInstruction* fusion0 = add->operand(0)->operand(1)->operand(0);\n const HloInstruction* fusion1 = add->operand(1)->operand(0);\n auto it = std::find_if(fusion0->fused_instructions().begin(),\n fusion0->fused_instructions().end(),\n [](const HloInstruction* instr) {\n return instr->opcode() == HloOpcode::kCustomCall;\n });\n ASSERT_NE(it, fusion0->fused_instructions().end());\n auto it2 = std::find_if(fusion1->fused_instructions().begin(),\n fusion1->fused_instructions().end(),\n [](const HloInstruction* instr) {\n return instr->opcode() == HloOpcode::kCustomCall;\n });\n ASSERT_NE(it2, fusion1->fused_instructions().end());\n EXPECT_TRUE(module->schedule().is_computation_scheduled(\n (*it)->called_computations()[0]));\n EXPECT_TRUE(module->schedule().is_computation_scheduled(\n (*it2)->called_computations()[0]));\n CheckForRematInInstructionNames(\n ::testing::UnitTest::GetInstance()->current_test_info()->name());\n}\nclass CompressingRematerializationTest : public RematerializationTestBase {\n protected:\n static int64_t ShapeSizePadMinorTo64(const Shape& shape) {\n if (shape.IsTuple()) {\n return 4;\n }\n Shape descending_shape =\n ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(shape);\n int64_t size =\n ShapeUtil::ByteSizeOfPrimitiveType(descending_shape.element_type());\n for (int64_t i = 0; i < descending_shape.rank(); ++i) {\n int64_t dim = descending_shape.dimensions(i);\n if (i == descending_shape.rank() - 1) {\n dim = RoundUpTo(dim, 64);\n }\n size *= dim;\n }\n return size;\n }\n static absl::StatusOr ChooseCompactLayoutForShape(const Shape& shape) {\n if (shape.rank() != 
2) {\n return shape;\n }\n Shape result = shape;\n Layout layout = result.layout();\n int64_t most_minor_index = layout.minor_to_major()[0];\n int64_t second_minor_index = layout.minor_to_major()[1];\n int64_t most_minor = result.dimensions(most_minor_index);\n int64_t second_minor = result.dimensions(second_minor_index);\n if (most_minor < second_minor) {\n Layout new_layout = layout;\n new_layout.set_minor_to_major(0, second_minor_index);\n new_layout.set_minor_to_major(1, most_minor_index);\n *result.mutable_layout() = new_layout;\n }\n return result;\n }\n absl::StatusOr RunHloRematerialization(int64_t memory_limit_bytes,\n HloModule* module,\n int64_t min_remat_size = 0) {\n TF_EXPECT_OK(verifier().Run(module).status());\n HloRematerialization::RematerializationModeConfig config(\n false, true, false);\n auto shape_size_func = [](const Shape& shape) {\n return ShapeSizePadMinorTo64(shape);\n };\n HloCostAnalysis cost_analysis(shape_size_func);\n HloRematerialization::Options options(\n cost_analysis, config, memory_limit_bytes,\n 1, 1,\n min_remat_size, ChooseCompactLayoutForShape,\n std::nullopt,\n {});\n HloRematerialization::RematerializationSizes sizes;\n HloRematerialization remat(options, sizes);\n return remat.Run(module);\n }\n};\nTEST_F(CompressingRematerializationTest, OnlyRematBigBuffer) {\n const std::string& hlo_string = R\"(\nHloModule fusion, is_scheduled=true\n%add_float {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(f32[] %x, f32[] %y)\n}\nENTRY %entry {\n %param.0 = f32[] parameter(0)\n %constant = f32[] constant(0)\n %broadcast.0 = f32[64,2]{1,0} broadcast(f32[] %param.0), dimensions={}\n %broadcast.1 = f32[10,2]{1,0} broadcast(f32[] %param.0), dimensions={}\n %negate = f32[64,2]{1,0} negate(f32[64,2]{1,0} broadcast.0)\n %reduce.0 = f32[] reduce(f32[64,2]{1,0} %negate, f32[] %constant), dimensions={1, 0}, to_apply=%add_float\n %reduce.1 = f32[] reduce(f32[64,2]{1,0} %broadcast.0, f32[] %constant), 
dimensions={1, 0}, to_apply=%add_float\n %reduce.2 = f32[] reduce(f32[10,2]{1,0} %broadcast.1, f32[] %constant), dimensions={1, 0}, to_apply=%add_float\n %add = f32[] add(f32[] %reduce.0, f32[] %reduce.1)\n ROOT %add.2 = f32[] add(f32[] %add, f32[] %reduce.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloRematerialization(\n 30 * 1024,\n module.get(), 10 * 1024));\n EXPECT_TRUE(changed);\n HloInstruction* broadcast =\n module->entry_computation()->GetInstructionWithName(\"broadcast.0\");\n HloInstruction* broadcast_2 =\n module->entry_computation()->GetInstructionWithName(\"broadcast.1\");\n HloInstruction* reduce =\n module->entry_computation()->GetInstructionWithName(\"reduce.1\");\n HloInstruction* reduce_2 =\n module->entry_computation()->GetInstructionWithName(\"reduce.2\");\n EXPECT_THAT(reduce,\n op::Reduce(op::Copy(op::Copy(broadcast)), op::Constant()));\n EXPECT_THAT(reduce_2, op::Reduce(broadcast_2, op::Constant()));\n}\nTEST_F(CompressingRematerializationTest, SingleRemat) {\n const std::string& hlo_string = R\"(\nHloModule fusion, is_scheduled=true\n%add_float {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(f32[] %x, f32[] %y)\n}\nENTRY %entry {\n %param.0 = f32[] parameter(0)\n %constant = f32[] constant(0)\n %broadcast.0 = f32[64,2]{1,0} broadcast(f32[] %param.0), dimensions={}\n %negate = f32[64,2]{1,0} negate(f32[64,2]{1,0} broadcast.0)\n %reduce.0 = f32[] reduce(f32[64,2]{1,0} %negate, f32[] %constant), dimensions={1, 0}, to_apply=%add_float\n %reduce.1 = f32[] reduce(f32[64,2]{1,0} %broadcast.0, f32[] %constant), dimensions={1, 0}, to_apply=%add_float\n %add = f32[] add(f32[] %reduce.0, f32[] %reduce.1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunHloRematerialization(\n 30 * 1024, module.get()));\n EXPECT_TRUE(changed);\n 
HloInstruction* broadcast =\n module->entry_computation()->GetInstructionWithName(\"broadcast.0\");\n HloInstruction* reduce =\n module->entry_computation()->GetInstructionWithName(\"reduce.1\");\n EXPECT_THAT(reduce,\n op::Reduce(op::Copy(op::Copy(broadcast)), op::Constant()));\n}\nTEST_F(CompressingRematerializationTest, AvoidPathologicalCompress) {\n const std::string& hlo_string = R\"(\nHloModule fusion, is_scheduled=true\n%add_float {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(f32[] %x, f32[] %y)\n}\nENTRY %entry {\n %param.0 = f32[] parameter(0)\n %constant = f32[] constant(0)\n %broadcast.0 = f32[63,60]{1,0} broadcast(f32[] %param.0), dimensions={}\n %broadcast.1 = f32[16,64]{1,0} broadcast(f32[] %param.0), dimensions={}\n %reduce.0 = f32[] reduce(%broadcast.1, f32[] %constant), dimensions={1, 0}, to_apply=%add_float\n %reduce.1 = f32[] reduce(%broadcast.0, f32[] %constant), dimensions={1, 0}, to_apply=%add_float\n %add = f32[] add(f32[] %reduce.0, f32[] %reduce.1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunHloRematerialization(\n 16 * 1024, module.get()));\n EXPECT_FALSE(changed);\n HloInstruction* broadcast =\n module->entry_computation()->GetInstructionWithName(\"broadcast.0\");\n HloInstruction* reduce =\n module->entry_computation()->GetInstructionWithName(\"reduce.1\");\n EXPECT_THAT(reduce, op::Reduce(broadcast, op::Constant()));\n}\nTEST_F(CompressingRematerializationTest, AllUsersUseSameCopy) {\n const std::string& hlo_string = R\"(\nHloModule fusion, is_scheduled=true\n%add_float {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(f32[] %x, f32[] %y)\n}\nENTRY %entry {\n %param.0 = f32[] parameter(0)\n %constant = f32[] constant(0)\n %broadcast.0 = f32[64,2]{1,0} broadcast(f32[] %param.0), dimensions={}\n %negate = f32[64,2]{1,0} negate(f32[64,2]{1,0} broadcast.0)\n %reduce.0 = f32[] 
reduce(f32[64,2]{1,0} %negate, f32[] %constant), dimensions={1, 0}, to_apply=%add_float\n %reduce.1 = f32[] reduce(f32[64,2]{1,0} %negate, f32[] %constant), dimensions={1, 0}, to_apply=%add_float\n %reduce.2 = f32[] reduce(f32[64,2]{1,0} %broadcast.0, f32[] %constant), dimensions={1, 0}, to_apply=%add_float\n %add = f32[] add(f32[] %reduce.0, f32[] %reduce.1)\n %reduce.3 = f32[] reduce(f32[64,2]{1,0} %broadcast.0, f32[] %constant), dimensions={1, 0}, to_apply=%add_float\n %add.2 = f32[] add(f32[] %reduce.2, f32[] %reduce.3)\n ROOT %tuple = (f32[], f32[]) tuple (f32[] add, f32[] add.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunHloRematerialization(\n 30 * 1024, module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* broadcast =\n module->entry_computation()->GetInstructionWithName(\"broadcast.0\");\n HloInstruction* reduce_2 =\n module->entry_computation()->GetInstructionWithName(\"reduce.2\");\n HloInstruction* reduce_3 =\n module->entry_computation()->GetInstructionWithName(\"reduce.3\");\n EXPECT_THAT(reduce_2,\n op::Reduce(op::Copy(op::Copy(broadcast)), op::Constant()));\n EXPECT_THAT(reduce_3,\n op::Reduce(op::Copy(op::Copy(broadcast)), op::Constant()));\n}\nclass OffloadingRematerializationTest : public RematerializationTestBase {\n protected:\n absl::StatusOr RunHloRematerialization(int64_t memory_limit_bytes,\n HloModule* module,\n int64_t min_remat_size = 0) {\n TF_EXPECT_OK(verifier().Run(module).status());\n if (!module->has_schedule()) {\n HloMemoryScheduler scheduler(\n [](const BufferValue& buffer) { return ByteSizeOf(buffer.shape()); },\n ComputationSchedulerToModuleScheduler(DefaultMemoryScheduler));\n TF_EXPECT_OK(scheduler.Run(module).status());\n }\n HloCostAnalysis::Options hlo_cost_analysis_options;\n hlo_cost_analysis_options.shape_size = [](const Shape& shape) {\n return ByteSizeOf(shape);\n };\n 
hlo_cost_analysis_options.set_flops_per_second(flops_per_second_);\n hlo_cost_analysis_options.set_transcendentals_per_second(\n transcendentals_per_second_);\n HloCostAnalysis cost_analysis(hlo_cost_analysis_options);\n HloRematerialization::RematerializationModeConfig config(\n false, false, true);\n HloRematerialization::HostMemoryOffloadConfig host_memory_offload_config(\n kHostMemorySpaceColor, copy_to_host_speed_, copy_from_host_speed_);\n HloRematerialization::Options options(\n cost_analysis, config, memory_limit_bytes,\n 1, 1,\n min_remat_size, nullptr,\n host_memory_offload_config,\n {});\n HloRematerialization::RematerializationSizes sizes;\n HloRematerialization remat(options, sizes);\n return remat.Run(module);\n }\n void SetCopyToHostSpeed(float val) { copy_to_host_speed_ = val; }\n void SetCopyFromHostSpeed(float val) { copy_from_host_speed_ = val; }\n void SetFlopsPerSecond(float val) { flops_per_second_ = val; }\n void SetTranscendentalsPerSecond(float val) {\n transcendentals_per_second_ = val;\n }\n static constexpr const int64_t kHostMemorySpaceColor{5};\n private:\n float copy_to_host_speed_{1.0f};\n float copy_from_host_speed_{1.0f};\n float flops_per_second_{1.0f};\n float transcendentals_per_second_{1.0f};\n};\nTEST_F(OffloadingRematerializationTest, BasicSuccessfulHostOffload) {\n const std::string& hlo_string = R\"(\nHloModule MyModule, is_scheduled=true, entry_computation_layout={(f32[1024]{0}, f32[1024]{0})->f32[1024]{0}}\nENTRY MyModule {\n param_0 = f32[1024]{0} parameter(0)\n param_1 = f32[1024]{0} parameter(1)\n res_3 = f32[1024]{0} add(param_0, param_1)\n res_4 = f32[1024]{0} tanh(res_3)\n res_5 = f32[1024]{0} tanh(res_4)\n res_6 = f32[1024]{0} tanh(res_5)\n res_7 = f32[1024]{0} add(res_6, res_6)\n res_8 = f32[1024]{0} add(res_7, res_5)\n res_9 = f32[1024]{0} add(res_8, res_4)\n res_10 = f32[1024]{0} add(res_9, res_3)\n ROOT res_11 = f32[1024]{0} tanh(res_10)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n 
ParseAndReturnVerifiedModule(hlo_string));\n SetCopyToHostSpeed(4.0 * 1024);\n SetCopyFromHostSpeed(4.0 * 1024);\n SetFlopsPerSecond(2 * 1024);\n SetTranscendentalsPerSecond(2 * 1024);\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunHloRematerialization(\n 10 * 1024, module.get()));\n ASSERT_TRUE(changed);\n ASSERT_TRUE(module->has_schedule());\n auto res_3_matcher = op::Add(op::Parameter(), op::Parameter());\n auto res_3_rematted_matcher = op::AsyncCopy(\n xla::Layout::kDefaultMemorySpace, kHostMemorySpaceColor,\n op::AsyncCopy(kHostMemorySpaceColor, xla::Layout::kDefaultMemorySpace,\n res_3_matcher));\n auto res_4_matcher = op::Tanh(res_3_matcher);\n auto res_4_rematted_matcher = op::AsyncCopy(\n xla::Layout::kDefaultMemorySpace, kHostMemorySpaceColor,\n op::AsyncCopy(kHostMemorySpaceColor, xla::Layout::kDefaultMemorySpace,\n res_4_matcher));\n auto res_5_matcher = op::Tanh(res_4_matcher);\n auto res_6_matcher = op::Tanh(res_5_matcher);\n auto res_7_matcher = op::Add(res_6_matcher, res_6_matcher);\n auto res_8_matcher = op::Add(res_7_matcher, res_5_matcher);\n auto res_9_matcher = op::Add(res_8_matcher, res_4_rematted_matcher);\n auto res_10_matcher = op::Add(res_9_matcher, res_3_rematted_matcher);\n const auto instruction_sequence =\n module->schedule().sequence(module->entry_computation());\n ASSERT_THAT(instruction_sequence.instructions().back(),\n op::Tanh(res_10_matcher));\n}\nTEST_F(OffloadingRematerializationTest, SkipOffloadWhenBitcastIsInvolved) {\n const std::string& hlo_string = R\"(\nHloModule MyModule, is_scheduled=true, entry_computation_layout={(f32[1024]{0}, f32[1024]{0})->f32[1024]{0}}\nENTRY MyModule {\n param_0 = f32[1024]{0} parameter(0)\n param_1 = f32[1024]{0} parameter(1)\n res_3 = f32[1024]{0} add(param_0, param_1)\n bitcast = f32[1024]{0} bitcast(res_3)\n res_4 = f32[1024]{0} tanh(res_3)\n res_5 = f32[1024]{0} tanh(res_4)\n res_6 = f32[1024]{0} tanh(res_5)\n res_7 = f32[1024]{0} add(res_6, res_6)\n res_8 = f32[1024]{0} add(res_7, res_5)\n 
res_9 = f32[1024]{0} add(res_8, res_4)\n res_10 = f32[1024]{0} add(res_9, bitcast)\n ROOT res_11 = f32[1024]{0} tanh(res_10)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n SetCopyToHostSpeed(4.0 * 1024);\n SetCopyFromHostSpeed(4.0 * 1024);\n SetFlopsPerSecond(2 * 1024);\n SetTranscendentalsPerSecond(2 * 1024);\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunHloRematerialization(\n 10 * 1024, module.get()));\n ASSERT_TRUE(changed);\n ASSERT_TRUE(module->has_schedule());\n auto res_3_matcher = op::Add(op::Parameter(), op::Parameter());\n auto res_4_matcher = op::Tanh(res_3_matcher);\n auto res_4_rematted_matcher = op::AsyncCopy(\n xla::Layout::kDefaultMemorySpace, kHostMemorySpaceColor,\n op::AsyncCopy(kHostMemorySpaceColor, xla::Layout::kDefaultMemorySpace,\n res_4_matcher));\n auto res_5_matcher = op::Tanh(res_4_matcher);\n auto res_6_matcher = op::Tanh(res_5_matcher);\n auto res_7_matcher = op::Add(res_6_matcher, res_6_matcher);\n auto res_8_matcher = op::Add(res_7_matcher, res_5_matcher);\n auto res_9_matcher = op::Add(res_8_matcher, res_4_rematted_matcher);\n auto res_10_matcher = op::Add(res_9_matcher, op::Bitcast(res_3_matcher));\n const auto instruction_sequence =\n module->schedule().sequence(module->entry_computation());\n ASSERT_THAT(instruction_sequence.instructions().back(),\n op::Tanh(res_10_matcher));\n}\nclass IndirectUseTest : public RecomputeAndCompressHloRematerializationTest,\n public ::testing::WithParamInterface {};\nTEST_P(IndirectUseTest, IndirectUseRematerialized) {\n const bool indirectly_used = GetParam();\n auto module = CreateNewVerifiedModule();\n HloComputation* subcomputation = nullptr;\n {\n auto builder = HloComputation::Builder(TestName() + \".subcomputation\");\n auto param = builder.AddInstruction(\n HloInstruction::CreateParameter(0, vec1024_shape_, \"param\"));\n auto concat = builder.AddInstruction(HloInstruction::CreateConcatenate(\n ShapeUtil::MakeShape(xla::F32, {2048}), {param, 
param},\n 0));\n builder.AddInstruction(HloInstruction::CreateSlice(\n vec1024_shape_, concat, {0},\n {1024}, {1}));\n subcomputation = module->AddEmbeddedComputation(builder.Build());\n }\n auto builder = HloComputation::Builder(TestName());\n auto param = builder.AddInstruction(\n HloInstruction::CreateParameter(0, scalar_shape_, \"param\"));\n auto bcast = builder.AddInstruction(\n HloInstruction::CreateBroadcast(vec1024_shape_, param, {}));\n auto add_1 = builder.AddInstruction(HloInstruction::CreateBinary(\n vec1024_shape_, HloOpcode::kAdd, bcast, bcast));\n auto call_1 = builder.AddInstruction(\n HloInstruction::CreateCall(vec1024_shape_, {add_1}, subcomputation));\n auto add_2 = builder.AddInstruction(HloInstruction::CreateBinary(\n vec1024_shape_, HloOpcode::kAdd, bcast, call_1));\n auto tuple =\n builder.AddInstruction(HloInstruction::CreateTuple({bcast, add_2}));\n auto gte = builder.AddInstruction(HloInstruction::CreateGetTupleElement(\n vec1024_shape_, tuple, indirectly_used ? 
0 : 1));\n builder.AddInstruction(\n HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kNegate, gte));\n HloComputation* entry_computation =\n module->AddEntryComputation(builder.Build());\n EXPECT_EQ(entry_computation->instruction_count(), 8);\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n RunHloRematerialization(\n 22 * 1024, module.get()));\n if (indirectly_used) {\n EXPECT_TRUE(changed);\n EXPECT_EQ(entry_computation->instruction_count(), 3);\n } else {\n EXPECT_TRUE(changed);\n EXPECT_EQ(entry_computation->instruction_count(), 9);\n }\n CheckForRematInInstructionNames(\n ::testing::UnitTest::GetInstance()->current_test_info()->name());\n}\nINSTANTIATE_TEST_SUITE_P(IndirectUseTestInstantiation, IndirectUseTest,\n ::testing::Values(true, false));\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_rematerialization.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_rematerialization_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1176,"cells":{"ID":{"kind":"string","value":"4c6920cc-3aba-4b4e-867d-febd43cab405"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"llvm_compiler"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/llvm_compiler.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/tests/llvm_compiler_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/llvm_compiler.h\"\n#include \n#include \n#include \n#include \"absl/status/statusor.h\"\n#include \"absl/strings/str_format.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_module_group.h\"\n#include \"xla/service/executable.h\"\n#include 
\"xla/service/stream_pool.h\"\n#include \"tsl/platform/denormal.h\"\n#include \"tsl/platform/statusor.h\"\n#include \"tsl/profiler/lib/scoped_annotation.h\"\n#ifdef __FAST_MATH__\n#error \"Don't build XLA with -ffast-math\"\n#endif\nnamespace xla {\nabsl::StatusOr>> LLVMCompiler::Compile(\n std::unique_ptr module_group,\n std::vector> stream_execs,\n const CompileOptions& options) {\n tsl::port::ScopedDontFlushDenormal dont_flush_denormals;\n std::vector> result;\n std::vector> modules =\n module_group->ConsumeModules();\n for (size_t i = 0; i < modules.size(); i++) {\n tsl::profiler::ScopedAnnotation annotation{[&] {\n return absl::StrFormat(\"XlaCompile:#module=%s,program_id=%d#\",\n modules[i]->name(), modules[i]->unique_id());\n }};\n TF_ASSIGN_OR_RETURN(modules[i], RunHloPasses(std::move(modules[i]),\n stream_execs[i][0], options));\n TF_ASSIGN_OR_RETURN(\n std::unique_ptr executable,\n RunBackend(std::move(modules[i]), stream_execs[i][0], options));\n result.push_back(std::move(executable));\n }\n return std::move(result);\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/llvm_compiler.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/status/status.h\"\n#include \"llvm/IR/Module.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module_group.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/service/backend.h\"\n#include \"xla/stream_executor/device_description.h\"\n#include \"xla/stream_executor/stream_executor.h\"\n#include \"xla/test_helpers.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"tsl/platform/casts.h\"\n#include \"tsl/platform/test.h\"\n#include \"tsl/platform/threadpool.h\"\nnamespace xla {\nnamespace {\nusing LLVMCompilerTest = HloTestBase;\nconst char* const kHloText = R\"(\nHloModule Add\nENTRY main {\n constant.0 = f32[] constant(42.0)\n constant.1 = f32[] constant(43.0)\n ROOT add.0 = f32[] add(constant.0, 
constant.1)\n}\n)\";\nTEST_F(LLVMCompilerTest, HooksTest) {\n int pre_opt_hook_call_count = 0;\n int post_opt_hook_call_count = 0;\n auto pre_opt_hook = [&pre_opt_hook_call_count](const llvm::Module&) {\n ++pre_opt_hook_call_count;\n return absl::OkStatus();\n };\n auto post_opt_hook = [&post_opt_hook_call_count](const llvm::Module&) {\n ++post_opt_hook_call_count;\n return absl::OkStatus();\n };\n auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value();\n LLVMCompiler* compiler =\n tensorflow::down_cast(backend().compiler());\n compiler->SetPreOptimizationHook(pre_opt_hook);\n compiler->SetPostOptimizationHook(post_opt_hook);\n ASSERT_TRUE(compiler\n ->RunBackend(std::move(hlo_module),\n backend().default_stream_executor(),\n nullptr)\n .ok());\n EXPECT_EQ(1, pre_opt_hook_call_count);\n EXPECT_EQ(1, post_opt_hook_call_count);\n}\nTEST_F(LLVMCompilerTest, DISABLED_MultiModuleCompilation) {\n auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value();\n auto hlo_module2 = ParseAndReturnVerifiedModule(kHloText).value();\n std::vector> modules;\n modules.push_back(std::move(hlo_module));\n modules.push_back(std::move(hlo_module2));\n auto module_group =\n std::make_unique(\"test_module_group\", std::move(modules));\n std::vector> executors;\n executors.push_back({backend().default_stream_executor()});\n executors.push_back({backend().default_stream_executor()});\n EXPECT_IS_OK(backend().compiler()->Compile(std::move(module_group),\n std::move(executors),\n backend().memory_allocator()));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/llvm_compiler.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/llvm_compiler_test.cc"},"Commit 
Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1177,"cells":{"ID":{"kind":"string","value":"134981e1-f110-441e-b364-4076f0bc05ec"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"gpu_p2p_pipeliner"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/gpu_p2p_pipeliner.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/gpu_p2p_pipeliner_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/gpu_p2p_pipeliner.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/log/check.h\"\n#include \"absl/status/status.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_join.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/pass/hlo_pass_pipeline.h\"\n#include \"xla/service/collective_ops_utils.h\"\n#include \"xla/service/collective_pipeliner.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/util.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nbool ShouldPipeline(const HloInstruction* instr) {\n if (!HloPredicateIsOp(instr)) {\n return false;\n }\n auto it = instr->frontend_attributes().map().find(kSendRecvPipelineAttr);\n if (it == instr->frontend_attributes().map().end()) {\n return false;\n }\n auto allowed_predecessor = [&]() {\n return instr->opcode() == HloOpcode::kRecvDone &&\n instr->control_predecessors().size() == 1 &&\n instr->control_predecessors()[0]->opcode() == HloOpcode::kSend;\n };\n if (!instr->control_successors().empty() ||\n (!instr->control_predecessors().empty() && !allowed_predecessor())) {\n return false;\n }\n bool is_pipelined =\n (instr->user_count() == 1 && instr->parent() != nullptr &&\n instr->users()[0] == instr->parent()->root_instruction());\n return !is_pipelined;\n}\nbool ShouldAllowLoopVariantParameterInChain(const 
HloInstruction* instr) {\n CHECK(instr->opcode() == HloOpcode::kGetTupleElement &&\n instr->operand(0)->opcode() == HloOpcode::kParameter);\n return true;\n}\nabsl::Status PostprocessP2PImpl(\n HloInstruction* instr,\n std::function&)> transformer) {\n if (!HloPredicateIsOp(instr)) {\n return Internal(\"Expected SendDone/RecvDone as the pipelined collective\");\n }\n instr = instr->mutable_operand(0);\n if (!HloPredicateIsOp(instr)) {\n return Internal(\"Expected Send/Recv as the SendDone/RecvDone operand\");\n }\n auto validation_it =\n instr->frontend_attributes().map().find(kSendRecvValidationAttr);\n if (validation_it == instr->frontend_attributes().map().end() ||\n validation_it->second == \"invalid\") {\n return absl::OkStatus();\n }\n auto statusor_bounds = ParseReplicaGroupsOnly(validation_it->second);\n if (!statusor_bounds.ok()) {\n return statusor_bounds.status();\n }\n std::string validation_attr = transformer(statusor_bounds.value());\n xla::FrontendAttributes attributes = instr->frontend_attributes();\n (*attributes.mutable_map())[kSendRecvValidationAttr] = validation_attr;\n instr->set_frontend_attributes(attributes);\n return absl::OkStatus();\n}\nabsl::Status PostprocessPeeledP2P(HloInstruction* instr) {\n auto transform_bounds = [&](std::vector& replica_groups) {\n std::vector> bounds;\n bounds.reserve(replica_groups.size());\n bool all_invalid = true;\n for (const auto& replica_group : replica_groups) {\n int64_t lower_bound = replica_group.replica_ids(0);\n int64_t upper_bound = replica_group.replica_ids(1);\n if (lower_bound <= 0 && upper_bound >= 0) {\n all_invalid = false;\n bounds.push_back({0, 0});\n } else {\n bounds.push_back({1, 0});\n }\n }\n std::string validation_attr;\n if (all_invalid) {\n validation_attr = \"invalid\";\n } else {\n validation_attr = \"{\" +\n absl::StrJoin(bounds, \",\",\n absl::PairFormatter(\n [](std::string* out, int64_t value) {\n absl::StrAppend(out, \"{\", value);\n },\n \",\",\n [](std::string* out, int64_t 
value) {\n absl::StrAppend(out, value, \"}\");\n })) +\n \"}\";\n }\n return validation_attr;\n };\n return PostprocessP2PImpl(instr, transform_bounds);\n};\nabsl::Status PostprocessRotatedP2P(HloInstruction* instr) {\n auto transform_bounds = [&](std::vector& replica_groups) {\n std::vector> bounds;\n bounds.reserve(replica_groups.size());\n bool all_invalid = true;\n for (const auto& replica_group : replica_groups) {\n int64_t lower_bound = replica_group.replica_ids(0);\n int64_t upper_bound = replica_group.replica_ids(1);\n if (lower_bound <= upper_bound) {\n if (lower_bound >= 1) {\n --lower_bound;\n }\n if (upper_bound >= 1) {\n --upper_bound;\n }\n if (lower_bound <= upper_bound) {\n all_invalid = false;\n bounds.push_back({lower_bound, upper_bound});\n } else {\n bounds.push_back({1, 0});\n }\n } else {\n bounds.push_back({lower_bound, upper_bound});\n }\n }\n std::string validation_attr;\n if (all_invalid) {\n validation_attr = \"invalid\";\n } else {\n validation_attr = \"{\" +\n absl::StrJoin(bounds, \",\",\n absl::PairFormatter(\n [](std::string* out, int64_t value) {\n absl::StrAppend(out, \"{\", value);\n },\n \",\",\n [](std::string* out, int64_t value) {\n absl::StrAppend(out, value, \"}\");\n })) +\n \"}\";\n }\n return validation_attr;\n };\n return PostprocessP2PImpl(instr, transform_bounds);\n}\n} \nvoid AddP2PPipeliner(HloPassPipeline& pipeline) {\n CollectivePipeliner::Config config{\n 0,\n INT64_MAX,\n true,\n false,\n true,\n CollectivePipeliner::PipeliningDirection::kBackward,\n ShouldPipeline,\n HloPredicateTrue,\n HloPredicateTrue,\n ShouldAllowLoopVariantParameterInChain,\n true,\n PostprocessPeeledP2P,\n PostprocessRotatedP2P};\n pipeline.AddPass(config);\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/gpu/gpu_p2p_pipeliner.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/log/check.h\"\n#include \"absl/status/statusor.h\"\n#include 
\"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/pass/hlo_pass_pipeline.h\"\n#include \"xla/service/hlo_module_config.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/service/hlo_verifier.h\"\n#include \"xla/tests/filecheck.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/util.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nclass GpuP2PPipelinerTest : public HloTestBase {\n public:\n GpuP2PPipelinerTest() {\n const int64_t kNumReplicas = 1;\n const int64_t kNumComputations = 4;\n config_ = GetModuleConfigForTest(kNumReplicas,\n kNumComputations);\n }\n absl::StatusOr RunOptimizer(HloModule* module) {\n HloPassPipeline pipeline(\"optimizer\");\n pipeline.AddPass(false,\n false);\n AddP2PPipeliner(pipeline);\n pipeline.AddPass(false,\n false);\n return pipeline.Run(module);\n }\n protected:\n HloModuleConfig config_;\n};\nTEST_F(GpuP2PPipelinerTest,\n TransformRecvSendBackwardsWithMetaDataPostProcessing) {\n const char* kHloStr = R\"(\n HloModule module\n cond {\n param = (u32[], u32[2]) parameter(0)\n count = get-tuple-element(param), index=0\n ub = u32[] constant(10)\n ROOT result = pred[] compare(count, ub), direction=LT\n }\n body {\n param = (u32[], u32[2]) parameter(0)\n count = get-tuple-element(param), index=0\n send-data = get-tuple-element(param), index=1\n after-all.0 = token[] after-all()\n recv.0 = (u32[2], u32[], token[]) recv(after-all.0), channel_id=1,\n frontend_attributes={\n _xla_send_recv_source_target_pairs=\"{{1,0}}\",\n _xla_send_recv_pipeline=\"0\",\n _xla_send_recv_validation=\"{{1,7}}\"\n }\n after-all.0.s = token[] after-all()\n send.0 = (u32[2], u32[], token[]) send(send-data, after-all.0.s),\n channel_id=1, frontend_attributes={\n _xla_send_recv_source_target_pairs=\"{{1,0}}\",\n 
_xla_send_recv_pipeline=\"0\",\n _xla_send_recv_validation=\"{{1,7}}\"\n }\n recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=1,\n frontend_attributes={\n _xla_send_recv_pipeline=\"0\"\n }, control-predecessors={send.0}\n recv-data = u32[2] get-tuple-element(recv-done.0), index=0\n c1 = u32[] constant(1)\n new_count = u32[] add(count, c1)\n r = u32[2] broadcast(c1), dimensions={}\n s = u32[2] add(r, recv-data)\n send-done.0 = token[] send-done(send.0), channel_id=1,\n frontend_attributes={\n _xla_send_recv_pipeline=\"0\"\n }\n ROOT result = (u32[], u32[2]) tuple(new_count, s)\n }\n ENTRY test_computation {\n c0 = u32[] constant(0)\n c1 = u32[] constant(1)\n r = u32[] replica-id()\n a = u32[] add(c1, r)\n init = u32[2] broadcast(a), dimensions={}\n while_init = (u32[], u32[2]) tuple(c0, init)\n while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond\n ROOT result = u32[2] get-tuple-element(while_result), index=1\n })\";\n auto module = ParseAndReturnUnverifiedModule(kHloStr, config_).value();\n EXPECT_TRUE(RunOptimizer(module.get()).value());\n XLA_VLOG_LINES(10, module->ToString());\n auto while_op = FindInstruction(module.get(), \"while\");\n EXPECT_EQ(while_op->opcode(), HloOpcode::kWhile);\n EXPECT_EQ(while_op->shape().tuple_shapes().size(), 5);\n auto recv1 =\n DynCast(FindInstruction(module.get(), \"recv.1\"));\n EXPECT_NE(recv1, nullptr);\n auto recv2 =\n DynCast(FindInstruction(module.get(), \"recv.2\"));\n EXPECT_NE(recv2, nullptr);\n EXPECT_EQ(recv1->channel_id(), recv2->channel_id());\n auto send1 =\n DynCast(FindInstruction(module.get(), \"send.1\"));\n EXPECT_NE(send1, nullptr);\n auto send2 =\n DynCast(FindInstruction(module.get(), \"send.2\"));\n EXPECT_NE(send2, nullptr);\n EXPECT_EQ(send1->channel_id(), send2->channel_id());\n const char* kPeeledAttr = \"_xla_send_recv_validation=\\\"invalid\\\"\";\n const char* kRotatedAttr = \"_xla_send_recv_validation={{0,6}}\";\n EXPECT_THAT(send1->ToString(), 
::testing::HasSubstr(kPeeledAttr));\n EXPECT_THAT(recv1->ToString(), ::testing::HasSubstr(kPeeledAttr));\n EXPECT_THAT(send2->ToString(), ::testing::HasSubstr(kRotatedAttr));\n EXPECT_THAT(recv2->ToString(), ::testing::HasSubstr(kRotatedAttr));\n}\nTEST_F(GpuP2PPipelinerTest, SendRecvForwardCycle) {\n const char* kHloStr = R\"(\n HloModule test\n while_body {\n inputs = (u32[], f32[2,2], f32[2,2]) parameter(0)\n iter = u32[] get-tuple-element(inputs), index=0\n iter_increment = u32[] constant(1)\n next_iter = u32[] add(iter, iter_increment)\n weights = f32[2,2] get-tuple-element(inputs), index=2\n partition-id = u32[] partition-id()\n zero = u32[] constant(0)\n compare = pred[] compare(partition-id, zero), direction=EQ\n broadcast = pred[2,2] broadcast(compare), dimensions={}\n data = f32[2,2] get-tuple-element(inputs), index=1\n after-all = token[] after-all()\n send = (f32[2,2], u32[], token[]) send(data, after-all), channel_id=1,\n frontend_attributes={\n _xla_send_recv_pipeline=\"0\",\n _xla_send_recv_source_target_pairs=\"{{3,0}}\",\n _xla_send_recv_validation=\"{{3,10}}\"\n }\n recv = (f32[2,2], u32[], token[]) recv(after-all), channel_id=1,\n frontend_attributes={\n _xla_send_recv_pipeline=\"0\",\n _xla_send_recv_source_target_pairs=\"{{3,0}}\",\n _xla_send_recv_validation=\"{{3,10}}\"\n }\n recv-done = (f32[2,2], token[]) recv-done(recv), channel_id=1,\n frontend_attributes={_xla_send_recv_pipeline=\"0\"}, control-predecessors={send}\n recv-done-data = f32[2,2] get-tuple-element(recv-done), index=0\n after-all.1 = token[] after-all()\n send.1 = (f32[2,2], u32[], token[]) send(data, after-all.1), channel_id=2,\n frontend_attributes={\n _xla_send_recv_pipeline=\"1\",\n _xla_send_recv_source_target_pairs=\"{{0,1},{1,2},{2,3}}\",\n _xla_send_recv_validation=\"{{0,7},{1,8},{2,9}}\"\n }\n recv.1 = (f32[2,2], u32[], token[]) recv(after-all.1), channel_id=2,\n frontend_attributes={\n _xla_send_recv_pipeline=\"1\",\n 
_xla_send_recv_source_target_pairs=\"{{0,1},{1,2},{2,3}}\",\n _xla_send_recv_validation=\"{{0,7},{1,8},{2,9}}\"\n }\n recv-done.1 = (f32[2,2], token[]) recv-done(recv.1), channel_id=2,\n frontend_attributes={_xla_send_recv_pipeline=\"1\"}, control-predecessors={send.1}\n recv-done-1-data = f32[2,2] get-tuple-element(recv-done.1), index=0\n select = f32[2,2] select(broadcast, recv-done-data, recv-done-1-data)\n matmul = f32[2,2] dot(weights, select),\n lhs_contracting_dims={1}, rhs_contracting_dims={0}\n ROOT result = (u32[], f32[2,2], f32[2,2]) tuple(next_iter, matmul, weights)\n send-done = token[] send-done(send), channel_id=1,\n frontend_attributes={_xla_send_recv_pipeline=\"0\"}\n send-done.1 = token[] send-done(send.1), channel_id=2,\n frontend_attributes={_xla_send_recv_pipeline=\"1\"}\n }\n while_cond {\n inputs = (u32[], f32[2,2], f32[2,2]) parameter(0)\n iter = u32[] get-tuple-element(inputs), index=0\n max_iter = u32[] constant(3)\n ROOT compare = pred[] compare(iter, max_iter), direction=LT\n }\n ENTRY test_computation {\n start_iter = u32[] constant(0)\n input_data = f32[2,2] parameter(0)\n input_weights = f32[2,2] parameter(1)\n input = (u32[], f32[2,2], f32[2,2]) tuple(start_iter, input_data, input_weights)\n while_result = (u32[], f32[2,2], f32[2,2]) while(input), condition=while_cond, body=while_body\n ROOT data_out = f32[2,2] get-tuple-element(while_result), index=1\n }\n )\";\n auto module = ParseAndReturnUnverifiedModule(kHloStr, config_).value();\n EXPECT_TRUE(RunOptimizer(module.get()).value());\n EXPECT_TRUE(RunFileCheck(module->ToString(), R\"(\n CHECK: %[[RECV_BWD_START:.*]] = {{.*}} after-all()\n CHECK: %[[RECV_BWD:.*]] = {{.*}} recv(token[] %[[RECV_BWD_START:.*]]), channel_id=1, frontend_attributes={_xla_send_recv_pipeline=\"0\",_xla_send_recv_source_target_pairs={{[{][{]}}3,0{{[}][}]}},_xla_send_recv_validation={{[{][{]}}2,9{{[}][}]}}}\n CHECK: %[[RECV_DONE_BWD:.*]] = {{.*}} recv-done({{.*}} %[[RECV_BWD:.*]]), channel_id=1, 
frontend_attributes={_xla_send_recv_pipeline=\"0\"}\n CHECK: %[[RECV_FWD_START:.*]] = {{.*}} after-all()\n CHECK: %[[RECV_FWD:.*]] = {{.*}} recv(token[] %[[RECV_FWD_START:.*]]), channel_id=2, frontend_attributes={_xla_send_recv_pipeline=\"1\",_xla_send_recv_source_target_pairs={{[{][{]}}0,1},{1,2},{2,3{{[}][}]}},_xla_send_recv_validation={{[{][{]}}0,6},{0,7},{1,8{{[}][}]}}}\n CHECK: %[[RECV_DONE_FWD:.*]] = {{.*}} recv-done((f32[2,2]{1,0}, u32[], token[]) %[[RECV_FWD:.*]]), channel_id=2, frontend_attributes={_xla_send_recv_pipeline=\"1\"}\n CHECK: %[[SEND_BWD:.*]] = {{.*}} send({{.*}} %[[RECV_BWD_START:.*]]), channel_id=1, frontend_attributes={_xla_send_recv_pipeline=\"0\",_xla_send_recv_source_target_pairs={{[{][{]}}3,0{{[}][}]}},_xla_send_recv_validation={{[{][{]}}2,9{{[}][}]}}}\n CHECK: %[[SEND_DONE_BWD:.*]] = {{.*}} send-done({{.*}} %[[SEND_BWD:.*]]), channel_id=1, frontend_attributes={_xla_send_recv_pipeline=\"0\"}\n CHECK: %[[SEND_FWD:.*]] = {{.*}} send({{.*}} %[[RECV_FWD_START:.*]]), channel_id=2, frontend_attributes={_xla_send_recv_pipeline=\"1\",_xla_send_recv_source_target_pairs={{[{][{]}}0,1},{1,2},{2,3{{[}][}]}},_xla_send_recv_validation={{[{][{]}}0,6},{0,7},{1,8{{[}][}]}}}\n CHECK: %[[SEND_DONE_FWD:.*]] = {{.*}} send-done({{.*}} %[[SEND_FWD:.*]]), channel_id=2, frontend_attributes={_xla_send_recv_pipeline=\"1\"}\n CHECK: %[[WHILE_COND:.*]] (cond_param: {{.*}}\n CHECK-NEXT: %[[COND_PARAM:.*]] = {{.*}} parameter(0)\n CHECK: %[[CURRENT_ITER:.*]] = {{.*}} get-tuple-element({{.*}} %[[COND_PARAM:.*]]), index=0\n CHECK: %[[TWO:.*]] = {{.*}} constant(2)\n CHECK: ROOT %[[COMPARE:.*]] = pred[] compare({{.*}} %[[CURRENT_ITER:.*]], {{.*}} %[[TWO:.*]]), direction=LT\n CHECK: ENTRY %[[TEST_COMPUTATION:.*]] (input_data: {{.*}}\n CHECK: %[[RECV_BWD_DUMMY_START:.*]] = {{.*}} after-all()\n CHECK: %[[RECV_BWD_DUMMY:.*]] = {{.*}} recv(token[] %[[RECV_BWD_DUMMY_START:.*]]), channel_id=1, 
frontend_attributes={_xla_send_recv_pipeline=\"0\",_xla_send_recv_source_target_pairs={{[{][{]}}3,0{{[}][}]}},_xla_send_recv_validation=\"invalid\"}\n CHECK: %[[RECV_DONE_BWD_DUMMY:.*]] = {{.*}} recv-done({{.*}} %[[RECV_BWD_DUMMY:.*]]), channel_id=1, frontend_attributes={_xla_send_recv_pipeline=\"0\"}\n CHECK: %[[RECV_FWD_FIRST_ITER_START:.*]] = {{.*}} after-all()\n CHECK: %[[RECV_FWD_FIRST_ITER:.*]] = {{.*}} recv(token[] %[[RECV_FWD_FIRST_ITER_START:.*]]), channel_id=2, frontend_attributes={_xla_send_recv_pipeline=\"1\",_xla_send_recv_source_target_pairs={{[{][{]}}0,1},{1,2},{2,3{{[}][}]}},_xla_send_recv_validation={{[{][{]}}0,0},{1,0},{1,0{{[}][}]}}}\n CHECK: %[[RECV_DONE_FWD_FIRST_ITER:.*]] = {{.*}} recv-done({{.*}} %[[RECV_FWD_FIRST_ITER:.*]]), channel_id=2, frontend_attributes={_xla_send_recv_pipeline=\"1\"}\n CHECK: %[[SEND_BWD_DUMMY:.*]] = {{.*}} send({{.*}} %[[RECV_DUMMY_START:.*]]), channel_id=1, frontend_attributes={_xla_send_recv_pipeline=\"0\",_xla_send_recv_source_target_pairs={{[{][{]}}3,0{{[}][}]}},_xla_send_recv_validation=\"invalid\"}\n CHECK: %[[SEND_DONE_BWD_DUMMY:.*]] = {{.*}} send-done({{.*}} %[[SEND_BWD_DUMMY:.*]]), channel_id=1, frontend_attributes={_xla_send_recv_pipeline=\"0\"}\n CHECK: %[[SEND_FWD_FIRST_ITER:.*]] = {{.*}} send({{.*}} %[[RECV_FWD_FIRST_ITER_START:.*]]), channel_id=2, frontend_attributes={_xla_send_recv_pipeline=\"1\",_xla_send_recv_source_target_pairs={{[{][{]}}0,1},{1,2},{2,3{{[}][}]}},_xla_send_recv_validation={{[{][{]}}0,0},{1,0},{1,0{{[}][}]}}}\n CHECK: %[[SEND_DONE_FWD_FIRST_ITER:.*]] = {{.*}} send-done({{.*}} %[[SEND_FWD_FIRST_ITER:.*]]), channel_id=2, frontend_attributes={_xla_send_recv_pipeline=\"1\"}\n CHECK: %[[START_LOOP_FROM_ITER_ONE:.*]] = u32[] constant(1)\n CHECK: %[[LOOP_INPUT:.*]] = {{.*}} tuple({{.*}} %[[START_LOOP_FROM_ITER_ONE:.*]])\n CHECK: %[[WHILE:.*]] = {{.*}} while({{.*}} %[[LOOP_INPUT:.*]]), {{.*}}\n )\")\n .value());\n}\n} \n} \n} "},"Code 
Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_p2p_pipeliner.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_p2p_pipeliner_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1178,"cells":{"ID":{"kind":"string","value":"25f2c367-30d8-42b1-ae1a-153d697a43ab"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"gpu_float_support"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/gpu_float_support.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/gpu_float_support_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/gpu_float_support.h\"\n#include \n#include \n#include \"absl/log/check.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/float_support.h\"\n#include \"xla/service/gpu/fusions/triton/triton_support.h\"\n#include \"xla/stream_executor/device_description.h\"\n#include \"xla/xla_data.pb.h\"\nnamespace xla {\nnamespace gpu {\nbool GpuFloatSupport::SupportsMixedPrecisions(const HloInstruction& hlo) const {\n if (FloatSupport::SupportsMixedPrecisions(hlo)) return true;\n switch (hlo.opcode()) {\n case HloOpcode::kDot: {\n CHECK_GE(hlo.operand_count(), HloDotInstruction::kOperands);\n const PrimitiveType lhs_type = hlo.operand(0)->shape().element_type();\n const PrimitiveType rhs_type = hlo.operand(1)->shape().element_type();\n const PrimitiveType result_type = hlo.shape().element_type();\n return (lhs_type == F16 && rhs_type == F16 && result_type == F32) ||\n (lhs_type == BF16 && rhs_type == BF16 && 
result_type == F32);\n }\n default:\n return false;\n }\n}\nbool GpuFloatSupport::IsSupported(const HloInstruction& hlo) const {\n switch (hlo.opcode()) {\n case HloOpcode::kAllReduce:\n case HloOpcode::kAllReduceStart:\n case HloOpcode::kAllReduceDone:\n case HloOpcode::kReduceScatter:\n case HloOpcode::kDot:\n using TypeAndCC = std::pair<\n PrimitiveType,\n stream_executor::CudaComputeCapability::CudaComputeCapabilities>;\n for (auto [type, cc] :\n {TypeAndCC(F8E4M3FN, se::CudaComputeCapability::AMPERE),\n TypeAndCC(F8E5M2, se::CudaComputeCapability::HOPPER)}) {\n if (LowPrecisionType() == type) {\n auto* cuda_compute_capability =\n std::get_if(&compute_capability_);\n return cuda_compute_capability &&\n cuda_compute_capability->IsAtLeast(cc) &&\n IsTritonFusedComputation(*hlo.parent());\n }\n }\n return LowPrecisionType() == BF16;\n case HloOpcode::kAllGather:\n case HloOpcode::kAllToAll:\n case HloOpcode::kBroadcast:\n case HloOpcode::kCollectivePermute:\n case HloOpcode::kConcatenate:\n case HloOpcode::kCopy:\n case HloOpcode::kDynamicSlice:\n case HloOpcode::kDynamicUpdateSlice:\n case HloOpcode::kGather:\n case HloOpcode::kPad:\n case HloOpcode::kReshape:\n case HloOpcode::kReverse:\n case HloOpcode::kScatter:\n case HloOpcode::kSelect:\n case HloOpcode::kSelectAndScatter:\n case HloOpcode::kSlice:\n case HloOpcode::kTranspose:\n case HloOpcode::kBitcast:\n return true;\n case HloOpcode::kAdd:\n case HloOpcode::kSubtract:\n case HloOpcode::kMultiply: {\n if (LowPrecisionType() == BF16) {\n auto* cuda_compute_capability =\n std::get_if(&compute_capability_);\n return cuda_compute_capability != nullptr &&\n cuda_compute_capability->IsAtLeastHopper();\n }\n return false;\n }\n default:\n return false;\n }\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/gpu/gpu_float_support.h\"\n#include \n#include \n#include \n#include \"absl/log/check.h\"\n#include \"absl/status/statusor.h\"\n#include 
\"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/float_normalization.h\"\n#include \"xla/service/gpu/backend_configs.pb.h\"\n#include \"xla/service/gpu/ir_emission_utils.h\"\n#include \"xla/service/hlo_verifier.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/stream_executor/device_description.h\"\n#include \"xla/test_helpers.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/xla_data.pb.h\"\nnamespace xla::gpu {\nnamespace {\nclass FloatSupportTest : public HloTestBase {\n protected:\n FloatSupportTest()\n : HloTestBase(false,\n true) {}\n bool Normalize(HloModule* module, se::GpuComputeCapability cc,\n PrimitiveType low_precision_type,\n PrimitiveType high_precision_type) {\n GpuFloatSupport float_support(cc, low_precision_type, high_precision_type);\n FloatNormalization normalization(&float_support);\n absl::StatusOr result = normalization.Run(module);\n EXPECT_IS_OK(result.status());\n HloVerifier verifier(false,\n true);\n EXPECT_IS_OK(verifier.Run(module).status());\n return result.value();\n }\n std::unique_ptr CreateComputation(PrimitiveType lhs_type,\n PrimitiveType rhs_type,\n PrimitiveType result_type) {\n auto builder = HloComputation::Builder(TestName());\n Shape lhs_shape = ShapeUtil::MakeShape(lhs_type, {3, 3});\n Shape rhs_shape = ShapeUtil::MakeShape(rhs_type, {3, 3});\n Shape result_shape = ShapeUtil::MakeShape(result_type, {3, 3});\n HloInstruction* a = builder.AddInstruction(\n HloInstruction::CreateParameter(0, lhs_shape, \"a\"));\n HloInstruction* b = builder.AddInstruction(\n HloInstruction::CreateParameter(1, rhs_shape, \"b\"));\n PrecisionConfig precision_config;\n DotDimensionNumbers dot_dnums;\n dot_dnums.add_lhs_contracting_dimensions(1);\n dot_dnums.add_rhs_contracting_dimensions(1);\n builder.AddInstruction(HloInstruction::CreateDot(\n result_shape, a, b, dot_dnums, 
precision_config));\n return builder.Build();\n }\n void TestDotConversion(PrimitiveType lhs_type, PrimitiveType rhs_type,\n PrimitiveType result_type, se::GpuComputeCapability cc,\n bool should_convert_lhs, bool should_convert_rhs,\n PrimitiveType low_precision_type,\n PrimitiveType high_precision_type = F16) {\n auto module = CreateNewVerifiedModule();\n auto computation = module->AddEntryComputation(\n CreateComputation(lhs_type, rhs_type, result_type));\n EXPECT_EQ(\n Normalize(module.get(), cc, low_precision_type, high_precision_type),\n should_convert_lhs || should_convert_rhs);\n EXPECT_EQ(computation->root_instruction()->opcode(), HloOpcode::kDot);\n EXPECT_EQ(computation->root_instruction()->operand(0)->opcode() ==\n HloOpcode::kConvert,\n should_convert_lhs);\n EXPECT_EQ(computation->root_instruction()->operand(1)->opcode() ==\n HloOpcode::kConvert,\n should_convert_rhs);\n }\n void TestTritonFusedDot(PrimitiveType lhs_type, PrimitiveType rhs_type,\n PrimitiveType result_type,\n se::GpuComputeCapability cc, bool should_convert_lhs,\n bool should_convert_rhs,\n PrimitiveType low_precision_type,\n PrimitiveType high_precision_type = F16) {\n auto module = CreateNewVerifiedModule();\n auto computation = module->AddComputationAndUnifyNamesAndIds(\n CreateComputation(lhs_type, rhs_type, result_type), false);\n Shape lhs_shape = ShapeUtil::MakeShape(lhs_type, {3, 3});\n Shape rhs_shape = ShapeUtil::MakeShape(rhs_type, {3, 3});\n Shape result_shape = ShapeUtil::MakeShape(result_type, {3, 3});\n auto builder = HloComputation::Builder(\"main\");\n HloInstruction* a = builder.AddInstruction(\n HloInstruction::CreateParameter(0, lhs_shape, \"a\"));\n HloInstruction* b = builder.AddInstruction(\n HloInstruction::CreateParameter(1, rhs_shape, \"b\"));\n HloInstruction* fusion =\n builder.AddInstruction(HloInstruction::CreateFusion(\n result_shape, HloInstruction::FusionKind::kCustom, {a, b},\n computation));\n GpuBackendConfig config;\n 
config.mutable_fusion_backend_config()->set_kind(\n std::string(kTritonGemmFusionKind));\n CHECK_OK(fusion->set_backend_config(config));\n module->AddEntryComputation(builder.Build());\n EXPECT_EQ(\n Normalize(module.get(), cc, low_precision_type, high_precision_type),\n should_convert_lhs || should_convert_rhs);\n EXPECT_EQ(computation->root_instruction()->opcode(), HloOpcode::kDot);\n EXPECT_EQ(computation->root_instruction()->operand(0)->opcode() ==\n HloOpcode::kConvert,\n should_convert_lhs);\n EXPECT_EQ(computation->root_instruction()->operand(1)->opcode() ==\n HloOpcode::kConvert,\n should_convert_rhs);\n }\n};\nTEST_F(FloatSupportTest, ShouldAlwaysConvertFp8Dot) {\n TestDotConversion(F8E4M3FN, F8E4M3FN, F16,\n se::CudaComputeCapability::Hopper(),\n true,\n true, F8E4M3FN);\n TestDotConversion(F8E4M3FN, F8E4M3FN, F32,\n se::CudaComputeCapability::Hopper(),\n true,\n true, F8E4M3FN);\n TestDotConversion(F8E4M3FN, F8E4M3FN, F16,\n se::CudaComputeCapability::Ampere(),\n true,\n true, F8E4M3FN);\n TestDotConversion(F8E4M3FN, F8E4M3FN, F32,\n se::CudaComputeCapability::Hopper(),\n true,\n true, F8E4M3FN);\n TestDotConversion(F8E5M2, F8E5M2, F16, se::CudaComputeCapability::Ampere(),\n true,\n true, F8E5M2);\n TestDotConversion(F8E5M2, F8E5M2, F32, se::CudaComputeCapability::Ampere(),\n true,\n true, F8E5M2);\n TestDotConversion(F8E5M2, F8E4M3FN, F16, se::CudaComputeCapability::Hopper(),\n true,\n false, F8E5M2);\n TestDotConversion(F8E5M2, F8E4M3FN, F32, se::CudaComputeCapability::Hopper(),\n true,\n false, F8E5M2);\n TestDotConversion(F8E5M2, F16, F16, se::CudaComputeCapability::Hopper(),\n true,\n false, F8E5M2);\n TestDotConversion(F8E5M2, F16, F32, se::CudaComputeCapability::Hopper(),\n true,\n false, F8E5M2);\n}\nTEST_F(FloatSupportTest, ShouldConverTritonUnsupportedFp8Dot) {\n TestTritonFusedDot(F8E4M3FN, F8E4M3FN, F16,\n se::CudaComputeCapability::Hopper(),\n true,\n true, F8E4M3FN);\n TestTritonFusedDot(F8E4M3FN, F8E4M3FN, F32,\n 
se::CudaComputeCapability::Hopper(),\n false,\n false, F8E4M3FN);\n TestTritonFusedDot(F8E4M3FN, F8E4M3FN, F16,\n se::CudaComputeCapability::Ampere(),\n true,\n true, F8E4M3FN);\n TestTritonFusedDot(F8E4M3FN, F8E4M3FN, F32,\n se::CudaComputeCapability::Hopper(),\n false,\n false, F8E4M3FN);\n TestTritonFusedDot(F8E5M2, F8E5M2, F16, se::CudaComputeCapability::Ampere(),\n true,\n true, F8E5M2);\n TestTritonFusedDot(F8E5M2, F8E5M2, F32, se::CudaComputeCapability::Ampere(),\n true,\n true, F8E5M2);\n TestTritonFusedDot(F8E5M2, F8E4M3FN, F16, se::CudaComputeCapability::Hopper(),\n true,\n false, F8E5M2);\n TestTritonFusedDot(F8E5M2, F8E4M3FN, F32, se::CudaComputeCapability::Hopper(),\n false,\n false, F8E5M2);\n TestTritonFusedDot(F8E5M2, F16, F16, se::CudaComputeCapability::Hopper(),\n true,\n false, F8E5M2);\n TestTritonFusedDot(F8E5M2, F16, F32, se::CudaComputeCapability::Hopper(),\n true,\n false, F8E5M2);\n}\nTEST_F(FloatSupportTest, ShouldKeepBf16OnAmpere) {\n TestDotConversion(BF16, BF16, F32, se::CudaComputeCapability::Ampere(),\n false,\n false, BF16);\n}\nTEST_F(FloatSupportTest, ShouldKeepBf16OnHopper) {\n TestDotConversion(BF16, BF16, F32, se::CudaComputeCapability::Hopper(),\n false,\n false, BF16);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_float_support.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_float_support_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1179,"cells":{"ID":{"kind":"string","value":"914cb876-e268-4338-974a-bdc42eb9ee1a"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"triton_tiling_propagation"},"File Path in 
Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/triton_tiling_propagation.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/triton_tiling_propagation_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/triton_tiling_propagation.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/container/inlined_vector.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_join.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_query.h\"\n#include \"xla/layout.h\"\n#include \"xla/permutation_util.h\"\n#include \"xla/service/gpu/fusions/triton/triton_support.h\"\n#include \"xla/service/gpu/fusions/triton/triton_support_legacy.h\"\n#include \"xla/service/instruction_fusion.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/stream_executor/device_description.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nabsl::flat_hash_map\nFilterTrivialDims(\n const absl::flat_hash_map&\n dim_iter_specs) {\n absl::flat_hash_map\n non_trivial_dim_iteration_specs;\n for (const auto& [dim, dim_spec] : dim_iter_specs) {\n if (dim_spec.size() == 1 && dim_spec[0].count == 1) {\n continue;\n }\n non_trivial_dim_iteration_specs[dim] = dim_spec;\n }\n return non_trivial_dim_iteration_specs;\n}\n} \nconst TensorIterationSpec::DimIterationSpec* TensorIterationSpec::Find(\n const int dimension) const {\n if (auto it = dim_iteration_specs_.find(dimension);\n it != dim_iteration_specs_.end()) {\n return &it->second;\n }\n return nullptr;\n}\nstd::vector 
TensorIterationSpec::GetDimensions() const {\n std::vector result;\n result.reserve(dim_iteration_specs_.size());\n for (const auto& [dim, _] : dim_iteration_specs_) {\n result.push_back(dim);\n }\n return result;\n}\nbool TensorIterationSpec::IsPhysicallyEquivalent(\n const TensorIterationSpec& other) const {\n const absl::flat_hash_map\n non_trivial_dim_iteration_specs = FilterTrivialDims(dim_iteration_specs_);\n const absl::flat_hash_map\n other_non_trivial_dim_iteration_specs =\n FilterTrivialDims(other.dim_iteration_specs_);\n if (non_trivial_dim_iteration_specs.size() !=\n other_non_trivial_dim_iteration_specs.size()) {\n return false;\n }\n for (const auto& pair : non_trivial_dim_iteration_specs) {\n int dimension = pair.first;\n const DimIterationSpec& dim_iter_spec = pair.second;\n auto other_it = other_non_trivial_dim_iteration_specs.find(dimension);\n if (other_it == other_non_trivial_dim_iteration_specs.end()) {\n return false;\n }\n const DimIterationSpec& other_dim_iter_spec = other_it->second;\n if (dim_iter_spec.size() != other_dim_iter_spec.size()) {\n return false;\n }\n for (size_t i = 0; i < dim_iter_spec.size(); i++) {\n if (!dim_iter_spec[i].IsPhysicallyEquivalent(other_dim_iter_spec[i])) {\n return false;\n }\n }\n }\n return true;\n}\nstd::string TensorIterationSpec::IterationSpecFragment::ToString() const {\n return absl::StrCat(\"{stride=\", stride, \", count=\", count,\n \", slice_start=\", slice_start,\n \", sliced_count=\", sliced_count, \", subfragments=[\",\n absl::StrJoin(subfragments, \", \"), \"]}\");\n}\nstd::string TensorIterationSpec::ToString() const {\n return absl::StrCat(\n \"{\",\n absl::StrJoin(dim_iteration_specs_, \", \",\n [&](std::string* s, const auto& kv) {\n absl::StrAppend(\n s, kv.first, \": \", \"[\",\n absl::StrJoin(kv.second, \", \",\n [&](std::string* ss, const auto& v) {\n absl::StrAppend(ss, v.ToString());\n }),\n \"]\");\n }),\n \"}\");\n}\nnamespace triton_fusion {\nusing Fragment = 
DimensionOrder::Fragment;\nusing Fragments = DimensionOrder::Fragments;\nusing FragmentOrders = DimensionOrder::FragmentOrders;\n DimensionOrder DimensionOrder::FromDotOperandOrOutput(\n const HloInstruction& hlo, const int split_k_dimension_index) {\n DimensionOrder dim_order;\n dim_order.tensor_fragments_order_.reserve(hlo.shape().rank());\n for (const int i : hlo.shape().layout().minor_to_major()) {\n int target_dim_number = i;\n if (i == split_k_dimension_index) {\n CHECK(!dim_order.tensor_fragments_order_.empty())\n << \"The split-K batch dimension has be preceded by the contracting \"\n \"dimension it originates from by construction.\";\n target_dim_number =\n dim_order.tensor_fragments_order_.back().dst_dim_number();\n }\n dim_order.dim_fragments_orders_[target_dim_number].push_back(\n dim_order.tensor_fragments_order_.size());\n dim_order.tensor_fragments_order_.push_back(\n Fragment{target_dim_number, hlo.shape().dimensions(i)});\n }\n return dim_order;\n}\nstd::string DimensionOrder::Fragment::ToString() const {\n return absl::StrCat(dst_dim_number_, \":\", count_, \":\", slice_start_, \"-\",\n sliced_count_);\n}\nstd::string DimensionOrder::ToString() const {\n std::string ret = absl::StrJoin(tensor_fragments_order_, \" - \",\n [](std::string* out, const Fragment& f) {\n absl::StrAppend(out, f.ToString(), \" \");\n });\n absl::StrAppend(&ret, \"|\");\n for (const auto& [dim, fragments] : dim_fragments_orders_) {\n absl::StrAppend(&ret, dim, \":\", absl::StrJoin(fragments, \",\"), \" \");\n }\n return ret;\n}\nTensorIterationSpec DimensionOrder::ToTensorIterationSpec() const {\n const Fragments& dim_fragments = TensorFragmentsOrder();\n TensorIterationSpec tensor_spec;\n int64_t accumulated_stride = 1;\n int last_dim = -1;\n for (int dim_order_index = 0; dim_order_index < dim_fragments.size();\n ++dim_order_index) {\n const DimensionOrder::Fragment& fragment = dim_fragments[dim_order_index];\n VLOG(6) << fragment.ToString();\n 
TensorIterationSpec::DimIterationSpec& dim_spec =\n tensor_spec[fragment.dst_dim_number()];\n if (last_dim == fragment.dst_dim_number()) {\n if (!dim_spec.empty() && !dim_spec.back().subfragments.empty() &&\n dim_spec.back().subfragments.back() == 1) {\n dim_spec.back().subfragments.pop_back();\n }\n if (fragment.full_count() > 1) {\n CHECK(!dim_spec.empty());\n CHECK(!dim_spec.back().is_sliced())\n << \"Only the major-most fragment can have an offset.\";\n dim_spec.back().slice_start =\n fragment.slice_start() * dim_spec.back().count;\n dim_spec.back().sliced_count =\n fragment.sliced_count() * dim_spec.back().count;\n dim_spec.back().count *= fragment.full_count();\n dim_spec.back().subfragments.push_back(fragment.sliced_count());\n }\n } else {\n dim_spec.push_back(TensorIterationSpec::IterationSpecFragment{\n accumulated_stride,\n fragment.full_count(),\n fragment.slice_start(),\n fragment.sliced_count(),\n {fragment.sliced_count()}});\n }\n accumulated_stride *= fragment.full_count();\n last_dim = fragment.dst_dim_number();\n }\n for (int dim_idx : tensor_spec.GetDimensions()) {\n TensorIterationSpec::DimIterationSpec& dim_spec = tensor_spec[dim_idx];\n if (dim_spec.size() <= 1) continue;\n TensorIterationSpec::DimIterationSpec filtered_dim_spec;\n absl::c_copy_if(dim_spec, std::back_inserter(filtered_dim_spec),\n [](const TensorIterationSpec::IterationSpecFragment& f) {\n return f.count != 1;\n });\n tensor_spec[dim_idx] = filtered_dim_spec;\n }\n tensor_spec.RemoveEmptyDimensions();\n return tensor_spec;\n}\nnamespace {\nstd::optional LogicalIndexOfLabeledDimension(\n const Shape& shape, const DimensionOrder& dim_order, const int label) {\n auto fragment_it = dim_order.TensorFragmentsOrder().cbegin();\n for (int dim : shape.layout().minor_to_major()) {\n const int64_t dim_size = shape.dimensions()[dim];\n int64_t fragments_size = 1;\n while (fragments_size < dim_size) {\n fragments_size *= fragment_it->full_count();\n if (fragment_it->dst_dim_number() == 
label) {\n return dim;\n }\n ++fragment_it;\n }\n }\n return std::nullopt;\n}\nusing Int64OrError = std::variant;\nInt64OrError CombineSplitDimMajorPartSizeReqs(int64_t a, int64_t b) {\n if (a == b || b == kNoSplitRequirement) {\n return a;\n }\n if (a == kNoSplitRequirement) {\n return b;\n }\n return FusionDecision::Forbid(\"Conflicting splits of splittable dimension\");\n}\n} \nDotRequirementsOrError CombineDotRequirements(\n DotRequirements a, DotRequirementsOrError b_or_error) {\n if (std::holds_alternative(b_or_error)) {\n return b_or_error;\n }\n const DotRequirements& b = std::get(b_or_error);\n Int64OrError combined_size_req =\n CombineSplitDimMajorPartSizeReqs(a.splittable_dimension_major_part_size,\n b.splittable_dimension_major_part_size);\n if (std::holds_alternative(combined_size_req)) {\n return std::get(combined_size_req);\n }\n return DotRequirements(std::get(combined_size_req));\n}\nnamespace {\nDotRequirementsOrError GetRequirementsIfSupportedOrder(\n const DimensionOrder& order, const DotProperties& properties) {\n VLOG(8) << order.ToString();\n int64_t split_dim_major_part = kNoSplitRequirement;\n const Fragments& tensor_dim_fragments = order.TensorFragmentsOrder();\n for (const auto& [dim_index, dim_fragments] : order.DimFragmentsOrders()) {\n CHECK(!dim_fragments.empty());\n for (int i = 0; i < dim_fragments.size() - 1; ++i) {\n if (tensor_dim_fragments[dim_fragments[i]].is_sliced()) {\n return FusionDecision::Forbid(\"Sliced non-major-most fragment.\");\n }\n }\n int group_counter = 0;\n int last_seen_group_last_fragment_index = -1;\n auto fragment_it = dim_fragments.cbegin();\n while (true) {\n if (fragment_it == dim_fragments.cend()) {\n break;\n }\n int64_t grouped_size = tensor_dim_fragments[*fragment_it].full_count();\n while ((fragment_it + 1) != dim_fragments.cend() &&\n *(fragment_it + 1) == *fragment_it + 1) {\n ++fragment_it;\n grouped_size *= tensor_dim_fragments[*fragment_it].full_count();\n }\n if (grouped_size == 1) {\n 
++fragment_it;\n continue;\n }\n if (last_seen_group_last_fragment_index > *fragment_it) {\n return FusionDecision::Forbid(\"Transpose within a dimension.\");\n }\n ++group_counter;\n if (group_counter > 1) {\n const int splittable_dimension_index =\n properties.splittable_dimension_index;\n if (dim_index == splittable_dimension_index) {\n if (group_counter == 2) {\n if (split_dim_major_part != kNoSplitRequirement &&\n split_dim_major_part != grouped_size) {\n return FusionDecision::Forbid(\n \"Conflicting splits of splittable dimension\");\n }\n split_dim_major_part = grouped_size;\n } else if (group_counter > 2) {\n return FusionDecision::Forbid(\n \"2nd split of a splittable dimension.\");\n }\n } else {\n return FusionDecision::Forbid(\"Unsupported split of a dimension.\");\n }\n }\n last_seen_group_last_fragment_index = *fragment_it;\n ++fragment_it;\n }\n }\n return DotRequirements(split_dim_major_part);\n}\nDotRequirementsOrError GetRequirementsIfSupportedOrders(\n const HloInstruction& hlo, const DimOrderMap& dim_orders,\n const DotProperties& properties) {\n const DotRequirements empty_requirements(kNoSplitRequirement);\n auto get_requirements =\n [&](const HloInstruction& instr) -> DotRequirementsOrError {\n if (auto it = dim_orders.find(&instr); it != dim_orders.end()) {\n return GetRequirementsIfSupportedOrder(it->second, properties);\n }\n return empty_requirements;\n };\n DotRequirements requirements = empty_requirements;\n for (const HloInstruction* operand : hlo.operands()) {\n DotRequirementsOrError requirements_or_error =\n CombineDotRequirements(requirements, get_requirements(*operand));\n if (std::holds_alternative(requirements_or_error)) {\n return requirements_or_error;\n }\n requirements = std::get(requirements_or_error);\n }\n return CombineDotRequirements(requirements, get_requirements(hlo));\n}\nDimOrderMap GetPropagatedDimOrdersForElementwise(\n const HloInstruction& hlo, TransformDirection direction,\n const DimensionOrder& 
src_dim_order) {\n if (direction == TransformDirection::kOutputToInput) {\n DimOrderMap map;\n for (const HloInstruction* operand : hlo.operands()) {\n map.insert({operand, src_dim_order});\n }\n return map;\n }\n return {{&hlo, src_dim_order}};\n}\nconst HloInstruction& GetSourceHlo(const HloInstruction& hlo,\n TransformDirection direction) {\n CHECK_GE(hlo.operand_count(), 1);\n if (direction == TransformDirection::kOutputToInput) {\n return hlo;\n }\n return *hlo.operand(0);\n}\nusing ConstInstructionVector = absl::InlinedVector;\nConstInstructionVector GetDestHlos(const HloInstruction& hlo,\n TransformDirection direction) {\n if (direction == TransformDirection::kInputToOutput) {\n return {&hlo};\n }\n ConstInstructionVector hlos;\n hlos.reserve(hlo.operands().size());\n for (const HloInstruction* operand : hlo.operands()) {\n hlos.push_back(operand);\n }\n return hlos;\n}\nconst HloInstruction& GetDestHlo(const HloInstruction& hlo,\n TransformDirection direction) {\n CHECK_EQ(hlo.operand_count(), 1);\n if (direction == TransformDirection::kInputToOutput) {\n return hlo;\n }\n return *hlo.operand(0);\n}\nDimOrderMapOrError GetPropagatedDimOrdersForBitcast(\n const HloInstruction& hlo, const TransformDirection direction,\n const DimensionOrder& src_dim_order, const DotProperties& properties) {\n const HloInstruction& dst = GetDestHlo(hlo, direction);\n const Shape& dst_shape = dst.shape();\n const Fragments& src_fragments_order = src_dim_order.TensorFragmentsOrder();\n DimOrderMap dst_dim_orders;\n DimensionOrder& dst_dim_order =\n dst_dim_orders.insert({&dst, DimensionOrder()}).first->second;\n Fragments& dst_fragments_order = dst_dim_order.TensorFragmentsOrder();\n int64_t dst_remaining_size = 1;\n absl::flat_hash_map> src_to_dst;\n auto dst_dim_it = dst_shape.layout().minor_to_major().cbegin();\n const auto dst_dim_end = dst_shape.layout().minor_to_major().cend();\n for (auto src_dim = src_fragments_order.cbegin();\n src_dim != src_fragments_order.cend(); 
++src_dim) {\n auto add_new_fragment = [&](const Fragment& fragment) {\n dst_fragments_order.push_back(fragment);\n src_to_dst[&*src_dim].push_back(dst_fragments_order.size() - 1);\n };\n if (dst_remaining_size >= src_dim->full_count()) {\n if (dst_remaining_size % src_dim->full_count()) {\n return FusionDecision::Forbid(\"Unsupported bitcast\");\n }\n add_new_fragment(*src_dim);\n dst_remaining_size /= src_dim->full_count();\n } else {\n int64_t src_remaining_size = src_dim->full_count();\n if (dst_remaining_size > 1) {\n if (src_remaining_size % dst_remaining_size || (src_dim->is_sliced())) {\n return FusionDecision::Forbid(\"Unsupported bitcast\");\n }\n add_new_fragment(\n Fragment{src_dim->dst_dim_number(), dst_remaining_size});\n src_remaining_size /= dst_remaining_size;\n dst_remaining_size = 1;\n }\n while (src_remaining_size > 1) {\n CHECK(dst_dim_it != dst_dim_end);\n int64_t dst_dim_size = dst_shape.dimensions(*dst_dim_it);\n int64_t new_fragment_size = dst_dim_size;\n if (dst_dim_size > src_remaining_size) {\n if (dst_dim_size % src_remaining_size) {\n return FusionDecision::Forbid(\"Unsupported bitcast\");\n }\n dst_remaining_size = dst_dim_size / src_remaining_size;\n new_fragment_size = src_remaining_size;\n }\n if (src_dim->is_sliced()) {\n return FusionDecision::Forbid(\"Unsupported bitcast\");\n }\n add_new_fragment(\n Fragment{src_dim->dst_dim_number(), new_fragment_size});\n src_remaining_size /= new_fragment_size;\n ++dst_dim_it;\n }\n }\n }\n CHECK_EQ(dst_remaining_size, 1);\n while (dst_dim_it != dst_dim_end) {\n if (dst_shape.dimensions(*dst_dim_it) != 1) {\n return FusionDecision::Forbid(\"Unsupported bitcast\");\n }\n if (!dst_fragments_order.empty()) {\n dst_fragments_order.push_back(\n Fragment{dst_fragments_order.back().dst_dim_number(), 1});\n src_to_dst[&src_fragments_order.back()].push_back(\n dst_fragments_order.size() - 1);\n }\n ++dst_dim_it;\n }\n FragmentOrders& dst_dim_fragment_orders = dst_dim_order.DimFragmentsOrders();\n for 
(const auto& [dim_index, dim_sequence] :\n src_dim_order.DimFragmentsOrders()) {\n std::vector& dst = dst_dim_fragment_orders[dim_index];\n dst.reserve(dim_sequence.size());\n for (const int src : dim_sequence) {\n std::copy(src_to_dst[&src_fragments_order[src]].cbegin(),\n src_to_dst[&src_fragments_order[src]].cend(),\n std::back_inserter(dst));\n }\n }\n return dst_dim_orders;\n}\nDimOrderMapOrError GetPropagatedDimOrdersForDimAlteringOp(\n const HloInstruction& hlo, const TransformDirection direction,\n const DimensionOrder& src_dim_order, const DotProperties& properties) {\n std::list new_fragments;\n const HloInstruction& src = GetSourceHlo(hlo, direction);\n Fragments src_fragments_order = src_dim_order.TensorFragmentsOrder();\n if (hlo.opcode() == HloOpcode::kSlice &&\n ShapeUtil::IsEffectiveScalar(hlo.shape())) {\n return FusionDecision::Forbid(\"Slice to scalar is not implemented yet.\");\n }\n std::vector> src_physical;\n src_physical.reserve(src.shape().rank());\n if (src_fragments_order.size() < src.shape().rank()) {\n return FusionDecision::Forbid(\n \"Cannot propagate further from trivial sized tensor\");\n }\n auto src_fragment_it = src_fragments_order.begin();\n for (int64_t dim_index : src.shape().layout().minor_to_major()) {\n const int64_t dim_size = src.shape().dimensions(dim_index);\n int64_t subdim_size_accumulator = 1;\n std::vector subdim_group;\n do {\n CHECK(src_fragment_it != src_fragments_order.end());\n subdim_size_accumulator *= src_fragment_it->full_count();\n subdim_group.push_back(&*src_fragment_it);\n ++src_fragment_it;\n } while (subdim_size_accumulator < dim_size);\n CHECK_EQ(subdim_size_accumulator, dim_size);\n src_physical.push_back(subdim_group);\n }\n std::vector> src_logical;\n src_logical.resize(src_physical.size());\n for (int i = 0; i < src_physical.size(); ++i) {\n src_logical[src.shape().layout().minor_to_major(i)] = src_physical[i];\n }\n DimOrderMap dst_dim_orders;\n int64_t concat_accumulated_size = 0;\n for (const 
HloInstruction* dst : GetDestHlos(hlo, direction)) {\n DimensionOrder& dst_dim_order =\n dst_dim_orders.insert({dst, DimensionOrder()}).first->second;\n std::vector> dst_logical;\n if (hlo.opcode() == HloOpcode::kTranspose) {\n const auto* transpose = Cast(&hlo);\n std::vector permutation(transpose->dimensions().cbegin(),\n transpose->dimensions().cend());\n if (direction == TransformDirection::kInputToOutput) {\n permutation = InversePermutation(permutation);\n }\n dst_logical.resize(permutation.size());\n for (int i = 0; i < permutation.size(); ++i) {\n dst_logical[permutation[i]] = src_logical[i];\n }\n } else if (hlo.opcode() == HloOpcode::kBroadcast) {\n const auto* broadcast = Cast(&hlo);\n dst_logical.resize(broadcast->dimensions().size());\n for (int i = 0; i < broadcast->dimensions().size(); ++i) {\n dst_logical[i] = src_logical[broadcast->dimensions()[i]];\n }\n } else if (hlo.opcode() == HloOpcode::kReduce) {\n if (dst != &hlo && hlo.operand_index(dst) == 1) {\n continue;\n }\n const auto* reduce = Cast(&hlo);\n dst_logical.resize(src_logical.size() + reduce->dimensions().size());\n if (reduce->dimensions().size() != 1) {\n return FusionDecision::Forbid(\"Unsupported reduction.\");\n } else if (reduce->dimensions().front() !=\n reduce->operand(0)->shape().rank() - 1) {\n return FusionDecision::Forbid(\"Only row reductions are supported.\");\n }\n } else if (hlo.opcode() == HloOpcode::kConcatenate) {\n dst_logical.resize(src_logical.size());\n for (int i = 0; i < src_logical.size(); ++i) {\n if (i == hlo.concatenate_dimension()) {\n if (src_logical[i].size() != 1 || src_logical[i][0]->is_sliced()) {\n return FusionDecision::Forbid(\"Unsupported concatenation.\");\n }\n const Fragment& src_fragment = *src_logical[i][0];\n Fragment& dst_fragment = new_fragments.emplace_back(\n src_fragment.dst_dim_number(), dst->shape().dimensions(i));\n dst_fragment.set_slice(-concat_accumulated_size,\n dst->shape().dimensions(i));\n concat_accumulated_size += 
dst->shape().dimensions(i);\n dst_logical[i].push_back(&dst_fragment);\n } else {\n dst_logical[i] = src_logical[i];\n }\n }\n } else if (hlo.opcode() == HloOpcode::kCopy) {\n CHECK(ShapeUtil::SameDimensions(src.shape(), dst->shape()));\n dst_logical = src_logical;\n } else if (hlo.opcode() == HloOpcode::kPad) {\n if (dst != &hlo && hlo.operand_index(dst) == 1) {\n continue;\n }\n const auto* pad = Cast(&hlo);\n dst_logical.resize(src_logical.size());\n for (int i = 0; i < src_logical.size(); ++i) {\n const int padding =\n pad->padding_config().dimensions(i).edge_padding_high();\n CHECK_EQ(pad->padding_config().dimensions(i).edge_padding_low(), 0);\n CHECK_EQ(pad->padding_config().dimensions(i).interior_padding(), 0);\n if (padding == 0) {\n dst_logical[i] = src_logical[i];\n } else {\n const std::vector& fragments = src_logical[i];\n CHECK_GE(fragments.size(), 2);\n CHECK(absl::c_all_of(fragments, [&](const Fragment* fragment) {\n return fragment->dst_dim_number() ==\n fragments.front()->dst_dim_number();\n }));\n std::vector non_trivial_fragments;\n absl::c_copy_if(fragments, std::back_inserter(non_trivial_fragments),\n [](const Fragment* fragment) {\n return fragment->full_count() > 1;\n });\n CHECK_EQ(non_trivial_fragments.size(), 2);\n new_fragments.emplace_back(\n non_trivial_fragments[0]->dst_dim_number(),\n non_trivial_fragments[0]->full_count() *\n non_trivial_fragments[1]->full_count() -\n padding);\n dst_logical[i] = {&new_fragments.back()};\n }\n }\n } else if (hlo.opcode() == HloOpcode::kSlice) {\n const auto slice = Cast(&hlo);\n dst_logical.resize(src_logical.size());\n for (int dim = 0; dim < src_logical.size(); ++dim) {\n dst_logical[dim] = src_logical[dim];\n if (slice->slice_limits(dim) - slice->slice_starts(dim) !=\n dst->shape().dimensions(dim)) {\n if (dst_logical[dim].size() > 1) {\n return FusionDecision::Forbid(\"Slicing of fragmented dimension.\");\n }\n auto fragment = dst_logical[dim].front();\n 
fragment->set_count(dst->shape().dimensions(dim));\n fragment->set_slice(\n fragment->slice_start() + slice->slice_starts(dim),\n fragment->sliced_count());\n }\n }\n } else if (hlo.opcode() == HloOpcode::kDynamicSlice) {\n if (dst != &hlo && hlo.operand_index(dst) >= 1) {\n continue;\n }\n const auto dynamic_slice = Cast(&hlo);\n dst_logical.resize(src_logical.size());\n for (int dim = 0; dim < src_logical.size(); ++dim) {\n dst_logical[dim] = src_logical[dim];\n if (dynamic_slice->slice_sizes(dim) != dst->shape().dimensions(dim)) {\n if (dst_logical[dim].size() > 1) {\n return FusionDecision::Forbid(\"Slicing of fragmented dimension.\");\n }\n auto fragment = dst_logical[dim].front();\n fragment->set_count(dst->shape().dimensions(dim));\n fragment->set_slice(fragment->slice_start(),\n dst->shape().dimensions(dim));\n }\n }\n } else {\n return FusionDecision::Forbid(\"Function called on a wrong instruction.\");\n }\n absl::flat_hash_map src_to_dst;\n Fragments& dst_fragments_order = dst_dim_order.TensorFragmentsOrder();\n FragmentOrders& dst_dim_fragments_order =\n dst_dim_order.DimFragmentsOrders();\n absl::flat_hash_set dim_numbers_present_in_dst;\n for (const int64_t dim_idx : dst->shape().layout().minor_to_major()) {\n for (const Fragment* subdim : dst_logical[dim_idx]) {\n dst_fragments_order.push_back(*subdim);\n src_to_dst[subdim] = dst_fragments_order.size() - 1;\n dim_numbers_present_in_dst.insert(subdim->dst_dim_number());\n }\n }\n for (const auto& [dim_index, dim_sequence] :\n src_dim_order.DimFragmentsOrders()) {\n for (const int fragment_number : dim_sequence) {\n const auto it = src_to_dst.find(&src_fragments_order[fragment_number]);\n if (it == src_to_dst.cend()) {\n if (hlo.opcode() == HloOpcode::kBroadcast &&\n src_fragments_order[fragment_number].full_count() > 1 &&\n dim_numbers_present_in_dst.contains(dim_index)) {\n return FusionDecision::Forbid(\"Unsupported broadcast\");\n }\n continue;\n }\n 
dst_dim_fragments_order[dim_index].push_back(it->second);\n }\n }\n }\n return dst_dim_orders;\n}\nDimOrderMapOrError GetPropagatedDimOrders(const HloInstruction& hlo,\n const TransformDirection direction,\n const DimensionOrder& src_dim_order,\n const DotProperties& properties) {\n VLOG(7) << \"Analyzing \" << hlo.ToString();\n if (hlo.opcode() != HloOpcode::kParameter &&\n direction == TransformDirection::kOutputToInput &&\n absl::c_any_of(hlo.users(), [](const HloInstruction* user) {\n return (user->opcode() == HloOpcode::kConcatenate ||\n user->opcode() == HloOpcode::kDynamicSlice);\n })) {\n return FusionDecision::Forbid(\n \"No fusion into concatenations or dynamic slice.\");\n }\n if (hlo.opcode() == HloOpcode::kParameter ||\n hlo_query::IsScalarConstant(&hlo)) {\n CHECK(direction == TransformDirection::kOutputToInput);\n return DimOrderMap{};\n } else if (hlo.opcode() == HloOpcode::kTranspose ||\n hlo.opcode() == HloOpcode::kCopy) {\n return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,\n properties);\n } else if (hlo.opcode() == HloOpcode::kBroadcast) {\n if (direction != TransformDirection::kOutputToInput) {\n return FusionDecision::Forbid(\"Unsupported broadcast direction.\");\n }\n return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,\n properties);\n } else if (hlo.opcode() == HloOpcode::kPad) {\n if (direction != TransformDirection::kOutputToInput) {\n return FusionDecision::Forbid(\"Unsupported pad direction.\");\n }\n return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,\n properties);\n } else if (hlo.operand_count() > 0 &&\n legacy_triton::IsTritonSupportedElementwiseUpToFloatNormalization(\n hlo.opcode(), hlo.operand(0)->shape().element_type())) {\n return GetPropagatedDimOrdersForElementwise(hlo, direction, src_dim_order);\n } else if (hlo.opcode() == HloOpcode::kBitcast) {\n return GetPropagatedDimOrdersForBitcast(hlo, direction, src_dim_order,\n properties);\n } else if 
(hlo.opcode() == HloOpcode::kSlice) {\n if (direction != TransformDirection::kOutputToInput) {\n return FusionDecision::Forbid(\"Unsupported slice direction.\");\n }\n return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,\n properties);\n } else if (hlo.opcode() == HloOpcode::kDynamicSlice &&\n direction == TransformDirection::kOutputToInput) {\n if (CodegenDecision decision = legacy_triton::IsTritonSupportedDynamicSlice(\n *Cast(&hlo));\n !decision.CanFuse()) {\n return decision;\n }\n return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,\n properties);\n } else if (hlo.opcode() == HloOpcode::kReshape) {\n if (!ShapeUtil::ReshapeIsBitcast(hlo.operand(0)->shape(), hlo.shape())) {\n return FusionDecision::Forbid(\"Non-bitcast reshape.\");\n }\n return GetPropagatedDimOrdersForBitcast(hlo, direction, src_dim_order,\n properties);\n } else if (hlo.opcode() == HloOpcode::kConcatenate &&\n direction == TransformDirection::kOutputToInput) {\n int64_t noncontracting_dim_label = properties.noncontracting_dimension;\n const FragmentOrders& src_dim_fragments_orders =\n src_dim_order.DimFragmentsOrders();\n auto noncontracting_dim_fragment_order_it =\n src_dim_fragments_orders.find(noncontracting_dim_label);\n if (noncontracting_dim_fragment_order_it !=\n src_dim_fragments_orders.end()) {\n if (noncontracting_dim_fragment_order_it->second.size() > 1) {\n return FusionDecision::Forbid(\n \"Concatenations on split non-contracting dimensions are \"\n \"unsupported.\");\n }\n }\n auto dim = LogicalIndexOfLabeledDimension(hlo.shape(), src_dim_order,\n noncontracting_dim_label);\n if (!dim.has_value() || dim.value() != hlo.concatenate_dimension()) {\n return FusionDecision::Forbid(\"Unsupported concatenation.\");\n }\n if (absl::c_any_of(hlo.operands(), [&hlo](const HloInstruction* operand) {\n constexpr int kMinConcatFragmentSize = 64;\n return operand->shape().dimensions(hlo.concatenate_dimension()) %\n kMinConcatFragmentSize !=\n 
0;\n })) {\n return FusionDecision::Forbid(\n \"At least one operand of concatenation can not be perfectly tiled.\");\n }\n return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,\n properties);\n }\n return FusionDecision::Forbid(\"Unimplemented instruction.\");\n}\nint64_t InputMinusOutputBytes(const HloInstruction& hlo) {\n CHECK(!hlo.shape().IsTuple());\n int64_t input_size = 0;\n for (const HloInstruction* operand : hlo.operands()) {\n CHECK(!operand->shape().IsTuple());\n input_size += ShapeUtil::ByteSizeOf(operand->shape());\n }\n return input_size - ShapeUtil::ByteSizeOf(hlo.shape());\n}\nbool CanNotBeFusedIntoAUser(const HloInstruction& hlo) {\n return hlo.IsRoot() || (hlo.user_count() == 1 && hlo.users()[0]->IsRoot() &&\n hlo.users()[0]->opcode() == HloOpcode::kTuple);\n}\nconstexpr int kIoToleranceBytes = 1024;\nbool IsInputWorthFusing(const HloInstruction& hlo) {\n if (InputMinusOutputBytes(hlo) <= kIoToleranceBytes) {\n return true;\n }\n if (hlo.user_count() > 1) {\n return false;\n }\n if (hlo.opcode() == HloOpcode::kSlice &&\n hlo_query::AllOperandsAreParametersOrConstants(hlo)) {\n return true;\n }\n return hlo_query::AllOperandsAreParametersOrConstantsWithSingleUser(hlo);\n}\nbool IsOutputWorthFusing(const HloInstruction& hlo) {\n return CanNotBeFusedIntoAUser(hlo) ||\n InputMinusOutputBytes(hlo) >= -kIoToleranceBytes;\n}\nFusionDecision IsConversionWorthFusing(const HloInstruction& input,\n se::GpuComputeCapability gpu_version) {\n if (ShapeUtil::ByteSizeOf(input.operand(0)->shape()) >\n ShapeUtil::ByteSizeOf(input.shape())) {\n return FusionDecision::Forbid(\"Narrowing conversion.\");\n }\n return FusionDecision::Allow();\n}\n} \nDimOrdersAndReqsOrError GetPropagatedDimOrdersAndRequirements(\n const HloInstruction& hlo, const DimensionOrder& src_dim_order,\n TransformDirection direction, const DotProperties& properties) {\n DimOrderMapOrError propagated_dim_orders_or_error =\n GetPropagatedDimOrders(hlo, direction, 
src_dim_order, properties);\n if (std::holds_alternative(propagated_dim_orders_or_error)) {\n return std::get(propagated_dim_orders_or_error);\n }\n DimOrderMap propagated_dim_orders =\n std::move(std::get(propagated_dim_orders_or_error));\n DotRequirementsOrError requirements_or_error =\n GetRequirementsIfSupportedOrders(hlo, propagated_dim_orders, properties);\n if (std::holds_alternative(requirements_or_error)) {\n return std::get(requirements_or_error);\n }\n return DimOrdersAndReqs{propagated_dim_orders,\n std::get(requirements_or_error)};\n}\nDimOrdersAndReqsOrError\nGetPropagatedDimOrdersAndRequirementsIfProfitablyFusible(\n const HloInstruction& hlo, TransformDirection transform_direction,\n const std::optional& src_operand_index,\n const DimensionOrder& src_dim_order,\n const se::GpuComputeCapability& gpu_version,\n const DotProperties& properties) {\n CHECK_EQ(transform_direction == TransformDirection::kInputToOutput,\n src_operand_index.has_value());\n if (hlo.opcode() == HloOpcode::kTuple ||\n hlo.opcode() == HloOpcode::kGetTupleElement) {\n return FusionDecision::Forbid(\"Unsupported instruction.\");\n }\n if (hlo.opcode() == HloOpcode::kReduce ||\n hlo.opcode() == HloOpcode::kAllReduce ||\n hlo.opcode() == HloOpcode::kAllReduceStart ||\n hlo.opcode() == HloOpcode::kAllReduceDone) {\n return FusionDecision::Forbid(\"Reductions are not fused yet.\");\n }\n if (hlo.opcode() == HloOpcode::kPad) {\n return FusionDecision::Forbid(\"Pads are not fused yet.\");\n }\n if (auto decision =\n legacy_triton::IsTritonSupportedInstruction(hlo, gpu_version);\n !decision.CanFuse()) {\n return decision;\n }\n DimOrdersAndReqsOrError result_or_error =\n GetPropagatedDimOrdersAndRequirements(hlo, src_dim_order,\n transform_direction, properties);\n if (std::holds_alternative(result_or_error)) {\n VLOG(5) << \"Not fusing \" << hlo.ToString()\n << \" to the output due to the decision: \"\n << std::get(result_or_error).Explain();\n return result_or_error;\n }\n 
DimOrdersAndReqs dim_orders_and_requirements =\n std::move(std::get(result_or_error));\n int fusion_level =\n hlo.GetModule()->config().debug_options().xla_gpu_triton_fusion_level();\n if (transform_direction == TransformDirection::kOutputToInput) {\n if (fusion_level < 2) {\n if (hlo.opcode() == HloOpcode::kConvert) {\n if (FusionDecision decision = IsConversionWorthFusing(hlo, gpu_version);\n !decision) {\n return decision;\n }\n } else if (hlo.IsElementwise() && hlo.opcode() != HloOpcode::kCopy) {\n return FusionDecision::Forbid(\"Ignored elementwise operation\");\n }\n } else {\n bool accepted = false;\n if (hlo.IsElementwise() && hlo.operand_count() == 2) {\n for (const HloInstruction* operand : hlo.operands()) {\n if (operand->opcode() == HloOpcode::kBroadcast &&\n (operand->operand(0)->opcode() == HloOpcode::kParameter ||\n operand->operand(0)->opcode() == HloOpcode::kConstant) &&\n std::holds_alternative(\n GetPropagatedDimOrdersAndRequirementsIfProfitablyFusible(\n *operand, TransformDirection::kOutputToInput,\n std::nullopt,\n dim_orders_and_requirements.dim_orders.at(operand),\n gpu_version, properties))) {\n accepted = true;\n break;\n }\n }\n }\n if (!accepted && !IsInputWorthFusing(hlo)) {\n return FusionDecision::Forbid(\n \"Not obviously profitable to fuse as input.\");\n }\n }\n } else {\n if (fusion_level < 2) {\n return FusionDecision::Forbid(\n \"Skipping fusing outputs at low fusion levels.\");\n }\n for (int i = 0; i < hlo.operand_count(); ++i) {\n const HloInstruction* operand = hlo.operand(i);\n if (i == *src_operand_index) {\n continue;\n }\n if ((operand->opcode() == HloOpcode::kBroadcast &&\n ShapeUtil::IsScalar(operand->operand(0)->shape())) ||\n operand->opcode() == HloOpcode::kParameter) {\n continue;\n }\n return FusionDecision::Forbid(\n \"Has multiple inputs - not properly analyzed yet.\");\n }\n if (!IsOutputWorthFusing(hlo)) {\n return FusionDecision::Forbid(\n \"Not obviously profitable to fuse as output.\");\n }\n }\n return 
dim_orders_and_requirements;\n}\n} \n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/gpu/triton_tiling_propagation.h\"\n#include \n#include \n#include \"xla/tests/hlo_test_base.h\"\nnamespace xla::gpu {\nnamespace {\nusing TritonTilingPropagationTest = HloTestBase;\nusing triton_fusion::DimensionOrder;\nDimensionOrder FromFragments(DimensionOrder::Fragments fragments) {\n DimensionOrder dim_order;\n DimensionOrder::Fragments& tensor_fragments_order =\n dim_order.TensorFragmentsOrder();\n DimensionOrder::FragmentOrders& dim_fragments_orders =\n dim_order.DimFragmentsOrders();\n for (const DimensionOrder::Fragment& fragment : fragments) {\n tensor_fragments_order.push_back(fragment);\n dim_fragments_orders[fragment.dst_dim_number()].push_back(\n tensor_fragments_order.size());\n }\n return dim_order;\n}\nTEST_F(\n TritonTilingPropagationTest,\n DimensionOrdersRemainPhysicallyEquivalentAfterInsertingTrivialDimensions) {\n DimensionOrder::Fragment fragment_1(0, 97);\n DimensionOrder::Fragment fragment_2(0, 1);\n DimensionOrder dimension_order_1 = FromFragments({fragment_1, fragment_2});\n DimensionOrder::Fragment fragment_3(0, 97);\n DimensionOrder::Fragment fragment_4(1, 1);\n DimensionOrder dimension_order_2 = FromFragments({fragment_3, fragment_4});\n EXPECT_TRUE(dimension_order_1.IsPhysicallyEquivalent(dimension_order_2));\n}\nTEST_F(\n TritonTilingPropagationTest,\n IterationSpecsRemainPhysicallyEquivalentAfterInsertingTrivialDimensions) {\n TensorIterationSpec::IterationSpecFragment fragment_1 = {\n 1, 97, 0, 97,\n {97}};\n TensorIterationSpec spec_1;\n spec_1[0].push_back(fragment_1);\n TensorIterationSpec::IterationSpecFragment fragment_2 = {\n 1, 97, 0, 97,\n {97}};\n TensorIterationSpec::IterationSpecFragment fragment_3 = {\n 97, 1, 0, 1,\n {1}};\n TensorIterationSpec spec_2;\n spec_2[0].push_back(fragment_2);\n spec_2[1].push_back(fragment_3);\n 
EXPECT_TRUE(spec_1.IsPhysicallyEquivalent(spec_2));\n}\nTEST_F(TritonTilingPropagationTest,\n DimensionsShouldNotBeRemovedByToTensorIterationSpec) {\n DimensionOrder::Fragment fragment_0(0, 97);\n DimensionOrder::Fragment fragment_1(1, 1);\n DimensionOrder dimension_order = FromFragments({fragment_0, fragment_1});\n TensorIterationSpec spec = dimension_order.ToTensorIterationSpec();\n const TensorIterationSpec::DimIterationSpec* dim_spec_0 = spec.Find(0);\n EXPECT_NE(dim_spec_0, nullptr);\n EXPECT_EQ(dim_spec_0->size(), 1);\n EXPECT_EQ(dim_spec_0->at(0).count, 97);\n const TensorIterationSpec::DimIterationSpec* dim_spec_1 = spec.Find(1);\n EXPECT_NE(dim_spec_1, nullptr);\n EXPECT_EQ(dim_spec_1->size(), 1);\n EXPECT_EQ(dim_spec_1->at(0).count, 1);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/triton_tiling_propagation.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/triton_tiling_propagation_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1180,"cells":{"ID":{"kind":"string","value":"22010223-07c7-4dc2-819a-01d14addc9b5"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"ir_emission_utils"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/cpu/ir_emission_utils.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/cpu/ir_emission_utils_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/cpu/ir_emission_utils.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/layout_util.h\"\n#include \"xla/service/cpu/cpu_runtime.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/window_util.h\"\nnamespace xla 
{\nnamespace cpu {\nint64_t GetMinimumAlignmentForArray(\n const Shape& shape, const TargetMachineFeatures& target_machine_features) {\n CHECK(LayoutUtil::IsDenseArray(shape));\n int64_t allocation_size_bytes =\n ShapeUtil::ElementsIn(shape) *\n ShapeUtil::ByteSizeOfPrimitiveType(shape.element_type());\n return target_machine_features.minimum_alignment_for_allocation(\n allocation_size_bytes);\n}\nbool PotentiallyImplementedAsEigenConvolution(\n const HloInstruction& convolution,\n const TargetMachineFeatures& target_machine_features) {\n const Shape& input_shape = convolution.operand(0)->shape();\n const Shape& kernel_shape = convolution.operand(1)->shape();\n const Shape& output_shape = convolution.shape();\n auto is_aligned = [&](const Shape& shape) {\n return GetMinimumAlignmentForArray(shape, target_machine_features) >=\n TargetMachineFeatures::kEigenExpectedTensorAlignment;\n };\n if (!is_aligned(input_shape) || !is_aligned(kernel_shape) ||\n !is_aligned(output_shape)) {\n return false;\n }\n if (ShapeUtil::IsZeroElementArray(input_shape) ||\n ShapeUtil::IsZeroElementArray(kernel_shape)) {\n return false;\n }\n CHECK(\n ShapeUtil::SameElementTypeIgnoringFpPrecision(input_shape, kernel_shape));\n PrimitiveType primitive_type = input_shape.element_type();\n if (primitive_type != F16 && primitive_type != F32) {\n return false;\n }\n if (window_util::HasWindowReversal(convolution.window())) {\n return false;\n }\n const ConvolutionDimensionNumbers& dnums =\n convolution.convolution_dimension_numbers();\n const int64_t num_spatial_dims = dnums.output_spatial_dimensions_size();\n if (num_spatial_dims < 1 || num_spatial_dims > 3) {\n return false;\n }\n for (int64_t i = 0; i < num_spatial_dims; ++i) {\n if (dnums.input_spatial_dimensions(i) != i + 1) {\n return false;\n }\n if (dnums.kernel_spatial_dimensions(i) != i) {\n return false;\n }\n if (dnums.output_spatial_dimensions(i) != i + 1) {\n return false;\n }\n }\n return dnums.input_batch_dimension() == 0 &&\n 
dnums.input_feature_dimension() == input_shape.dimensions_size() - 1 &&\n dnums.output_batch_dimension() == 0 &&\n dnums.output_feature_dimension() ==\n output_shape.dimensions_size() - 1 &&\n dnums.kernel_input_feature_dimension() ==\n kernel_shape.dimensions_size() - 2 &&\n dnums.kernel_output_feature_dimension() ==\n kernel_shape.dimensions_size() - 1;\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/cpu/ir_emission_utils.h\"\n#include \n#include \"xla/service/cpu/target_machine_features_fake.h\"\n#include \"xla/test.h\"\n#include \"xla/tests/hlo_test_base.h\"\nnamespace xla {\nnamespace {\nusing IrEmitterTest = HloTestBase;\nTEST_F(IrEmitterTest, ConvWithZeroSizedKernelNotImplementedAsEigen) {\n const char* const hlo_string = R\"(\nHloModule ModuleWithConv\nENTRY Conv {\n input = f32[32,50,28,28]{3,2,1,0} parameter(0)\n kernel = f32[50,0,5,5]{3,2,1,0} parameter(1)\n ROOT convolution = f32[32,0,24,24]{3,2,1,0} convolution(input, kernel),\n window={size=5x5},\n dim_labels=bf01_io01->bf01\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n HloComputation* entry_computation = module->entry_computation();\n HloInstruction* conv_instr = entry_computation->root_instruction();\n cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features(\n [](int64_t shape_size) {\n return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;\n });\n EXPECT_FALSE(cpu::PotentiallyImplementedAsEigenConvolution(\n *conv_instr, target_machine_features));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/ir_emission_utils.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/ir_emission_utils_test.cc"},"Commit 
Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1181,"cells":{"ID":{"kind":"string","value":"90d52628-50f5-4b95-b9ab-5399df301b62"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"fusion_deduplication_cache"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/fusion_deduplication_cache.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/fusion_deduplication_cache_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/fusion_deduplication_cache.h\"\n#include \n#include \n#include \n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/hash/hash.h\"\n#include \"xla/hlo/ir/dfs_hlo_visitor.h\"\n#include \"xla/hlo/ir/hlo_clone_context.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/shape_util.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nclass HloInstructionPtrHash {\n public:\n size_t operator()(const HloInstruction* instr) const {\n return absl::HashOf(*instr);\n }\n};\nclass HloInstructionPtrEq {\n public:\n size_t operator()(const HloInstruction* instr1,\n const HloInstruction* instr2) const {\n auto operands_eq = [](const HloInstruction* a, const HloInstruction* b) {\n if (a == b) return true;\n return ShapeUtil::Equal(a->shape(), b->shape());\n };\n auto eq_computations = [](const HloComputation* a,\n const HloComputation* b) { return *a == *b; };\n return instr1->Identical(*instr2, operands_eq, eq_computations);\n }\n};\n} \n FusionDeduplicationCache FusionDeduplicationCache::Create(\n const HloModule& module) {\n absl::flat_hash_map\n deduplicated_id_map;\n absl::flat_hash_map instruction_id_map;\n int64_t instruction_count = module.instruction_count();\n deduplicated_id_map.reserve(instruction_count);\n 
instruction_id_map.reserve(instruction_count);\n int64_t next_id = 0;\n for (const HloComputation* computation : module.computations()) {\n for (const HloInstruction* instruction : computation->instructions()) {\n auto it = deduplicated_id_map.emplace(instruction, next_id);\n if (it.second) {\n ++next_id;\n }\n instruction_id_map[instruction] = it.first->second;\n }\n }\n return FusionDeduplicationCache(next_id, std::move(instruction_id_map));\n}\nFusionDeduplicationCache::InstructionId\nFusionDeduplicationCache::GetInstructionId(const HloInstruction& instruction) {\n return instruction_id_map_.at(&instruction);\n}\nFusionDeduplicationCache::FusionId FusionDeduplicationCache::GetFusionId(\n const HloInstruction& producer, const HloInstruction& consumer,\n int64_t consumer_operand_index) {\n FusionDeduplicationCache::FusionId fusion_id{GetInstructionId(producer),\n GetInstructionId(consumer),\n consumer_operand_index};\n if (fusion_id_map_.emplace(fusion_id, next_id_).second) {\n ++next_id_;\n }\n return fusion_id;\n}\nFusionDeduplicationCache::FusionId FusionDeduplicationCache::GetFusionId(\n const HloInstruction& producer, const HloInstruction& consumer) {\n return GetFusionId(producer, consumer, consumer.operand_index(&producer));\n}\nvoid FusionDeduplicationCache::UpdateFusedInstructionId(\n const HloInstruction& fusion_instruction,\n const HloInstruction& original_producer,\n const HloInstruction& original_consumer, int64_t consumer_operand_index) {\n instruction_id_map_[&fusion_instruction] = fusion_id_map_.at(GetFusionId(\n original_producer, original_consumer, consumer_operand_index));\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/gpu/fusion_deduplication_cache.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"tsl/platform/status.h\"\n#include \"tsl/platform/statusor.h\"\n#include 
\"tsl/platform/test.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nHloInstruction* Fuse(HloInstruction* producer, HloInstruction* consumer) {\n HloComputation* computation = consumer->parent();\n HloInstruction* fusion_instruction = consumer;\n if (consumer->opcode() != HloOpcode::kFusion) {\n fusion_instruction =\n computation->AddInstruction(HloInstruction::CreateFusion(\n consumer->shape(), HloInstruction::FusionKind::kLoop, consumer));\n TF_CHECK_OK(computation->ReplaceInstruction(consumer, fusion_instruction));\n }\n if (producer->opcode() == HloOpcode::kFusion) {\n fusion_instruction->MergeFusionInstruction(producer);\n } else {\n fusion_instruction->FuseInstruction(producer);\n }\n if (producer->user_count() == 0) {\n TF_CHECK_OK(computation->RemoveInstruction(producer));\n }\n return fusion_instruction;\n}\nusing FusionDeduplicationCacheTest = HloTestBase;\nTEST_F(FusionDeduplicationCacheTest, IdenticalInstructions_EqualId) {\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R\"(\n HloModule test_module\n ENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n add1 = f32[8] add(p0, p1)\n ROOT add2 = f32[8] add(add1, p1)\n })\"));\n FusionDeduplicationCache cache = FusionDeduplicationCache::Create(*module);\n const HloInstruction* add2 = module->entry_computation()->root_instruction();\n const HloInstruction* add1 = add2->operand(0);\n EXPECT_EQ(cache.GetInstructionId(*add1), cache.GetInstructionId(*add2));\n}\nTEST_F(FusionDeduplicationCacheTest,\n IdenticalInstructionsInDifferentComputations_EqualId) {\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R\"(\n HloModule test_module\n computation.1 {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n ROOT add1 = f32[8] add(p0, p1)\n }\n ENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n ROOT add2 = f32[8] add(p0, p0)\n })\"));\n FusionDeduplicationCache cache = FusionDeduplicationCache::Create(*module);\n const HloInstruction* add1 
=\n module->GetComputationWithName(\"computation.1\")->root_instruction();\n const HloInstruction* add2 = module->entry_computation()->root_instruction();\n EXPECT_EQ(cache.GetInstructionId(*add1), cache.GetInstructionId(*add2));\n}\nTEST_F(FusionDeduplicationCacheTest, IdenticalFusionInstructions_EqualId) {\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R\"(\n HloModule test_module\n ENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n log1 = f32[8] log(p0)\n add1 = f32[8] add(log1, p1)\n log2 = f32[8] log(add1)\n ROOT add2 = f32[8] add(log2, p0)\n })\"));\n HloComputation* entry_computation = module->entry_computation();\n auto* add1 = entry_computation->GetInstructionWithName(\"add1\");\n auto* add2 = entry_computation->GetInstructionWithName(\"add2\");\n auto* log1 = entry_computation->GetInstructionWithName(\"log1\");\n auto* log2 = entry_computation->GetInstructionWithName(\"log2\");\n FusionDeduplicationCache cache = FusionDeduplicationCache::Create(*module);\n EXPECT_EQ(cache.GetInstructionId(*add1), cache.GetInstructionId(*add2));\n EXPECT_EQ(cache.GetInstructionId(*log1), cache.GetInstructionId(*log2));\n EXPECT_NE(cache.GetInstructionId(*add1), cache.GetInstructionId(*log1));\n EXPECT_EQ(cache.GetFusionId(*log1, *add1), cache.GetFusionId(*log2, *add2));\n HloInstruction* fusion1 = Fuse(log1, add1);\n cache.UpdateFusedInstructionId(*fusion1, *log1, *add1,\n 0);\n HloInstruction* fusion2 = Fuse(log2, add2);\n cache.UpdateFusedInstructionId(*fusion2, *log2, *add2,\n 0);\n EXPECT_EQ(cache.GetInstructionId(*fusion1), cache.GetInstructionId(*fusion2));\n}\nTEST_F(FusionDeduplicationCacheTest,\n DifferentConsumerOperandIndex_DifferentId) {\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R\"(\n HloModule test_module\n ENTRY main {\n p0 = f32[8] parameter(0)\n p1 = f32[8] parameter(1)\n log1 = f32[8] log(p0)\n add1 = f32[8] add(log1, p1)\n log2 = f32[8] log(add1)\n ROOT add2 = f32[8] add(p0, log2)\n 
})\"));\n HloComputation* entry_computation = module->entry_computation();\n auto* add1 = entry_computation->GetInstructionWithName(\"add1\");\n auto* add2 = entry_computation->GetInstructionWithName(\"add2\");\n auto* log1 = entry_computation->GetInstructionWithName(\"log1\");\n auto* log2 = entry_computation->GetInstructionWithName(\"log2\");\n FusionDeduplicationCache cache = FusionDeduplicationCache::Create(*module);\n EXPECT_NE(cache.GetFusionId(*log1, *add1), cache.GetFusionId(*log2, *add2));\n HloInstruction* fusion1 = Fuse(log1, add1);\n cache.UpdateFusedInstructionId(*fusion1, *log1, *add1,\n 0);\n HloInstruction* fusion2 = Fuse(log2, add2);\n cache.UpdateFusedInstructionId(*fusion2, *log2, *add2,\n 1);\n EXPECT_NE(cache.GetInstructionId(*fusion1), cache.GetInstructionId(*fusion2));\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusion_deduplication_cache.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusion_deduplication_cache_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1182,"cells":{"ID":{"kind":"string","value":"bea3fc56-baf4-4f52-a00f-73076a7b90ad"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"fusion_process_dump"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/fusion_process_dump.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/fusion_process_dump_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/fusion_process_dump.h\"\n#include \n#include \n#include \n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/inlined_vector.h\"\n#include 
\"absl/status/statusor.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/gpu/fusion_process_dump.pb.h\"\n#include \"xla/stream_executor/device_description.h\"\n#include \"xla/tools/hlo_module_loader.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/env.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/logging.h\"\n#include \"tsl/platform/path.h\"\n#include \"tsl/platform/protobuf.h\" \n#include \"tsl/platform/status.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nHloInstruction* AddFusionInstruction(HloInstruction* producer,\n HloInstruction* consumer,\n HloComputation* computation,\n std::string_view fusion_name) {\n if (consumer->opcode() == HloOpcode::kFusion) {\n return consumer;\n }\n auto kind = HloInstruction::FusionKind::kLoop;\n auto fusion_instruction = computation->AddInstruction(\n HloInstruction::CreateFusion(consumer->shape(), kind, consumer),\n fusion_name);\n TF_CHECK_OK(computation->ReplaceInstruction(consumer, fusion_instruction));\n return fusion_instruction;\n}\nHloInstruction* Fuse(HloInstruction* producer, HloInstruction* consumer,\n HloComputation* computation,\n std::string_view fusion_name) {\n HloInstruction* fusion_instruction =\n AddFusionInstruction(producer, consumer, computation, fusion_name);\n if (producer->opcode() == HloOpcode::kFusion) {\n fusion_instruction->MergeFusionInstruction(producer);\n } else {\n fusion_instruction->FuseInstruction(producer);\n }\n if (producer->user_count() == 0) {\n TF_CHECK_OK(computation->RemoveInstruction(producer));\n }\n return fusion_instruction;\n}\nabsl::string_view GetProducerName(const FusionStep& step) {\n if (step.has_fusion()) {\n return step.fusion().producer_name();\n }\n if (step.has_update_priority()) {\n return step.update_priority().producer_name();\n }\n if 
(step.has_producer_ineligible()) {\n return step.producer_ineligible().producer_name();\n }\n LOG(FATAL) << \"Producer name not found in the current step.\";\n}\n} \nabsl::StatusOr FusionProcessDump::LoadFromFile(\n const std::string& path) {\n std::string format = std::string(tsl::io::Extension(path));\n std::string data;\n TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(), path, &data));\n return FusionProcessDump::LoadFromData(data, format);\n}\nabsl::StatusOr FusionProcessDump::LoadFromData(\n const std::string& data, absl::string_view format) {\n FusionProcessDumpProto fusion_process_dump_proto;\n if (format == \"txt\" || format == \"pbtxt\") {\n if (!tsl::protobuf::TextFormat::ParseFromString(\n data, &fusion_process_dump_proto)) {\n return InvalidArgument(\"Failed to parse input as HLO protobuf text\");\n }\n } else if (format == \"pb\") {\n if (!fusion_process_dump_proto.ParseFromString(data)) {\n return InvalidArgument(\"Failed to parse input as HLO protobuf binary\");\n }\n } else {\n return InvalidArgument(\n \"Invalid format from file extension: '%s'. 
Expected: txt, pb, or pbtxt\",\n format);\n }\n return FusionProcessDump::LoadFromProto(fusion_process_dump_proto);\n}\nabsl::StatusOr FusionProcessDump::LoadFromProto(\n const FusionProcessDumpProto& fusion_process_dump_proto) {\n TF_ASSIGN_OR_RETURN(\n auto module,\n LoadModuleFromData(fusion_process_dump_proto.hlo_module_before_fusion(),\n \"txt\"));\n se::DeviceDescription gpu_device_info(\n fusion_process_dump_proto.gpu_device_info());\n absl::flat_hash_map\n instruction_name_to_computation_map;\n for (HloComputation* computation : module->MakeNonfusionComputations()) {\n for (HloInstruction* instr : computation->instructions()) {\n instruction_name_to_computation_map[instr->name()] = computation;\n }\n }\n return FusionProcessDump(std::move(fusion_process_dump_proto),\n std::move(module), std::move(gpu_device_info),\n std::move(instruction_name_to_computation_map));\n}\nHloComputation* FusionProcessDump::GetCurrentComputation() {\n return instruction_name_to_computation_map_.at(\n GetProducerName(CurrentStep()));\n}\nHloInstruction* FusionProcessDump::GetInstructionWithName(\n absl::string_view name) {\n return instruction_name_to_computation_map_[name]->GetInstructionWithName(\n name);\n}\nHloInstruction* FusionProcessDump::GetProducer() {\n return GetInstructionWithName(GetProducerName(CurrentStep()));\n}\nabsl::InlinedVector FusionProcessDump::GetConsumers() {\n auto& step = CurrentStep();\n if (step.has_fusion()) {\n return {GetInstructionWithName(step.fusion().consumer_name())};\n }\n if (step.has_update_priority()) {\n absl::InlinedVector consumers;\n for (const auto& consumer_name : step.update_priority().consumer_names()) {\n consumers.push_back(GetInstructionWithName(consumer_name));\n }\n return consumers;\n }\n return {};\n}\nconst FusionStep& FusionProcessDump::CurrentStep() {\n CHECK(HasNext());\n return fusion_process_dump_proto_.fusion_steps(current_step_idx_);\n}\nbool FusionProcessDump::HasNext() {\n return current_step_idx_ < 
fusion_process_dump_proto_.fusion_steps_size();\n}\nvoid FusionProcessDump::Advance() {\n auto step = CurrentStep();\n if (step.has_fusion()) {\n const auto& fusion_step = step.fusion();\n auto* computation = GetCurrentComputation();\n HloInstruction* producer =\n computation->GetInstructionWithName(fusion_step.producer_name());\n HloInstruction* consumer =\n computation->GetInstructionWithName(fusion_step.consumer_name());\n HloInstruction* fusion =\n Fuse(producer, consumer, computation, fusion_step.fusion_name());\n instruction_name_to_computation_map_[fusion->name()] = computation;\n last_fusion_ = fusion;\n }\n ++current_step_idx_;\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/gpu/fusion_process_dump.h\"\n#include \n#include \n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/service/gpu/fusion_process_dump.pb.h\"\n#include \"xla/service/gpu/gpu_device_info_for_tests.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/service/pattern_matcher.h\"\n#include \"xla/service/pattern_matcher_gmock.h\"\n#include \"xla/test.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace m = ::xla::match;\nnamespace xla {\nnamespace gpu {\nnamespace {\nusing FusionProcessDumpTest = HloTestBase;\nvoid AddFusion(FusionProcessDumpProto& dump_proto,\n const std::string& fusion_name, const std::string& producer_name,\n const std::string& consumer_name) {\n auto step = dump_proto.add_fusion_steps();\n auto fusion_step = step->mutable_fusion();\n fusion_step->set_fusion_name(fusion_name);\n fusion_step->set_producer_name(producer_name);\n fusion_step->set_consumer_name(consumer_name);\n}\nTEST_F(FusionProcessDumpTest, MultipleFusionSteps) {\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R\"(\n HloModule test_module\n ENTRY main {\n p0 = f32[] parameter(0)\n p1 = f32[] parameter(1)\n add = f32[] add(p0, p1)\n subtract = f32[] 
subtract(p0, p1)\n abs = f32[] abs(subtract)\n ROOT multiply = f32[] multiply(add, abs)\n })\"));\n FusionProcessDumpProto dump_proto;\n *dump_proto.mutable_gpu_device_info() =\n TestGpuDeviceInfo::RTXA6000DeviceInfo().ToGpuProto();\n dump_proto.set_hlo_module_before_fusion(\n module->ToString(HloPrintOptions::ShortParsable()));\n AddFusion(dump_proto, \"fusion.1\", \"subtract\", \"abs\");\n AddFusion(dump_proto, \"fusion.2\", \"fusion.1\", \"multiply\");\n AddFusion(dump_proto, \"fusion.2\", \"add\", \"fusion.2\");\n TF_ASSERT_OK_AND_ASSIGN(auto fusion_process_dump,\n FusionProcessDump::LoadFromProto(dump_proto));\n fusion_process_dump.Advance();\n fusion_process_dump.Advance();\n fusion_process_dump.Advance();\n EXPECT_FALSE(fusion_process_dump.HasNext());\n auto root =\n fusion_process_dump.module()->entry_computation()->root_instruction();\n EXPECT_EQ(root->name(), \"fusion.2\");\n ASSERT_THAT(root, GmockMatch(m::Fusion(m::Parameter(), m::Parameter())));\n EXPECT_THAT(root->fused_expression_root(),\n GmockMatch(m::Multiply(\n m::Add(m::Parameter(), m::Parameter()),\n m::Abs(m::Subtract(m::Parameter(), m::Parameter())))));\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusion_process_dump.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusion_process_dump_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1183,"cells":{"ID":{"kind":"string","value":"1559345f-112d-4a23-82b6-d268bfab2164"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"kernel_reuse_cache"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/kernel_reuse_cache.cc"},"File 
Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/kernel_reuse_cache_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/kernel_reuse_cache.h\"\n#include \n#include \n#include \n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_join.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/span.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/service/gpu/executable.pb.h\"\n#include \"xla/service/gpu/kernel_arguments.h\"\n#include \"xla/service/gpu/launch_dimensions.h\"\n#include \"xla/status_macros.h\"\n#include \"xla/stream_executor/launch_dim.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/env.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/logging.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nstd::string GetArgumentFingerprint(\n absl::Span kernel_arguments) {\n return absl::StrJoin(\n kernel_arguments, \",\", [](std::string* s, const KernelArgument& arg) {\n if (arg.first_with_same_slice().has_value()) {\n absl::StrAppend(s, \"=\", arg.first_with_same_slice().value());\n return;\n }\n absl::StrAppend(s, arg.alignment());\n if (arg.aliased()) {\n absl::StrAppend(s, \"a\");\n }\n if (arg.written()) {\n absl::StrAppend(s, \"w\");\n }\n });\n}\n} \nstd::string GetComputationFingerprint(\n const HloComputation* fused_computation,\n absl::Span kernel_arguments,\n absl::string_view discriminator) {\n auto print_options = HloPrintOptions::Fingerprint()\n .set_print_only_essential_constants(false)\n .set_print_operand_shape(false);\n return absl::StrCat(discriminator, \"(\",\n GetArgumentFingerprint(kernel_arguments), \")\",\n fused_computation->ToString(print_options));\n}\nabsl::Status KernelReuseCache::Load(const CompilationCacheProto& proto) {\n for (const auto& [name, entry] : proto.entries()) 
{\n std::optional cluster_dim;\n if (entry.has_cluster_dim()) {\n cluster_dim =\n se::ClusterDim{entry.cluster_dim().x(), entry.cluster_dim().y(),\n entry.cluster_dim().z()};\n }\n TF_RET_CHECK(\n cache_\n .insert(\n {entry.fingerprint(),\n Entry{name,\n LaunchDimensions{\n entry.launch_dimensions().num_blocks(),\n entry.launch_dimensions().num_threads_per_block()},\n cluster_dim, entry.shmem_bytes(), entry.binary()}})\n .second);\n }\n return absl::OkStatus();\n}\nCompilationCacheProto KernelReuseCache::Export() const {\n CompilationCacheProto proto;\n for (const auto& [fingerprint, cache_entry] : cache_) {\n if (!hits_.contains(fingerprint)) {\n VLOG(5) << \"Not exporting unused \" << cache_entry.kernel_name;\n continue;\n }\n auto [it, inserted] = proto.mutable_entries()->emplace(\n cache_entry.kernel_name, CompilationCacheEntryProto{});\n CHECK(inserted) << cache_entry.kernel_name;\n CompilationCacheEntryProto& proto_entry = it->second;\n proto_entry.set_fingerprint(fingerprint);\n LaunchDimensionsProto launch_dimensions_proto;\n launch_dimensions_proto.set_num_blocks(\n cache_entry.launch_dimensions.num_blocks());\n launch_dimensions_proto.set_num_threads_per_block(\n cache_entry.launch_dimensions.num_threads_per_block());\n *proto_entry.mutable_launch_dimensions() = launch_dimensions_proto;\n if (cache_entry.cluster_dim.has_value()) {\n ClusterDimProto cluster_dim_proto;\n cluster_dim_proto.set_x(cache_entry.cluster_dim->x);\n cluster_dim_proto.set_y(cache_entry.cluster_dim->y);\n cluster_dim_proto.set_z(cache_entry.cluster_dim->z);\n *proto_entry.mutable_cluster_dim() = cluster_dim_proto;\n }\n proto_entry.set_shmem_bytes(cache_entry.shmem_bytes);\n proto_entry.set_binary(cache_entry.binary);\n }\n return proto;\n}\nabsl::Status UpdateDiskKernelCache(\n absl::string_view path, const bool do_append,\n const CompilationCacheProto& current_cache,\n absl::Span binaries_to_cache) {\n CompilationCacheProto disk_cache;\n if (do_append) {\n std::string serialized;\n 
TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(),\n std::string(path), &serialized));\n if (!disk_cache.ParseFromString(std::string(serialized))) {\n return Internal(\"Failed to parse serialized CompilationCacheProto.\");\n }\n }\n auto entries = disk_cache.mutable_entries();\n int stored_kernel_count = 0;\n for (const auto& [name, binary] : binaries_to_cache) {\n auto it_current = current_cache.entries().find(name);\n TF_RET_CHECK(it_current != current_cache.entries().end());\n auto [it_disk, inserted] = entries->insert({name, it_current->second});\n TF_RET_CHECK(inserted);\n TF_RET_CHECK(!binary.empty());\n it_disk->second.set_binary(reinterpret_cast(binary.data()),\n binary.size());\n VLOG(5) << \"Cached kernel: \" << name << \": \" << binary.size();\n ++stored_kernel_count;\n }\n if (stored_kernel_count > 0) {\n TF_RETURN_IF_ERROR(tsl::WriteStringToFile(tsl::Env::Default(),\n std::string(path),\n disk_cache.SerializeAsString()));\n VLOG(2) << \"Stored \" << stored_kernel_count << \" / \"\n << binaries_to_cache.size() << \" kernels in the cache file.\";\n }\n return absl::OkStatus();\n}\nstd::pair, bool>\nKernelReuseCache::GetWithStatus(\n const HloComputation* fused_computation,\n absl::Span kernel_arguments,\n absl::string_view discriminator,\n const std::function()>& generator) {\n std::string fingerprint = GetComputationFingerprint(\n fused_computation, kernel_arguments, discriminator);\n VLOG(4) << \"Fingerprint: \";\n XLA_VLOG_LINES(4, fingerprint);\n return GetWithStatus(std::move(fingerprint), generator);\n}\nstd::pair, bool>\nKernelReuseCache::GetWithStatus(\n std::string fingerprint,\n const std::function()>& generator) {\n hits_.insert(fingerprint);\n auto it = cache_.find(fingerprint);\n if (it != cache_.end()) {\n return {&it->second, true};\n }\n absl::StatusOr entry = generator();\n if (entry.ok()) {\n it =\n cache_.insert({std::move(fingerprint), std::move(entry.value())}).first;\n return {&it->second, false};\n }\n return 
{entry.status(), false};\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/gpu/kernel_reuse_cache.h\"\n#include \n#include \"absl/log/check.h\"\n#include \"xla/service/gpu/executable.pb.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\n#include \"tsl/platform/env.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nusing KernelReuseTest = ::testing::Test;\nTEST_F(KernelReuseTest, ExportAndLoadWork) {\n KernelReuseCache cache;\n EXPECT_TRUE(cache.IsEmpty());\n auto [result, was_cached] = cache.GetWithStatus(\n \"fingerprint\", []() { return KernelReuseCache::Entry{}; });\n TF_EXPECT_OK(result);\n EXPECT_NE(result.value(), nullptr);\n EXPECT_FALSE(was_cached);\n EXPECT_FALSE(cache.IsEmpty());\n const CompilationCacheProto proto = cache.Export();\n cache.Clear();\n EXPECT_TRUE(cache.IsEmpty());\n TF_EXPECT_OK(cache.Load(proto));\n EXPECT_FALSE(cache.IsEmpty());\n}\nTEST_F(KernelReuseTest, UpdatingDiskKernelCacheWorks) {\n std::string cache_file_path;\n CHECK(tsl::Env::Default()->LocalTempFilename(&cache_file_path));\n {\n const CompilationCacheProto proto = [](std::string kernel_name) {\n KernelReuseCache cache;\n auto [result, was_cached] = cache.GetWithStatus(\"fingerprint\", [&]() {\n return KernelReuseCache::Entry{.kernel_name = kernel_name};\n });\n return cache.Export();\n }(\"k1\");\n TF_EXPECT_OK(UpdateDiskKernelCache(cache_file_path, false,\n proto,\n {{.name = \"k1\", .binary = {5, 6}}}));\n }\n {\n const CompilationCacheProto proto = [](std::string kernel_name) {\n KernelReuseCache cache;\n auto [result, was_cached] = cache.GetWithStatus(\"fingerprint\", [&]() {\n return KernelReuseCache::Entry{.kernel_name = kernel_name};\n });\n return cache.Export();\n }(\"k2\");\n TF_EXPECT_OK(UpdateDiskKernelCache(cache_file_path, true,\n proto,\n {{.name = \"k2\", .binary = {7, 8}}}));\n }\n std::string serialized;\n TF_EXPECT_OK(\n tsl::ReadFileToString(tsl::Env::Default(), cache_file_path, &serialized));\n 
CompilationCacheProto proto;\n EXPECT_TRUE(proto.ParseFromString(std::string(serialized)));\n EXPECT_EQ(proto.entries_size(), 2);\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/kernel_reuse_cache.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/kernel_reuse_cache_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1184,"cells":{"ID":{"kind":"string","value":"b0dfb30d-f1d3-464d-99a2-1da73dd33128"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"gpu_spmd_pipeline"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/gpu_spmd_pipeline.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/gpu_spmd_pipeline_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/gpu_spmd_pipeline.h\"\n#include \n#include \n#include \"absl/functional/function_ref.h\"\n#include \"absl/log/check.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_schedule.h\"\n#include \"xla/hlo/pass/hlo_pass_fix.h\"\n#include \"xla/hlo/pass/hlo_pass_pipeline.h\"\n#include \"xla/hlo/transforms/hlo_constant_splitter.h\"\n#include \"xla/service/algebraic_simplifier.h\"\n#include \"xla/service/conditional_simplifier.h\"\n#include \"xla/service/gather_expander.h\"\n#include \"xla/service/gpu/transforms/algebraic_simplifier.h\"\n#include \"xla/service/hlo_constant_folding.h\"\n#include \"xla/service/hlo_dce.h\"\n#include \"xla/service/hlo_module_config.h\"\n#include \"xla/service/reshape_mover.h\"\n#include \"xla/service/scatter_expander.h\"\n#include 
\"xla/service/sharding_propagation.h\"\n#include \"xla/service/sort_simplifier.h\"\n#include \"xla/service/spmd/collective_permute_motion.h\"\n#include \"xla/service/spmd/shardy/shardy_xla_pass.h\"\n#include \"xla/service/spmd/stateful_rng_spmd_partitioner.h\"\n#include \"xla/service/tuple_simplifier.h\"\n#include \"xla/service/while_loop_constant_sinking.h\"\n#include \"xla/service/while_loop_simplifier.h\"\n#include \"xla/stream_executor/device_description.h\"\nnamespace xla {\nnamespace gpu {\nvoid AddSPMDPasses(\n const HloModule* hlo_module,\n const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts,\n const se::GpuComputeCapability& compute_capability,\n HloPassPipeline& spmd_pipeline,\n std::optional>\n auto_sharding_func) {\n const int64_t num_partitions = hlo_module->config().num_partitions();\n CHECK_GE(num_partitions, 1);\n HloPassPipeline& spmd_simplify =\n spmd_pipeline.AddPass>(\"spmd-simplify\");\n spmd_simplify.AddPass(layout_insensitive_algsimp_opts,\n compute_capability);\n spmd_simplify.AddPass();\n spmd_simplify.AddPass();\n spmd_simplify.AddPass(\n ScatterExpander::kEliminateSimpleScatters);\n spmd_simplify.AddPass(\n GatherExpander::kEliminateSimpleGathers);\n spmd_simplify.AddPass();\n spmd_simplify.AddPass();\n ReshapeMoverOptions reshape_mover_options;\n reshape_mover_options.reshape_of_1d_broadcast_is_cheap = true;\n spmd_simplify.AddPass(reshape_mover_options);\n spmd_simplify.AddPass>(\n layout_insensitive_algsimp_opts, compute_capability);\n spmd_simplify.AddPass();\n spmd_simplify.AddPass();\n const HloModuleConfig& config = hlo_module->config();\n if (config.use_shardy_partitioner()) {\n spmd_pipeline.AddPass();\n } else {\n spmd_pipeline.AddPass();\n spmd_simplify.AddPass();\n if (auto_sharding_func.has_value()) {\n (*auto_sharding_func)(spmd_pipeline);\n }\n spmd_pipeline.AddPass(\n true, false,\n config.allow_spmd_sharding_propagation_to_output());\n }\n spmd_pipeline.AddPass(\n num_partitions, 
hlo_module->config().replica_count(),\n hlo_module->config()\n .debug_options()\n .xla_gpu_threshold_for_windowed_einsum_mib(),\n hlo_module->config()\n .debug_options()\n .xla_gpu_multi_streamed_windowed_einsum(),\n true,\n true);\n spmd_pipeline.AddPass();\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/gpu/gpu_spmd_pipeline.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"xla/client/executable_build_options.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/pass/hlo_pass_pipeline.h\"\n#include \"xla/service/algebraic_simplifier.h\"\n#include \"xla/service/hlo_module_config.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/stream_executor/device_description.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nclass GpuSpmdPartitioningTest : public HloTestBase,\n public ::testing::WithParamInterface {\n public:\n absl::StatusOr> PartitionComputation(\n const char* hlo_module, int64_t num_devices) {\n HloModuleConfig config = GetModuleConfigForTest(\n 1, num_devices);\n config.set_num_partitions(num_devices);\n config.set_use_shardy_partitioner(UseShardy());\n TF_ASSIGN_OR_RETURN(auto module,\n ParseAndReturnVerifiedModule(hlo_module, config));\n HloPassPipeline spmd_pipeline(\"spmd-partitioner\");\n se::CudaComputeCapability ampere(8, 0);\n AlgebraicSimplifierOptions alg_simplifier_options;\n AddSPMDPasses(module.get(), alg_simplifier_options, ampere, spmd_pipeline,\n std::nullopt);\n TF_RETURN_IF_ERROR(spmd_pipeline.Run(module.get()).status());\n XLA_VLOG_LINES(10, module->ToString());\n return module;\n }\n protected:\n bool UseShardy() const { return GetParam(); }\n DebugOptions GetDebugOptionsForTest() override {\n DebugOptions debug_options = 
HloTestBase::GetDebugOptionsForTest();\n return debug_options;\n }\n};\nTEST_P(GpuSpmdPartitioningTest, DotWithEntryComputationLayout) {\n const char* const kHloModule = R\"(\n HloModule module,\n entry_computation_layout={(f32[8,16]{0,1}, f32[16,24]{1,0})\n ->f32[8,24]{1,0}}\n ENTRY main {\n %p0 = f32[8,16] parameter(0), sharding={devices=[1,8]<=[8]}\n %p1 = f32[16,24] parameter(1), sharding={devices=[8,1]<=[8]}\n ROOT %dot = f32[8,24] dot(%p0, %p1), lhs_contracting_dims={1},\n rhs_contracting_dims={0}\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n PartitionComputation(kHloModule, 8));\n EXPECT_EQ(module->config().entry_computation_layout().parameter_shape(0),\n ShapeUtil::MakeShapeWithDenseLayout(F32, {8, 2}, {0, 1}));\n EXPECT_EQ(module->config().entry_computation_layout().parameter_shape(1),\n ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 24}, {1, 0}));\n EXPECT_EQ(module->config().entry_computation_layout().result_shape(),\n ShapeUtil::MakeShapeWithDenseLayout(F32, {8, 24}, {1, 0}));\n}\nstd::string TestParamToString(\n const ::testing::TestParamInfo& param_info) {\n return param_info.param ? 
\"Shardy\" : \"GSPMD\";\n}\nINSTANTIATE_TEST_SUITE_P(All, GpuSpmdPartitioningTest,\n ::testing::Values(true, false), TestParamToString);\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_spmd_pipeline.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_spmd_pipeline_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1185,"cells":{"ID":{"kind":"string","value":"7dde5b77-37ca-4936-8496-28e66a591fb5"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"buffer_comparator"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/buffer_comparator.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/buffer_comparator_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/buffer_comparator.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \"Eigen/Core\"\n#include \"xla/service/gpu/launch_dimensions.h\"\n#include \"xla/service/hlo_module_config.h\"\n#include \"xla/shape.h\"\n#include \"xla/stream_executor/device_description.h\"\n#include \"xla/stream_executor/device_memory.h\"\n#include \"xla/stream_executor/device_memory_handle.h\"\n#include \"xla/stream_executor/kernel.h\"\n#include \"xla/stream_executor/stream.h\"\n#include \"xla/stream_executor/stream_executor.h\"\n#include \"xla/stream_executor/typed_kernel_factory.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/logging.h\"\n#include \"tsl/platform/ml_dtypes.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace gpu {\ntemplate \nusing ComparisonKernelT =\n 
se::TypedKernel, se::DeviceMemory,\n float, uint64_t, se::DeviceMemory>;\nstruct ComparisonParams {\n double relative_tol = 0.1;\n bool verbose = true;\n const Shape* shape = nullptr;\n se::Stream* stream = nullptr;\n se::DeviceMemoryBase current{};\n se::DeviceMemoryBase expected{};\n};\ntemplate \nstatic absl::StatusOr DeviceCompare(std::string_view kernel_name,\n void* kernel_symbol,\n const ComparisonParams& params) {\n se::StreamExecutor* executor = params.stream->parent();\n se::DeviceMemoryHandle out(executor, executor->AllocateScalar());\n TF_RETURN_IF_ERROR(\n params.stream->MemZero(out.memory_ptr(), sizeof(uint64_t)));\n if (params.current.size() != params.expected.size()) {\n return Internal(\"Mismatched buffer size: %d bytes vs. %d bytes\",\n params.current.size(), params.expected.size());\n }\n se::DeviceMemory current_typed(params.current);\n se::DeviceMemory expected_typed(params.expected);\n uint64_t buffer_size = current_typed.ElementCount();\n TF_ASSIGN_OR_RETURN(\n ComparisonKernelT comparison_kernel,\n (se::TypedKernelFactory<\n se::DeviceMemory, se::DeviceMemory, float,\n uint64_t, se::DeviceMemory>::Create(executor, kernel_name,\n kernel_symbol)));\n const se::DeviceDescription& gpu_device_info =\n executor->GetDeviceDescription();\n LaunchDimensions dim =\n CalculateLaunchDimensions(*params.shape, gpu_device_info);\n se::DeviceMemory as_uint64(out.memory());\n TF_RETURN_IF_ERROR(params.stream->ThenLaunch(\n dim.thread_counts_per_block(), dim.block_counts(), comparison_kernel,\n current_typed, expected_typed, static_cast(params.relative_tol),\n buffer_size, as_uint64));\n uint64_t result = -1;\n CHECK_EQ(out.memory().size(), sizeof(result));\n TF_RETURN_IF_ERROR(\n params.stream->Memcpy(&result, out.memory(), sizeof(result)));\n TF_RETURN_IF_ERROR(params.stream->BlockHostUntilDone());\n return result == 0;\n}\ntemplate \nstatic absl::StatusOr HostCompare(const ComparisonParams& params) {\n int64_t n = params.current.size() / 
sizeof(ElementType);\n std::vector host_current(n), host_expected(n);\n TF_RETURN_IF_ERROR(params.stream->Memcpy(host_current.data(), params.current,\n params.current.size()));\n TF_RETURN_IF_ERROR(params.stream->Memcpy(\n host_expected.data(), params.expected, params.expected.size()));\n TF_RETURN_IF_ERROR(params.stream->BlockHostUntilDone());\n const auto canonicalize = [](ComparisonType a) -> ComparisonType {\n if (std::is_same::value && a) {\n constexpr ComparisonType kMaxFp16Value = 65505;\n if (std::isnan(a)) {\n return a;\n }\n return std::max(-kMaxFp16Value, std::min(a, kMaxFp16Value));\n }\n return a;\n };\n int differences_seen = 0;\n for (int64_t i = 0; i < n && differences_seen < 10; ++i) {\n auto current_value = static_cast(host_current[i]);\n auto expected_value = static_cast(host_expected[i]);\n ComparisonType current_value_canonical = canonicalize(current_value);\n ComparisonType expected_value_canonical = canonicalize(expected_value);\n if (std::isnan(current_value_canonical) &&\n std::isnan(expected_value_canonical)) {\n continue;\n }\n if (std::isinf(current_value_canonical) &&\n std::isinf(expected_value_canonical) &&\n current_value_canonical == expected_value_canonical) {\n continue;\n }\n if (std::isfinite(current_value_canonical) !=\n std::isfinite(expected_value_canonical) ||\n !(std::abs(current_value_canonical - expected_value_canonical) /\n (std::max(std::abs(current_value_canonical),\n std::abs(expected_value_canonical)) +\n 1) <\n params.relative_tol)) {\n if (!params.verbose) return false; \n ++differences_seen;\n LOG(ERROR) << \"Difference at \" << i << \": \" << current_value\n << \", expected \" << expected_value;\n }\n }\n return differences_seen == 0;\n}\ntemplate \nstatic absl::StatusOr CompareEqualParameterized(\n std::string_view kernel_name, void* kernel_symbol,\n const ComparisonParams& params) {\n XLA_SCOPED_LOGGING_TIMER(\"BufferComparator::CompareEqual\");\n TF_ASSIGN_OR_RETURN(\n bool result, DeviceCompare(kernel_name, 
kernel_symbol, params));\n if (result) {\n return true;\n }\n TF_ASSIGN_OR_RETURN(bool host_return,\n (HostCompare(params)));\n CHECK_EQ(host_return, result)\n << \"Host comparison succeeded even though GPU comparison failed.\";\n return false;\n}\nabsl::StatusOr BufferComparator::CompareEqual(\n se::Stream* stream, se::DeviceMemoryBase current,\n se::DeviceMemoryBase expected) const {\n ComparisonParams params{relative_tol_, verbose_, &shape_,\n stream, current, expected};\n switch (shape_.element_type()) {\n#if GOOGLE_CUDA \n case xla::F8E4M3FN:\n return CompareEqualParameterized(\n \"fp8_e4m3fn_comparison\", buffer_comparator::fp8_e4m3fn_comparison(),\n params);\n case xla::F8E5M2:\n return CompareEqualParameterized(\n \"fp8_e5m2_comparison\", buffer_comparator::fp8_e5m2_comparison(),\n params);\n#endif \n#if TENSORFLOW_USE_ROCM && TF_ROCM_VERSION >= 60200\n case xla::F8E4M3FNUZ:\n return CompareEqualParameterized(\n \"fp8_e4m3fnuz_comparison\",\n buffer_comparator::fp8_e4m3fnuz_comparison(), params);\n case xla::F8E5M2FNUZ:\n return CompareEqualParameterized(\n \"fp8_e5m2fnuz_comparison\",\n buffer_comparator::fp8_e5m2fnuz_comparison(), params);\n#endif \n case xla::F16:\n return CompareEqualParameterized(\n \"fp16_comparison\", buffer_comparator::fp16_comparison(), params);\n case xla::BF16:\n return CompareEqualParameterized(\n \"bf16_comparison\", buffer_comparator::bf16_comparison(), params);\n case xla::F32:\n return CompareEqualParameterized(\n \"fp32_comparison\", buffer_comparator::fp32_comparison(), params);\n case xla::F64:\n return CompareEqualParameterized(\n \"fp64_comparison\", buffer_comparator::fp64_comparison(), params);\n case xla::S8:\n return CompareEqualParameterized(\n \"int8_comparison\", buffer_comparator::int8_comparison(), params);\n case xla::S32:\n return CompareEqualParameterized(\n \"int32_comparison\", buffer_comparator::int32_comparison(), params);\n default:\n return Unimplemented(\"Unimplemented element type\");\n 
}\n}\nBufferComparator::BufferComparator(const Shape& shape, double tolerance,\n bool verbose)\n : shape_(shape), relative_tol_(tolerance), verbose_(verbose) {\n auto double_dim_size = [&]() {\n int64_t prev_zero_dim_size = shape_.dimensions(0);\n shape_.set_dimensions(0, prev_zero_dim_size * 2);\n };\n if (shape_.element_type() == PrimitiveType::C64) {\n shape_.set_element_type(PrimitiveType::F32);\n double_dim_size();\n } else if (shape_.element_type() == PrimitiveType::C128) {\n shape_.set_element_type(PrimitiveType::F64);\n double_dim_size();\n }\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/gpu/buffer_comparator.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"xla/primitive_util.h\"\n#include \"xla/service/gpu/stream_executor_util.h\"\n#include \"xla/service/hlo_module_config.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/stream_executor/device_memory.h\"\n#include \"xla/stream_executor/device_memory_handle.h\"\n#include \"xla/stream_executor/platform.h\"\n#include \"xla/stream_executor/platform_manager.h\"\n#include \"xla/stream_executor/stream.h\"\n#include \"xla/types.h\"\n#include \"tsl/platform/ml_dtypes.h\"\n#include \"tsl/platform/status.h\"\n#include \"tsl/platform/test.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nconstexpr double kDefaultTolerance = 0.1;\nclass BufferComparatorTest : public testing::Test {\n protected:\n BufferComparatorTest()\n#if GOOGLE_CUDA\n : platform_(se::PlatformManager::PlatformWithName(\"CUDA\").value()),\n#elif TENSORFLOW_USE_ROCM\n : platform_(se::PlatformManager::PlatformWithName(\"ROCM\").value()),\n#endif\n stream_exec_(platform_->ExecutorForDevice(0).value()) {\n }\n template \n bool CompareEqualBuffers(const std::vector& current,\n const std::vector& expected,\n double tolerance) {\n auto stream = stream_exec_->CreateStream().value();\n se::DeviceMemoryHandle current_buffer(\n stream_exec_, stream_exec_->AllocateArray(current.size()));\n 
se::DeviceMemoryHandle expected_buffer(\n stream_exec_,\n stream_exec_->AllocateArray(expected.size()));\n TF_CHECK_OK(stream->Memcpy(current_buffer.memory_ptr(), current.data(),\n current_buffer.memory().size()));\n TF_CHECK_OK(stream->Memcpy(expected_buffer.memory_ptr(), expected.data(),\n expected_buffer.memory().size()));\n TF_CHECK_OK(stream->BlockHostUntilDone());\n BufferComparator comparator(\n ShapeUtil::MakeShape(\n primitive_util::NativeToPrimitiveType(),\n {static_cast(current.size())}),\n tolerance);\n return comparator\n .CompareEqual(stream.get(), current_buffer.memory(),\n expected_buffer.memory())\n .value();\n }\n template \n bool CompareEqualFloatBuffers(const std::vector& lhs_float,\n const std::vector& rhs_float,\n double tolerance = kDefaultTolerance) {\n std::vector lhs(lhs_float.begin(), lhs_float.end());\n std::vector rhs(rhs_float.begin(), rhs_float.end());\n return CompareEqualBuffers(lhs, rhs, tolerance);\n }\n template \n bool CompareEqualComplex(const std::vector>& lhs,\n const std::vector>& rhs) {\n return CompareEqualBuffers>(lhs, rhs,\n kDefaultTolerance);\n }\n se::Platform* platform_;\n se::StreamExecutor* stream_exec_;\n};\nTEST_F(BufferComparatorTest, TestComplex) {\n EXPECT_FALSE(\n CompareEqualComplex({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {6, 7}}));\n EXPECT_TRUE(CompareEqualComplex({{0.1, 0.2}, {2, 3}},\n {{0.1, 0.2}, {2.2, 3.3}}));\n EXPECT_TRUE(\n CompareEqualComplex({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {2, 3}}));\n EXPECT_FALSE(\n CompareEqualComplex({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {6, 3}}));\n EXPECT_FALSE(\n CompareEqualComplex({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {6, 7}}));\n EXPECT_FALSE(\n CompareEqualComplex({{0.1, 0.2}, {2, 3}}, {{0.1, 6}, {2, 3}}));\n EXPECT_TRUE(CompareEqualComplex({{0.1, 0.2}, {2, 3}},\n {{0.1, 0.2}, {2.2, 3.3}}));\n EXPECT_FALSE(\n CompareEqualComplex({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {2, 7}}));\n}\nTEST_F(BufferComparatorTest, TestNaNs) {\n EXPECT_TRUE(\n 
CompareEqualFloatBuffers({std::nanf(\"\")}, {std::nanf(\"\")}));\n EXPECT_TRUE(CompareEqualFloatBuffers({std::nanf(\"\")},\n {std::nanf(\"1234\")}));\n EXPECT_FALSE(CompareEqualFloatBuffers({std::nanf(\"\")}, {1.}));\n EXPECT_TRUE(\n CompareEqualFloatBuffers({std::nanf(\"\")}, {std::nanf(\"\")}));\n EXPECT_TRUE(\n CompareEqualFloatBuffers({std::nanf(\"\")}, {std::nanf(\"1234\")}));\n EXPECT_FALSE(CompareEqualFloatBuffers({std::nanf(\"\")}, {1.}));\n EXPECT_TRUE(\n CompareEqualFloatBuffers({std::nanf(\"\")}, {std::nanf(\"\")}));\n EXPECT_TRUE(\n CompareEqualFloatBuffers({std::nanf(\"\")}, {std::nanf(\"1234\")}));\n EXPECT_FALSE(CompareEqualFloatBuffers({std::nanf(\"\")}, {1.}));\n}\nTEST_F(BufferComparatorTest, TestInfs) {\n const auto inf = std::numeric_limits::infinity();\n EXPECT_FALSE(CompareEqualFloatBuffers({inf}, {std::nanf(\"\")}));\n EXPECT_TRUE(CompareEqualFloatBuffers({inf}, {inf}));\n EXPECT_TRUE(CompareEqualFloatBuffers({inf}, {65504}));\n EXPECT_TRUE(CompareEqualFloatBuffers({-inf}, {-65504}));\n EXPECT_FALSE(CompareEqualFloatBuffers({inf}, {-65504}));\n EXPECT_FALSE(CompareEqualFloatBuffers({-inf}, {65504}));\n EXPECT_FALSE(CompareEqualFloatBuffers({inf}, {20}));\n EXPECT_FALSE(CompareEqualFloatBuffers({inf}, {-20}));\n EXPECT_FALSE(CompareEqualFloatBuffers({-inf}, {20}));\n EXPECT_FALSE(CompareEqualFloatBuffers({-inf}, {-20}));\n EXPECT_FALSE(CompareEqualFloatBuffers({inf}, {std::nanf(\"\")}));\n EXPECT_TRUE(CompareEqualFloatBuffers({inf}, {inf}));\n EXPECT_FALSE(CompareEqualFloatBuffers({inf}, {65504}));\n EXPECT_FALSE(CompareEqualFloatBuffers({-inf}, {-65504}));\n EXPECT_FALSE(CompareEqualFloatBuffers({inf}, {-65504}));\n EXPECT_FALSE(CompareEqualFloatBuffers({-inf}, {65504}));\n EXPECT_FALSE(CompareEqualFloatBuffers({inf}, {20}));\n EXPECT_FALSE(CompareEqualFloatBuffers({inf}, {-20}));\n EXPECT_FALSE(CompareEqualFloatBuffers({-inf}, {20}));\n EXPECT_FALSE(CompareEqualFloatBuffers({-inf}, {-20}));\n EXPECT_FALSE(CompareEqualFloatBuffers({inf}, 
{std::nanf(\"\")}));\n EXPECT_TRUE(CompareEqualFloatBuffers({inf}, {inf}));\n EXPECT_FALSE(CompareEqualFloatBuffers({inf}, {65504}));\n EXPECT_FALSE(CompareEqualFloatBuffers({-inf}, {-65504}));\n EXPECT_FALSE(CompareEqualFloatBuffers({inf}, {-65504}));\n EXPECT_FALSE(CompareEqualFloatBuffers({-inf}, {65504}));\n EXPECT_FALSE(CompareEqualFloatBuffers({inf}, {20}));\n EXPECT_FALSE(CompareEqualFloatBuffers({inf}, {-20}));\n EXPECT_FALSE(CompareEqualFloatBuffers({-inf}, {20}));\n EXPECT_FALSE(CompareEqualFloatBuffers({-inf}, {-20}));\n#if GOOGLE_CUDA\n EXPECT_TRUE(\n CompareEqualFloatBuffers({inf}, {std::nanf(\"\")}));\n EXPECT_TRUE(CompareEqualFloatBuffers({inf}, {inf}));\n EXPECT_TRUE(CompareEqualFloatBuffers({inf}, {-inf}));\n EXPECT_FALSE(CompareEqualFloatBuffers({inf}, {448}));\n EXPECT_FALSE(CompareEqualFloatBuffers({inf}, {-448}));\n EXPECT_FALSE(CompareEqualFloatBuffers({inf}, {20}));\n EXPECT_FALSE(CompareEqualFloatBuffers({inf}, {-20}));\n EXPECT_FALSE(\n CompareEqualFloatBuffers({inf}, {std::nanf(\"\")}));\n EXPECT_TRUE(CompareEqualFloatBuffers({inf}, {inf}));\n EXPECT_FALSE(CompareEqualFloatBuffers({inf}, {-inf}));\n EXPECT_FALSE(CompareEqualFloatBuffers({inf}, {57344}));\n EXPECT_FALSE(CompareEqualFloatBuffers({-inf}, {-57344}));\n EXPECT_FALSE(CompareEqualFloatBuffers({inf}, {20}));\n EXPECT_FALSE(CompareEqualFloatBuffers({inf}, {-20}));\n EXPECT_FALSE(CompareEqualFloatBuffers({-inf}, {20}));\n EXPECT_FALSE(CompareEqualFloatBuffers({-inf}, {-20}));\n#endif \n}\nTEST_F(BufferComparatorTest, TestNumbers) {\n EXPECT_TRUE(CompareEqualFloatBuffers({20}, {20.1}));\n EXPECT_FALSE(CompareEqualFloatBuffers({20}, {23.0}));\n EXPECT_TRUE(CompareEqualFloatBuffers({20}, {23.0}, 0.2));\n EXPECT_FALSE(CompareEqualFloatBuffers({20}, {26.0}, 0.2));\n EXPECT_FALSE(CompareEqualFloatBuffers({0}, {1}));\n EXPECT_TRUE(CompareEqualFloatBuffers({0.9}, {1}));\n EXPECT_TRUE(CompareEqualFloatBuffers({9}, {10}));\n EXPECT_TRUE(CompareEqualFloatBuffers({10}, {9}));\n 
EXPECT_TRUE(CompareEqualFloatBuffers({20}, {20.1}));\n EXPECT_FALSE(CompareEqualFloatBuffers({20}, {23.0}));\n EXPECT_TRUE(CompareEqualFloatBuffers({20}, {23.0}, 0.2));\n EXPECT_FALSE(CompareEqualFloatBuffers({20}, {26.0}, 0.2));\n EXPECT_FALSE(CompareEqualFloatBuffers({0}, {1}));\n EXPECT_TRUE(CompareEqualFloatBuffers({0.9}, {1}));\n EXPECT_TRUE(CompareEqualFloatBuffers({9}, {10}));\n EXPECT_TRUE(CompareEqualFloatBuffers({10}, {9}));\n EXPECT_TRUE(CompareEqualFloatBuffers({20}, {20.1}));\n EXPECT_FALSE(CompareEqualFloatBuffers({20}, {23.0}));\n EXPECT_TRUE(CompareEqualFloatBuffers({20}, {23.0}, 0.2));\n EXPECT_FALSE(CompareEqualFloatBuffers({20}, {26.0}, 0.2));\n EXPECT_FALSE(CompareEqualFloatBuffers({0}, {1}));\n EXPECT_TRUE(CompareEqualFloatBuffers({0.9}, {1}));\n EXPECT_TRUE(CompareEqualFloatBuffers({9}, {10}));\n EXPECT_TRUE(CompareEqualFloatBuffers({10}, {9}));\n EXPECT_TRUE(CompareEqualFloatBuffers({100}, {101}));\n EXPECT_FALSE(CompareEqualFloatBuffers({100}, {120}));\n EXPECT_TRUE(CompareEqualFloatBuffers({100}, {120}, 0.2));\n EXPECT_FALSE(CompareEqualFloatBuffers({90}, {120}, 0.2));\n EXPECT_FALSE(CompareEqualFloatBuffers({0}, {10}));\n EXPECT_TRUE(CompareEqualFloatBuffers({9}, {10}));\n EXPECT_TRUE(CompareEqualFloatBuffers({90}, {100}));\n EXPECT_TRUE(CompareEqualFloatBuffers({100}, {90}));\n EXPECT_FALSE(CompareEqualFloatBuffers({-128}, {127}));\n#if GOOGLE_CUDA\n EXPECT_TRUE(CompareEqualFloatBuffers({20}, {20.1}));\n EXPECT_FALSE(CompareEqualFloatBuffers({20}, {23.0}));\n EXPECT_TRUE(CompareEqualFloatBuffers({20}, {23.0}, 0.2));\n EXPECT_FALSE(CompareEqualFloatBuffers({20}, {26.0}, 0.2));\n EXPECT_FALSE(CompareEqualFloatBuffers({0}, {1}));\n EXPECT_TRUE(CompareEqualFloatBuffers({0.9}, {1}));\n EXPECT_TRUE(CompareEqualFloatBuffers({9}, {10}));\n EXPECT_TRUE(CompareEqualFloatBuffers({9}, {10}));\n EXPECT_TRUE(CompareEqualFloatBuffers({20}, {20.1}));\n EXPECT_FALSE(CompareEqualFloatBuffers({20}, {23.0}));\n EXPECT_TRUE(CompareEqualFloatBuffers({20}, 
{23.0}, 0.2));\n EXPECT_FALSE(CompareEqualFloatBuffers({20}, {30.0}, 0.2));\n EXPECT_FALSE(CompareEqualFloatBuffers({0}, {1}));\n EXPECT_TRUE(CompareEqualFloatBuffers({0.9}, {1}));\n EXPECT_TRUE(CompareEqualFloatBuffers({11}, {12}));\n EXPECT_TRUE(CompareEqualFloatBuffers({12}, {11}));\n#endif \n const double tol = 0.001;\n EXPECT_FALSE(CompareEqualFloatBuffers({0.9}, {1}, tol));\n EXPECT_TRUE(CompareEqualFloatBuffers({0.9}, {0.901}, tol));\n EXPECT_FALSE(CompareEqualFloatBuffers({10}, {10.1}, tol));\n EXPECT_TRUE(CompareEqualFloatBuffers({10}, {10.01}, tol));\n EXPECT_FALSE(CompareEqualFloatBuffers({100}, {101}, tol));\n EXPECT_FALSE(CompareEqualFloatBuffers({20}, {20.1}, tol));\n EXPECT_TRUE(CompareEqualFloatBuffers({20}, {20.01}, tol));\n}\nTEST_F(BufferComparatorTest, TestMultiple) {\n {\n EXPECT_TRUE(CompareEqualFloatBuffers(\n {20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1}));\n std::vector lhs(200);\n std::vector rhs(200);\n for (int i = 0; i < 200; i++) {\n EXPECT_TRUE(CompareEqualFloatBuffers(lhs, rhs))\n << \"should be the same at index \" << i;\n lhs[i] = 3;\n rhs[i] = 5;\n EXPECT_FALSE(CompareEqualFloatBuffers(lhs, rhs))\n << \"should be the different at index \" << i;\n lhs[i] = 0;\n rhs[i] = 0;\n }\n }\n {\n EXPECT_TRUE(CompareEqualFloatBuffers(\n {20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1}));\n std::vector lhs(200);\n std::vector rhs(200);\n for (int i = 0; i < 200; i++) {\n EXPECT_TRUE(CompareEqualFloatBuffers(lhs, rhs))\n << \"should be the same at index \" << i;\n lhs[i] = 3;\n rhs[i] = 5;\n EXPECT_FALSE(CompareEqualFloatBuffers(lhs, rhs))\n << \"should be the different at index \" << i;\n lhs[i] = 0;\n rhs[i] = 0;\n }\n }\n {\n EXPECT_TRUE(CompareEqualFloatBuffers(\n {20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1}));\n std::vector lhs(200);\n std::vector rhs(200);\n for (int i = 0; i < 200; i++) {\n EXPECT_TRUE(CompareEqualFloatBuffers(lhs, rhs))\n << \"should be the same at index \" << i;\n lhs[i] = 3;\n rhs[i] = 5;\n 
EXPECT_FALSE(CompareEqualFloatBuffers(lhs, rhs))\n << \"should be the different at index \" << i;\n lhs[i] = 0;\n rhs[i] = 0;\n }\n }\n {\n EXPECT_TRUE(CompareEqualFloatBuffers({20, 30, 40, 50, 60},\n {21, 31, 41, 51, 61}));\n std::vector lhs(200);\n std::vector rhs(200);\n for (int i = 0; i < 200; i++) {\n EXPECT_TRUE(CompareEqualFloatBuffers(lhs, rhs))\n << \"should be the same at index \" << i;\n lhs[i] = 3;\n rhs[i] = 5;\n EXPECT_FALSE(CompareEqualFloatBuffers(lhs, rhs))\n << \"should be the different at index \" << i;\n lhs[i] = 0;\n rhs[i] = 0;\n }\n }\n#if GOOGLE_CUDA\n {\n EXPECT_TRUE(CompareEqualFloatBuffers(\n {20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1}));\n std::vector lhs(200);\n std::vector rhs(200);\n for (int i = 0; i < 200; i++) {\n EXPECT_TRUE(CompareEqualFloatBuffers(lhs, rhs))\n << \"should be the same at index \" << i;\n lhs[i] = 3;\n rhs[i] = 5;\n EXPECT_FALSE(CompareEqualFloatBuffers(lhs, rhs))\n << \"should be the different at index \" << i;\n lhs[i] = 0;\n rhs[i] = 0;\n }\n }\n {\n EXPECT_TRUE(CompareEqualFloatBuffers(\n {20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1}));\n std::vector lhs(200);\n std::vector rhs(200);\n for (int i = 0; i < 200; i++) {\n EXPECT_TRUE(CompareEqualFloatBuffers(lhs, rhs))\n << \"should be the same at index \" << i;\n lhs[i] = 3;\n rhs[i] = 5;\n EXPECT_FALSE(CompareEqualFloatBuffers(lhs, rhs))\n << \"should be the different at index \" << i;\n lhs[i] = 0;\n rhs[i] = 0;\n }\n }\n#endif \n}\nTEST_F(BufferComparatorTest, BF16) {\n const int element_count = 3123;\n int64_t rng_state = 0;\n auto stream = stream_exec_->CreateStream().value();\n se::DeviceMemoryHandle lhs(\n stream_exec_,\n stream_exec_->AllocateArray(element_count));\n InitializeBuffer(stream.get(), BF16, &rng_state, lhs.memory());\n se::DeviceMemoryHandle rhs(\n stream_exec_,\n stream_exec_->AllocateArray(element_count));\n InitializeBuffer(stream.get(), BF16, &rng_state, rhs.memory());\n BufferComparator 
comparator(ShapeUtil::MakeShape(BF16, {element_count}));\n EXPECT_FALSE(comparator.CompareEqual(stream.get(), lhs.memory(), rhs.memory())\n .value());\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/buffer_comparator.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/buffer_comparator_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1186,"cells":{"ID":{"kind":"string","value":"5befe34d-a742-4dd8-8eb2-5fbb362c43ee"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"gpu_fusible"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/gpu_fusible.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/gpu_fusible_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/gpu_fusible.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/container/inlined_vector.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/synchronization/mutex.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/permutation_util.h\"\n#include \"xla/service/gpu/backend_configs.pb.h\"\n#include \"xla/service/gpu/hlo_fusion_analysis.h\"\n#include \"xla/service/gpu/hlo_traversal.h\"\n#include \"xla/service/gpu/ir_emission_utils.h\"\n#include \"xla/service/gpu/reduction_utils.h\"\n#include \"xla/service/hlo_dataflow_analysis.h\"\n#include 
\"xla/service/instruction_fusion.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/stream_executor/device_description.h\"\n#include \"xla/util.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nbool HasAnyTiledTransposeRoot(const HloComputation& computation) {\n return absl::c_any_of(GetFusionRoots(computation),\n [&](const HloInstruction* instr) {\n return GetDescriptionForTiledTransposeEmitter(\n FindNonTrivialHero(*instr))\n .has_value();\n });\n}\nconst Shape& GetElementShape(const HloFusionAnalysis& analysis) {\n const Shape* shape = &analysis.fusion_root(0).shape();\n while (shape->IsTuple()) {\n shape = &shape->tuple_shapes(0);\n }\n return *shape;\n}\nint ComputeMaxUnrollFactor(int64_t num_elements) {\n constexpr int kMaxUnrollFactor = 4;\n for (int i = kMaxUnrollFactor; i > 1; i /= 2) {\n if (num_elements % i == 0) {\n return i;\n }\n }\n return 1;\n}\n} \nbool IfFusedReadsElementsMultipleTimes(const HloInstruction& instr) {\n CHECK_NE(instr.opcode(), HloOpcode::kFusion) << \"`instr` has to be unfused.\";\n if (instr.opcode() == HloOpcode::kGather ||\n instr.opcode() == HloOpcode::kBroadcast) {\n return ShapeUtil::ElementsIn(instr.shape()) >\n ShapeUtil::ElementsIn(instr.operand(0)->shape());\n }\n if (instr.opcode() == HloOpcode::kReduceWindow) {\n for (const auto& dim : instr.window().dimensions()) {\n if (dim.size() > dim.stride()) {\n return true;\n }\n }\n }\n return false;\n}\nbool IsExpensiveToRepeat(const HloInstruction& instr) {\n CHECK_NE(instr.opcode(), HloOpcode::kFusion) << \"`instr` has to be unfused.\";\n constexpr int kMaxInputsPerOutput = 10;\n if (instr.opcode() == HloOpcode::kReduce &&\n !IsReductionFromOrToContiguousDimensions(instr)) {\n int64_t reduction_ratio = ShapeUtil::ElementsIn(instr.operand(0)->shape()) /\n ShapeUtil::ElementsIn(instr.shape());\n if (reduction_ratio > kMaxInputsPerOutput) return true;\n }\n if (instr.opcode() == HloOpcode::kReduceWindow) {\n int64_t reduction_ratio = 1;\n for (const 
auto& dim : instr.window().dimensions())\n reduction_ratio *= dim.size();\n if (reduction_ratio > kMaxInputsPerOutput) return true;\n }\n return false;\n}\nbool IsPhysicallyTransposing(const HloInstruction& instr) {\n if (instr.opcode() == HloOpcode::kFusion) {\n for (const HloInstruction* fused_instr : instr.fused_instructions()) {\n if (IsPhysicallyTransposing(*fused_instr)) {\n return true;\n }\n }\n }\n return instr.opcode() == HloOpcode::kCopy ||\n (instr.opcode() == HloOpcode::kTranspose &&\n !ShapeUtil::TransposeIsBitcast(instr.operand(0)->shape(),\n instr.shape(), instr.dimensions()));\n}\nnamespace {\nstd::pair MostMinorNonTrivialDimension(const Shape& shape) {\n int64_t position_of_first_non_trivial_dim = 0;\n for (int64_t dim : shape.layout().minor_to_major()) {\n if (shape.dimensions()[dim] > 1) {\n return {dim, position_of_first_non_trivial_dim};\n }\n ++position_of_first_non_trivial_dim;\n }\n return {-1, position_of_first_non_trivial_dim};\n}\n} \nbool TransposesMinorDimension(const HloInstruction* instr) {\n switch (instr->opcode()) {\n case HloOpcode::kFusion:\n return absl::c_any_of(instr->fused_instructions(),\n TransposesMinorDimension);\n case HloOpcode::kCopy: {\n int64_t first_non_trivial_operand_dim =\n MostMinorNonTrivialDimension(instr->operand(0)->shape()).first;\n int64_t first_non_trivial_output_dim =\n MostMinorNonTrivialDimension(instr->shape()).first;\n return first_non_trivial_operand_dim != first_non_trivial_output_dim;\n }\n case HloOpcode::kTranspose: {\n auto position_in_minor_to_major = InversePermutation(\n instr->operand(0)->shape().layout().minor_to_major());\n int64_t position_of_first_non_trivial_dim =\n MostMinorNonTrivialDimension(instr->operand(0)->shape()).second;\n for (int64_t output_dim : instr->shape().layout().minor_to_major()) {\n if (instr->shape().dimensions()[output_dim] == 1) {\n continue;\n }\n int64_t operand_dim = instr->dimensions().at(output_dim);\n return position_in_minor_to_major[operand_dim] >\n 
position_of_first_non_trivial_dim;\n }\n return false;\n }\n default:\n return false;\n }\n}\nbool IsReduceInputFusion(const HloInstruction& instr) {\n return instr.opcode() == HloOpcode::kFusion &&\n absl::c_any_of(GetFusionRoots(*instr.called_computations()[0]),\n [](const HloInstruction* root) {\n return IsRealReductionHero(*root,\n FindNonTrivialHero(*root));\n });\n}\nbool IsInputFusibleReduction(const HloInstruction& instr) {\n return IsReduceInputFusion(instr) ||\n IsReductionFromOrToContiguousDimensions(instr);\n}\nbool IsNestableVariadicReduction(const HloInstruction& instr) {\n return instr.shape().IsTuple() &&\n ((instr.opcode() == HloOpcode::kReduce &&\n !IsReductionFromOrToContiguousDimensions(instr)) ||\n (instr.opcode() == HloOpcode::kFusion &&\n instr.fusion_kind() == HloInstruction::FusionKind::kLoop &&\n instr.fused_expression_root()->opcode() == HloOpcode::kReduce));\n}\nbool IsInputFusibleTranspose(const HloInstruction& instr) {\n if (instr.opcode() == HloOpcode::kBitcast || instr.IsCustomFusion()) {\n return false;\n }\n if (instr.opcode() == HloOpcode::kFusion) {\n return HasAnyTiledTransposeRoot(*instr.fused_instructions_computation());\n }\n return GetDescriptionForTiledTransposeEmitter(instr).has_value();\n}\nconst HloInstruction* GetRealHeroForMultiOutputFusion(\n const HloInstruction& instr) {\n if (instr.opcode() != HloOpcode::kFusion) {\n return &instr;\n }\n auto fused_expression_root = instr.fused_expression_root();\n if (!instr.IsMultiOutputFusion()) {\n const auto& hero = FindNonTrivialHero(*fused_expression_root);\n if (IsRealReductionHero(*fused_expression_root, hero) ||\n GetDescriptionForTiledTransposeEmitter(hero).has_value()) {\n return &hero;\n }\n return fused_expression_root;\n }\n for (auto* inst : fused_expression_root->mutable_operands()) {\n const auto& hero = FindNonTrivialHero(*inst);\n if (IsRealReductionHero(*inst, hero) ||\n GetDescriptionForTiledTransposeEmitter(hero).has_value()) {\n return &hero;\n }\n }\n 
return fused_expression_root->operands()[0];\n}\nFusionDecision FusionHeroesAreCompatible(const HloInstruction* hero1,\n const HloInstruction* hero2) {\n auto hero1_is_unnested_reduce =\n IsReductionFromOrToContiguousDimensions(*hero1);\n auto tiled_transpose_hero1 = GetDescriptionForTiledTransposeEmitter(*hero1);\n bool hero1_is_unnested_transpose = tiled_transpose_hero1.has_value();\n bool hero2_is_unnested_reduce =\n IsReductionFromOrToContiguousDimensions(*hero2);\n auto tiled_transpose_hero2 = GetDescriptionForTiledTransposeEmitter(*hero2);\n bool hero2_is_unnested_transpose = tiled_transpose_hero2.has_value();\n if (hero1_is_unnested_reduce && hero2_is_unnested_reduce &&\n !AreReductionsMultiOutputFusionCompatible(hero2, hero1)) {\n return FusionDecision::Forbid(\"tiled reductions with different shapes\");\n } else if (hero1_is_unnested_transpose && hero2_is_unnested_transpose &&\n !tiled_transpose_hero1->IsEquivalent(*tiled_transpose_hero2)) {\n return FusionDecision::Forbid(\"tiled transposes with different shapes\");\n } else if ((hero1_is_unnested_transpose && hero2_is_unnested_reduce) ||\n (hero1_is_unnested_reduce && hero2_is_unnested_transpose)) {\n return FusionDecision::Forbid(\"MOF-fusion of a transpose and a reduction\");\n }\n if (hero1_is_unnested_transpose || hero2_is_unnested_transpose) {\n auto check_path_of_intermediate_ops = [](HloInstruction* param) {\n if (param->user_count() != 1) {\n return false;\n }\n HloInstruction* hlo = param->users()[0];\n while (hlo->user_count() > 0) {\n if (!IsIntermediate(hlo)) {\n return false;\n }\n hlo = hlo->users()[0];\n }\n return true;\n };\n HloInstruction* fusion1 = hero1->parent()->FusionInstruction();\n HloInstruction* fusion2 = hero2->parent()->FusionInstruction();\n if (fusion1 != nullptr && fusion2 != nullptr) {\n if (hero1_is_unnested_transpose && fusion2->IsUserOf(fusion1)) {\n int64_t operand_idx = fusion2->operand_index(fusion1);\n auto hlo = fusion2->fused_parameter(operand_idx);\n if 
(!check_path_of_intermediate_ops(hlo)) {\n return FusionDecision::Forbid(\"tiled transpose would become untiled\");\n }\n } else if (hero2_is_unnested_transpose && fusion1->IsUserOf(fusion2)) {\n int64_t operand_idx = fusion1->operand_index(fusion2);\n auto hlo = fusion1->fused_parameter(operand_idx);\n if (!check_path_of_intermediate_ops(hlo)) {\n return FusionDecision::Forbid(\"tiled transpose would become untiled\");\n }\n }\n }\n }\n return FusionDecision::Allow();\n}\nFusionDecision ShapesCompatibleForMultiOutputFusion(\n const HloInstruction& instr1, const HloInstruction& instr2) {\n auto get_loop_shape = [&](const HloInstruction* element_instr) {\n const auto& hero = element_instr->parent()->IsFusionComputation()\n ? FindNonTrivialHero(*element_instr)\n : *element_instr;\n if (IsReductionFromOrToContiguousDimensions(*element_instr) ||\n GetDescriptionForTiledTransposeEmitter(hero).has_value()) {\n return hero.operand(0)->shape();\n }\n return element_instr->shape();\n };\n const HloInstruction* hero1 = GetRealHeroForMultiOutputFusion(instr1);\n const HloInstruction* hero2 = GetRealHeroForMultiOutputFusion(instr2);\n if (auto compatible = FusionHeroesAreCompatible(hero1, hero2); !compatible) {\n return compatible;\n }\n const Shape& l1 = get_loop_shape(hero1);\n const Shape& l2 = get_loop_shape(hero2);\n bool accept_unequal_shape = !l1.IsTuple() && !l2.IsTuple();\n if (!ShapeUtil::EqualIgnoringElementType(l1, l2) &&\n (!accept_unequal_shape ||\n !ShapeUtil::IsReshapeOrTransposeBitcast(l1, l2,\n true))) {\n return FusionDecision::Forbid(\"different loop shapes\");\n }\n return FusionDecision::Allow();\n}\nbool IsInputFusibleScatter(const HloInstruction& instr) {\n if (instr.opcode() == HloOpcode::kScatter ||\n (instr.opcode() == HloOpcode::kFusion &&\n instr.fusion_kind() == HloInstruction::FusionKind::kInput &&\n instr.fused_expression_root()->opcode() == HloOpcode::kScatter)) {\n return true;\n }\n return false;\n}\nbool IsInputFusible(const HloInstruction& 
instr) {\n return instr.IsFusible() &&\n (IsInputFusibleReduction(instr) || IsInputFusibleScatter(instr) ||\n IsInputFusibleTranspose(instr));\n}\nbool IsUniversallyLoopFusible(const HloInstruction& instr) {\n if (instr.IsElementwise() && instr.operand_count() > 0 &&\n instr.opcode() != HloOpcode::kCopy) {\n return true;\n }\n switch (instr.opcode()) {\n case HloOpcode::kCopy:\n return !GetDescriptionForTiledTransposeEmitter(instr).has_value();\n case HloOpcode::kFusion:\n return instr.fusion_kind() == HloInstruction::FusionKind::kLoop;\n case HloOpcode::kBitcast:\n case HloOpcode::kBroadcast:\n case HloOpcode::kConcatenate:\n case HloOpcode::kDynamicSlice:\n case HloOpcode::kDynamicUpdateSlice:\n case HloOpcode::kGather:\n case HloOpcode::kPad:\n case HloOpcode::kReduceWindow:\n case HloOpcode::kReshape:\n case HloOpcode::kReverse:\n case HloOpcode::kSlice:\n case HloOpcode::kTranspose:\n return true;\n default:\n return false;\n }\n}\nbool IsLoopFusibleAsConsumer(const HloInstruction& instr) {\n if (!instr.IsFusible()) return false;\n if (instr.opcode() == HloOpcode::kBitcast) return false;\n if (instr.opcode() == HloOpcode::kReduce) return true;\n if (!IsInputFusible(instr) && instr.opcode() == HloOpcode::kFusion &&\n instr.fusion_kind() == HloInstruction::FusionKind::kInput) {\n return true;\n }\n return IsUniversallyLoopFusible(instr);\n}\nbool IsLoopFusibleAsProducer(const HloInstruction& instr) {\n if (!instr.IsFusible()) return false;\n switch (instr.opcode()) {\n case HloOpcode::kIota:\n case HloOpcode::kConstant:\n return true;\n case HloOpcode::kReduce:\n return !instr.shape().IsTuple();\n default:\n return IsUniversallyLoopFusible(instr);\n }\n}\nstatic bool AllSatisfy(const HloInstruction& instr,\n const HloPredicate& predicate) {\n if (instr.opcode() != HloOpcode::kFusion) {\n return predicate(&instr);\n }\n return absl::c_all_of(\n instr.fused_instructions(), [&](const HloInstruction* i) {\n return i->opcode() == HloOpcode::kParameter || 
predicate(i);\n });\n}\nFusionDecision CanEmitInputFusedScatter(const HloInstruction& producer,\n const HloInstruction& consumer) {\n if (IsInputFusibleScatter(producer)) {\n return FusionDecision::Forbid(\"do not fuse into the output of scatter\");\n }\n if (!IsInputFusibleScatter(consumer)) {\n return FusionDecision::Allow();\n }\n const HloInstruction* inplace_operand;\n if (consumer.opcode() == HloOpcode::kFusion) {\n const HloInstruction* scatter = consumer.fused_expression_root();\n CHECK_EQ(scatter->opcode(), HloOpcode::kScatter);\n CHECK_EQ(scatter->operand(0)->opcode(), HloOpcode::kParameter);\n inplace_operand = consumer.operand(scatter->operand(0)->parameter_number());\n } else {\n inplace_operand = consumer.operand(0);\n }\n if (inplace_operand == &producer) {\n return FusionDecision::Forbid(\n \"do not fuse into the in-place operand of scatter\");\n }\n if (absl::c_linear_search(producer.operands(), inplace_operand)) {\n return FusionDecision::Forbid(\n \"Producer uses the in-place operand of a scatter\");\n }\n return FusionDecision::Allow();\n}\nFusionDecision IsProducerConsumerFusible(const HloInstruction& producer,\n const HloInstruction& consumer) {\n if (!IsLoopFusibleAsProducer(producer) &&\n !IsInputFusibleTranspose(producer)) {\n return FusionDecision::Forbid(\"the producer is not loop-fusible\");\n }\n if (IsInputFusibleReduction(producer)) {\n if (!producer.GetModule()\n ->config()\n .debug_options()\n .xla_gpu_enable_reduction_epilogue_fusion()) {\n return FusionDecision::Forbid(\n \"Reduction epilogue fusion is not enabled.\");\n }\n const HloInstruction& reduce_hero =\n producer.opcode() == HloOpcode::kFusion\n ? 
FindNonTrivialHero(*producer.fused_expression_root())\n : producer;\n if (!ReductionIsRaceFree(\n reduce_hero.GetModule()->config(),\n GetReductionKindAndContiguousComponents(reduce_hero))) {\n return FusionDecision::Forbid(\n \"Reduction output fusion only works for race free reductions\");\n }\n if (!AllSatisfy(consumer, [](const HloInstruction* hlo) {\n return IsIntermediate(hlo, 1);\n })) {\n return FusionDecision::Forbid(\n \"Reductions from/to continuous dims epilogue not fusible\");\n }\n if (producer.user_count() > 1) {\n return FusionDecision::Forbid(\n \"reduction output fusion only works for single user\");\n }\n }\n if (auto can_fuse = CanEmitInputFusedScatter(producer, consumer); !can_fuse) {\n return can_fuse;\n }\n if (!IsInputFusible(consumer) && !IsLoopFusibleAsConsumer(consumer)) {\n return FusionDecision::Forbid(\n \"the consumer is not input-fusible and not loop-fusible\");\n }\n if (producer.IsMultiOutputFusion()) {\n return FusionDecision::Forbid(\n \"the producer is not fusible as it is a multi-output fusion\");\n }\n if (producer.opcode() == HloOpcode::kConstant &&\n (!ShapeUtil::IsEffectiveScalar(producer.shape()) ||\n consumer.opcode() != HloOpcode::kFusion)) {\n return FusionDecision::Forbid(\"not fusing constant\");\n }\n return InstructionFusion::ShouldFuseInPlaceOp(&producer, &consumer);\n}\nFusionDecision IsProducerMultiOutputFusible(const HloInstruction& producer) {\n if (producer.IsMultiOutputFusion()) {\n return FusionDecision::Forbid(\"Producer is a multi-output fusion\");\n }\n if (!HloDataflowAnalysis::GetInPlaceInputOutputPairs(&producer).empty()) {\n return FusionDecision::Forbid(\"In-place operations are present\");\n }\n if (!IsLoopFusibleAsProducer(producer)) {\n return FusionDecision::Forbid(\"producer is not loop-fusible\");\n }\n if (IsPhysicallyTransposing(producer)) {\n return FusionDecision::Forbid(\"producer is physically transposing\");\n }\n return FusionDecision::Allow();\n}\nstatic int64_t 
SharedMemoryUsageNoCache(const HloInstruction& instr) {\n if (instr.opcode() == HloOpcode::kFusion) {\n int64_t sum = 0;\n for (const HloInstruction* hlo :\n instr.fused_instructions_computation()->instructions()) {\n sum += SharedMemoryUsageNoCache(*hlo);\n }\n return sum;\n } else if (instr.opcode() == HloOpcode::kReduce &&\n IsReductionFromOrToContiguousDimensions(instr)) {\n ReductionDimensions reduction_info =\n GetReductionKindAndContiguousComponents(instr);\n int64_t primitive_size = ShapeUtil::ByteSizeOfPrimitiveType(\n instr.operand(0)->shape().element_type());\n int num_variadic =\n instr.shape().IsTuple() ? instr.shape().tuple_shapes_size() : 1;\n if (reduction_info.is_row_reduction) {\n return 32 * primitive_size * num_variadic;\n } else {\n return 4 * 32 * 33 * primitive_size * num_variadic;\n }\n } else if (auto tr = GetDescriptionForTiledTransposeEmitter(instr)) {\n int64_t primitive_size =\n ShapeUtil::ByteSizeOfPrimitiveType(instr.shape().element_type());\n int64_t bytes_required = 32 * 33 * primitive_size;\n if (tr->permutation.back() == tr->permutation.size() - 1) {\n bytes_required *= tr->dimensions.back();\n }\n return bytes_required;\n }\n return 0;\n}\nint64_t FusionInfoCache::GetSharedMemoryUsage(const HloInstruction& instr) {\n {\n absl::MutexLock lock(&mutex_);\n auto it = shared_memory_usage_.find(&instr);\n if (it != shared_memory_usage_.end()) {\n return it->second;\n }\n }\n int64_t shared_memory_usage = SharedMemoryUsageNoCache(instr);\n absl::MutexLock lock(&mutex_);\n shared_memory_usage_.emplace(&instr, shared_memory_usage);\n return shared_memory_usage;\n}\nint64_t SharedMemoryUsage(const HloInstruction& instr, FusionInfoCache* cache) {\n if (!cache) {\n return SharedMemoryUsageNoCache(instr);\n }\n return cache->GetSharedMemoryUsage(instr);\n}\nconstexpr int64_t kMaxUnnestedReductionOutputsPerFusion = 8;\nstatic int64_t NumUnnestedReductionsNoCache(const HloInstruction& instr) {\n if (instr.opcode() == HloOpcode::kReduce &&\n 
IsReductionFromOrToContiguousDimensions(instr)) {\n return 1;\n }\n if (instr.opcode() == HloOpcode::kFusion) {\n int64_t sum = 0;\n for (const HloInstruction* hlo :\n instr.fused_instructions_computation()->instructions()) {\n sum += NumUnnestedReductionsNoCache(*hlo);\n }\n return sum;\n }\n return 0;\n}\nint64_t FusionInfoCache::GetNumUnnestedReductions(const HloInstruction& instr) {\n {\n absl::MutexLock lock(&mutex_);\n auto it = num_unnested_reductions_.find(&instr);\n if (it != num_unnested_reductions_.end()) {\n return it->second;\n }\n }\n int64_t num_unnested_reductions = NumUnnestedReductionsNoCache(instr);\n absl::MutexLock lock(&mutex_);\n num_unnested_reductions_.emplace(&instr, num_unnested_reductions);\n return num_unnested_reductions;\n}\nstatic int64_t NumUnnestedReductions(const HloInstruction& instr,\n FusionInfoCache* cache) {\n if (!cache) {\n return NumUnnestedReductionsNoCache(instr);\n }\n return cache->GetNumUnnestedReductions(instr);\n}\nFusionDecision FusionFitsInBudget(const HloInstruction& instr1,\n const HloInstruction& instr2,\n const se::DeviceDescription& device_info,\n bool is_consumer_producer_fusion,\n FusionInfoCache* cache ) {\n if (SharedMemoryUsage(instr1, cache) + SharedMemoryUsage(instr2, cache) >\n device_info.shared_memory_per_block()) {\n return FusionDecision::Forbid(\n \"shared memory usage would be over the budget of \")\n << device_info.shared_memory_per_block() << \"B\";\n }\n if (NumUnnestedReductions(instr1, cache) +\n NumUnnestedReductions(instr2, cache) >\n kMaxUnnestedReductionOutputsPerFusion) {\n return FusionDecision::Forbid(\"over \")\n << kMaxUnnestedReductionOutputsPerFusion\n << \" unnested reductions in fusion\";\n }\n int64_t num_output_buffers = ShapeUtil::SubshapeCount(instr1.shape()) +\n ShapeUtil::SubshapeCount(instr2.shape());\n if (instr1.operand_count() + instr2.operand_count() - 1 +\n num_output_buffers <=\n MaxOperandsAndOutputsPerFusion()) {\n return FusionDecision::Allow();\n } else {\n 
VLOG(5) << \"Operand count of \" << \"(\" << instr1.ToString()\n << \" ) = \" << instr1.operand_count() << \" and ( \"\n << instr2.ToString() << \" ) = \" << instr2.operand_count()\n << \" and num_output_buffers = \" << num_output_buffers\n << \" is bigger than the bound of \"\n << MaxOperandsAndOutputsPerFusion();\n }\n absl::flat_hash_set operands(instr1.operands().begin(),\n instr1.operands().end());\n operands.insert(instr2.operands().begin(), instr2.operands().end());\n operands.erase(&instr1);\n operands.erase(&instr2);\n if (is_consumer_producer_fusion &&\n operands.size() <= instr1.operands().size()) {\n return FusionDecision::Allow();\n }\n if (operands.size() + num_output_buffers > MaxOperandsAndOutputsPerFusion()) {\n return FusionDecision::Forbid(\n \"Number of operands and output buffers is larger than allowed budget \"\n \"per fusion\");\n }\n return FusionDecision::Allow();\n}\nbool CreatesHeavyComputation(const HloInstruction& producer,\n const HloInstruction& consumer) {\n auto producer_is_heavy = [&](const HloInstruction& instr) {\n if (producer.opcode() != HloOpcode::kFusion) {\n return IsExpensiveToRepeat(producer);\n }\n for (const auto& instr : producer.fused_instructions()) {\n if (IsExpensiveToRepeat(*instr)) {\n return true;\n }\n }\n return false;\n };\n if (!producer_is_heavy(producer)) {\n return false;\n }\n if (consumer.opcode() != HloOpcode::kFusion) {\n return IfFusedReadsElementsMultipleTimes(consumer);\n }\n for (const HloInstruction* operand : consumer.operands()) {\n if (operand != &producer) {\n continue;\n }\n const HloInstruction* root =\n consumer.fused_instructions_computation()->parameter_instruction(\n consumer.operand_index(operand));\n std::stack dfs;\n dfs.push(root);\n absl::flat_hash_set visited;\n while (!dfs.empty()) {\n const HloInstruction* cur = dfs.top();\n dfs.pop();\n if (!visited.insert(cur).second) {\n continue;\n }\n if (IfFusedReadsElementsMultipleTimes(*cur)) {\n return true;\n }\n for (const auto& user : 
cur->users()) {\n if (visited.contains(user)) {\n continue;\n }\n dfs.push(user);\n }\n }\n }\n return false;\n}\nbool IsFusibleAsMultiOutputFusionRoot(const HloInstruction& instr) {\n return instr.IsFusible() && !instr.IsCustomFusion() &&\n (IsInputFusibleReduction(instr) || IsInputFusibleTranspose(instr) ||\n instr.IsLoopFusion() || \n instr.IsElementwise());\n}\nHloInstruction::FusionKind ChooseFusionKind(const HloInstruction& producer,\n const HloInstruction& consumer) {\n return (IsInputFusible(consumer) || IsInputFusible(producer))\n ? HloInstruction::FusionKind::kInput\n : HloInstruction::FusionKind::kLoop;\n}\nbool IsConsumerTheOnlyNonRootUser(const HloInstruction& instr,\n const HloInstruction& consumer) {\n return absl::c_all_of(instr.users(), [&](const HloInstruction* user) {\n if (user->opcode() == HloOpcode::kGetTupleElement) {\n return IsConsumerTheOnlyNonRootUser(*user, consumer);\n }\n return user == &consumer || user == user->parent()->root_instruction();\n });\n}\nsize_t GetInstrCountOfFusible(const HloInstruction& instr) {\n return instr.opcode() == HloOpcode::kFusion ? 
instr.fused_instruction_count()\n : 1;\n}\nabsl::InlinedVector GetOutputsOfFusible(\n const HloInstruction& instr) {\n if (instr.opcode() != HloOpcode::kFusion) {\n return {&instr};\n }\n HloInstruction* root = instr.fused_expression_root();\n if (root->opcode() != HloOpcode::kTuple) {\n return {root};\n } else {\n auto v = root->operands();\n return absl::InlinedVector(v.begin(), v.end());\n }\n}\nsize_t GetOutputSizeOfFusible(const HloInstruction& instr) {\n if (!instr.IsMultiOutputFusion()) {\n return 1;\n }\n const HloInstruction* root = instr.fused_expression_root();\n return ShapeUtil::TupleElementCount(root->shape());\n}\nstatic void GetFusionRootsRec(const HloInstruction* root,\n std::vector& out) {\n if (root->opcode() == HloOpcode::kGetTupleElement &&\n root->operand(0)->opcode() == HloOpcode::kTuple) {\n return GetFusionRootsRec(root->operand(0)->operand(root->tuple_index()),\n out);\n } else if (root->opcode() == HloOpcode::kGetTupleElement) {\n out.push_back(root->operand(0));\n } else if (root->opcode() == HloOpcode::kTuple) {\n for (int i = 0; i < root->operand_count(); i++) {\n GetFusionRootsRec(root->operand(i), out);\n }\n } else {\n out.push_back(root);\n }\n}\nstd::vector GetFusionRoots(\n const HloComputation& computation) {\n std::vector out;\n GetFusionRootsRec(computation.root_instruction(), out);\n return out;\n}\nbool IsGenericTritonFusion(const HloInstruction& instr) {\n return instr.opcode() == HloOpcode::kFusion &&\n instr.fusion_kind() == HloInstruction::FusionKind::kCustom &&\n instr.backend_config().ok() &&\n instr.backend_config()\n ->fusion_backend_config()\n .kind() == kTritonFusionKind;\n}\nbool MayPreventVectorization(const HloFusionAdaptor& fusion) {\n static constexpr int kMaxConcatArgumentsForUnrolling = 10;\n return HloAnyOf(fusion, [&](auto node) {\n switch (node.opcode()) {\n case HloOpcode::kReduceWindow:\n case HloOpcode::kSort:\n case HloOpcode::kDot:\n case HloOpcode::kSin:\n case HloOpcode::kCos:\n case 
HloOpcode::kTan:\n case HloOpcode::kPower:\n case HloOpcode::kAtan2:\n return true;\n case HloOpcode::kConcatenate:\n return node.instruction().operand_count() >\n kMaxConcatArgumentsForUnrolling;\n case HloOpcode::kReduce:\n return node.instruction().shape().tuple_shapes_size() > 1;\n default:\n return false;\n }\n });\n}\nstd::vector GetFusibleComputations(\n const HloModule& module,\n const absl::flat_hash_set& execution_threads) {\n auto result = module.MakeComputationPostOrder(execution_threads);\n absl::flat_hash_set computations_not_to_fuse;\n for (const auto* computation : result) {\n for (const auto* instr : computation->instructions()) {\n if (HloInstruction::MightHaveCalledComputations(instr->opcode()) &&\n instr->opcode() != HloOpcode::kWhile &&\n instr->opcode() != HloOpcode::kConditional &&\n instr->opcode() != HloOpcode::kFusion) {\n for (auto* called : instr->called_computations()) {\n computations_not_to_fuse.insert(called);\n }\n }\n }\n }\n result.erase(\n std::remove_if(result.begin(), result.end(),\n [&](HloComputation* computation) {\n return computation->IsFusionComputation() ||\n computations_not_to_fuse.contains(computation);\n }),\n result.end());\n return result;\n}\nLaunchDimensionsConfig ComputeLoopFusionConfig(\n const HloFusionAnalysis& analysis) {\n return ComputeLoopFusionConfig(analysis, GetElementShape(analysis));\n}\nLaunchDimensionsConfig ComputeLoopFusionConfig(\n const HloFusionAnalysis& analysis, const Shape& element_shape) {\n int unroll_factor = 1;\n int64_t num_elements = ShapeUtil::ElementsIn(element_shape);\n int64_t n_threads_max = analysis.device_info().threads_per_core_limit() *\n analysis.device_info().core_count();\n if (num_elements >= n_threads_max &&\n !MayPreventVectorization(analysis.fusion())) {\n unroll_factor = ComputeMaxUnrollFactor(num_elements);\n }\n CHECK(absl::has_single_bit(static_cast(unroll_factor)));\n unroll_factor = std::max(\n unroll_factor,\n CeilOfRatio(8, 
analysis.input_output_info().smallest_output_dtype_bits));\n CHECK(absl::has_single_bit(static_cast(unroll_factor)));\n VLOG(2) << \"Unroll factor: \" << unroll_factor;\n LaunchDimensionsConfig launch_config{unroll_factor};\n return launch_config;\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/gpu/gpu_fusible.h\"\n#include \n#include \n#include \n#include \"absl/strings/str_cat.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace gpu {\nusing ::testing::ElementsAre;\nusing GpuFusibleTest = HloTestBase;\nconst char kModulePrefix[] = R\"(\n HloModule test_module\n scalar_add {\n lhs = f32[] parameter(0)\n rhs = f32[] parameter(1)\n ROOT add = f32[] add(lhs, rhs)\n })\";\nTEST_F(GpuFusibleTest, IsPhysicallyTransposing_ElementwiseProducer) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n ENTRY entry {\n p0 = f32[2,2,2]{2,1,0} parameter(0)\n c0 = f32[] constant(0)\n exp = f32[2,2,2]{2,1,0} exponential(p0)\n ROOT reduce = f32[2,2]{1,0} reduce(exp, c0), dimensions={2}, to_apply=scalar_add\n })\"))\n .value();\n SCOPED_TRACE(module->ToString());\n const HloInstruction* exp =\n module->entry_computation()->root_instruction()->operand(0);\n ASSERT_EQ(exp->opcode(), HloOpcode::kExp);\n EXPECT_FALSE(IsPhysicallyTransposing(*exp));\n}\nTEST_F(GpuFusibleTest, IsPhysicallyTransposing_MixedLayoutProducer) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n mixed_input_layouts_computation {\n p0.1 = f16[128,1024,32,32]{1,3,2,0} parameter(0)\n p1.1 = f16[128,1024,32,32]{3,2,1,0} parameter(1)\n copy = f16[128,1024,32,32]{1,3,2,0} copy(p1.1)\n c0 = f16[] constant(0)\n broadcast = f16[128,1024,32,32]{1,3,2,0} broadcast(c0), dimensions={}\n greater-than = pred[128,1024,32,32]{1,3,2,0} compare(copy, 
broadcast), direction=GT\n ROOT root = f16[128,1024,32,32]{1,3,2,0} select(greater-than, p0.1, broadcast)\n }\n fused_reduce {\n p0.2 = f16[128,1024,32,32]{1,3,2,0} parameter(0)\n convert = f32[128,1024,32,32]{1,3,2,0} convert(p0.2)\n c0.2 = f32[] constant(0)\n ROOT reduce = f32[1024]{0} reduce(convert, c0.2), dimensions={0,2,3}, to_apply=scalar_add\n }\n ENTRY entry {\n p0 = f16[128,1024,32,32]{1,3,2,0} parameter(0)\n p1 = f16[128,1024,32,32]{3,2,1,0} parameter(1)\n loop_fusion = f16[128,1024,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=mixed_input_layouts_computation\n reduce_fusion = f32[1024]{0} fusion(loop_fusion), kind=kInput, calls=fused_reduce\n ROOT root = (f32[1024]{0}, f16[128,1024,32,32]{1,3,2,0}) tuple(reduce_fusion, loop_fusion)\n })\"))\n .value();\n SCOPED_TRACE(module->ToString());\n const HloInstruction* loop_fusion =\n module->entry_computation()->root_instruction()->operand(1);\n ASSERT_EQ(loop_fusion->fused_expression_root()->opcode(), HloOpcode::kSelect);\n EXPECT_TRUE(IsPhysicallyTransposing(*loop_fusion));\n}\nTEST_F(GpuFusibleTest,\n IsPhysicallyTransposing_MixedLayoutProducerWithTrivialDim) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n mixed_input_layouts_computation {\n p0.1 = f16[128,1,32,32]{1,3,2,0} parameter(0)\n p1.1 = f16[128,1,32,32]{3,2,1,0} parameter(1)\n bitcast = f16[128,1,32,32]{1,3,2,0} bitcast(p1.1)\n c0 = f16[] constant(0)\n broadcast = f16[128,1,32,32]{1,3,2,0} broadcast(c0), dimensions={}\n greater-than = pred[128,1,32,32]{1,3,2,0} compare(bitcast, broadcast), direction=GT\n ROOT root = f16[128,1,32,32]{1,3,2,0} select(greater-than, p0.1, broadcast)\n }\n fused_reduce {\n p0.2 = f16[128,1,32,32]{1,3,2,0} parameter(0)\n convert = f32[128,1,32,32]{1,3,2,0} convert(p0.2)\n c0.2 = f32[] constant(0)\n ROOT reduce = f32[1]{0} reduce(convert, c0.2), dimensions={0,2,3}, to_apply=scalar_add\n }\n ENTRY entry {\n p0 = f16[128,1,32,32]{1,3,2,0} parameter(0)\n p1 = f16[128,1,32,32]{3,2,1,0} 
parameter(1)\n loop_fusion = f16[128,1,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=mixed_input_layouts_computation\n reduce_fusion = f32[1]{0} fusion(loop_fusion), kind=kInput, calls=fused_reduce\n ROOT root = (f32[1]{0}, f16[128,1,32,32]{1,3,2,0}) tuple(reduce_fusion, loop_fusion)\n })\"))\n .value();\n SCOPED_TRACE(module->ToString());\n const HloInstruction* loop_fusion =\n module->entry_computation()->root_instruction()->operand(1);\n ASSERT_EQ(loop_fusion->fused_expression_root()->opcode(), HloOpcode::kSelect);\n EXPECT_FALSE(IsPhysicallyTransposing(*loop_fusion));\n}\nTEST_F(GpuFusibleTest, IsPhysicallyTransposing_CopyProducer) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_reduce {\n p0.1 = f32[128,1024,32,32]{1,3,2,0} parameter(0)\n c0.1 = f32[] constant(0)\n ROOT reduce = f32[1024]{0} reduce(p0.1, c0.1), dimensions={0,2,3}, to_apply=scalar_add\n }\n ENTRY entry {\n p0 = f16[128,1024,32,32]{3,2,1,0} parameter(0)\n copy = f32[128,1024,32,32]{1,3,2,0} copy(p0)\n ROOT reduce_fusion = f32[1024]{0} fusion(copy), kind=kInput, calls=fused_reduce\n })\"))\n .value();\n SCOPED_TRACE(module->ToString());\n const HloInstruction* copy =\n module->entry_computation()->root_instruction()->operand(0);\n ASSERT_EQ(copy->opcode(), HloOpcode::kCopy);\n EXPECT_TRUE(IsPhysicallyTransposing(*copy));\n}\nTEST_F(GpuFusibleTest, IsPhysicallyTransposing_PhysicalTranspose) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_reduce {\n p0.1 = f32[1024,128,32,32]{3,2,1,0} parameter(0)\n c0.1 = f32[] constant(0)\n ROOT reduce = f32[1024]{0} reduce(p0.1, c0.1), dimensions={1,2,3}, to_apply=scalar_add\n }\n ENTRY entry {\n p0 = f16[128,1024,32,32]{3,2,1,0} parameter(0)\n copy = f32[1024,128,32,32]{3,2,1,0} transpose(p0), dimensions={1,0,2,3}\n ROOT reduce_fusion = f32[1024]{0} fusion(copy), kind=kInput, calls=fused_reduce\n })\"))\n .value();\n SCOPED_TRACE(module->ToString());\n const HloInstruction* 
transpose =\n module->entry_computation()->root_instruction()->operand(0);\n ASSERT_EQ(transpose->opcode(), HloOpcode::kTranspose);\n EXPECT_TRUE(IsPhysicallyTransposing(*transpose));\n}\nTEST_F(GpuFusibleTest, IsPhysicallyTransposing_LayoutChangingFusionProducer) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n layout_changing_computation {\n p0.1 = f16[128,1024,32,32]{3,2,1,0} parameter(0)\n p1.1 = f16[128,1024,32,32]{3,2,1,0} parameter(1)\n c0 = f16[] constant(0)\n broadcast = f16[128,1024,32,32]{3,2,1,0} broadcast(c0), dimensions={}\n greater-than = pred[128,1024,32,32]{3,2,1,0} compare(p1.1, broadcast), direction=GT\n select = f16[128,1024,32,32]{3,2,1,0} select(greater-than, p0.1, broadcast)\n ROOT root = f16[128,1024,32,32]{1,3,2,0} copy(select)\n }\n fused_reduce {\n p0.2 = f16[128,1024,32,32]{1,3,2,0} parameter(0)\n convert = f32[128,1024,32,32]{1,3,2,0} convert(p0.2)\n c0.2 = f32[] constant(0)\n ROOT reduce = f32[1024]{0} reduce(convert, c0.2), dimensions={0,2,3}, to_apply=scalar_add\n }\n ENTRY entry {\n p0 = f16[128,1024,32,32]{3,2,1,0} parameter(0)\n p1 = f16[128,1024,32,32]{3,2,1,0} parameter(1)\n loop_fusion = f16[128,1024,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=layout_changing_computation\n ROOT reduce_fusion = f32[1024]{0} fusion(loop_fusion), kind=kInput, calls=fused_reduce\n })\"))\n .value();\n SCOPED_TRACE(module->ToString());\n const HloInstruction* loop_fusion =\n module->entry_computation()->root_instruction()->operand(0);\n ASSERT_EQ(loop_fusion->fused_expression_root()->opcode(), HloOpcode::kCopy);\n EXPECT_TRUE(IsPhysicallyTransposing(*loop_fusion));\n}\nTEST_F(GpuFusibleTest,\n IsPhysicallyTransposing_ConsiderMaximumTrueRanksParamsOnly) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n broadcasting_computation {\n p0.1 = f32[128,1024,32,32]{1,3,2,0} parameter(0)\n p1.1 = f32[1,128,1,1]{3,2,1,0} parameter(1)\n reshape = f32[128]{0} reshape(p1.1)\n broadcast = 
f32[128,1024,32,32]{1,3,2,0} broadcast(reshape), dimensions={0}\n ROOT add = f32[128,1024,32,32]{1,3,2,0} add(p0.1, broadcast)\n }\n ENTRY entry {\n p0 = f32[128,1024,32,32]{1,3,2,0} parameter(0)\n p1 = f32[1,128,1,1]{3,2,1,0} parameter(1)\n loop_fusion = f32[128,1024,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=broadcasting_computation\n c0.2 = f32[] constant(0)\n ROOT reduce = f32[1024]{0} reduce(loop_fusion, c0.2), dimensions={0,2,3}, to_apply=scalar_add\n })\"))\n .value();\n SCOPED_TRACE(module->ToString());\n const HloInstruction* loop_fusion =\n module->entry_computation()->root_instruction()->operand(0);\n ASSERT_EQ(loop_fusion->fused_expression_root()->opcode(), HloOpcode::kAdd);\n EXPECT_FALSE(IsPhysicallyTransposing(*loop_fusion));\n}\nTEST_F(GpuFusibleTest, TransposesMinorDimension) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n ENTRY entry {\n default_layout = f32[10,20,30,40]{3,2,1,0} parameter(0)\n non_default_layout = f32[10,20,30,40]{1,2,3,0} parameter(1)\n transpose_minor_default = f32[10,20,40,30]{3,2,1,0} transpose(default_layout), dimensions={0,1,3,2}\n no_transpose_minor_default = f32[10,20,40,30]{2,3,1,0} transpose(default_layout), dimensions={0,1,3,2}\n transpose_major_default = f32[10,30,20,40]{3,2,1,0} transpose(default_layout), dimensions={0,2,1,3}\n transpose_minor_non_default = f32[10,30,20,40]{1,2,3,0} transpose(non_default_layout), dimensions={0,2,1,3}\n no_transpose_minor_non_default = f32[10,20,40,30]{1,2,0,3} transpose(non_default_layout), dimensions={0,1,3,2}\n transpose_major_non_default = f32[10,20,40,30]{1,2,3,0} transpose(non_default_layout), dimensions={0,1,3,2}\n ROOT r = tuple(transpose_minor_default, no_transpose_minor_default, transpose_major_default,\n transpose_minor_non_default, no_transpose_minor_non_default, transpose_major_non_default)\n })\"));\n auto* tuple = (*module)->entry_computation()->root_instruction();\n EXPECT_TRUE(TransposesMinorDimension(tuple->operand(0)));\n 
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(1)));\n EXPECT_FALSE(TransposesMinorDimension(tuple->operand(2)));\n EXPECT_TRUE(TransposesMinorDimension(tuple->operand(3)));\n EXPECT_FALSE(TransposesMinorDimension(tuple->operand(4)));\n EXPECT_FALSE(TransposesMinorDimension(tuple->operand(5)));\n}\nTEST_F(GpuFusibleTest, TransposesMinorDimensionSkipTrivialDimensions) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n ENTRY entry {\n default_layout = f32[10,20,1,1]{3,2,1,0} parameter(0)\n non_default_layout = f32[10,20,1,1]{1,2,3,0} parameter(1)\n transpose_minor_default = f32[10,20,1,1]{3,2,1,0} transpose(default_layout), dimensions={0,1,3,2}\n transpose_nontrivial_minor_default = f32[10,1,20,1]{3,2,1,0} transpose(default_layout), dimensions={0,2,1,3}\n no_transpose_minor_default = f32[10,20,1,1]{2,3,1,0} transpose(default_layout), dimensions={0,1,3,2}\n transpose_one_major_default = f32[1,20,10,1]{3,2,1,0} transpose(default_layout), dimensions={2,1,0,3}\n transpose_two_major_default = f32[20,10,1,1]{3,2,1,0} transpose(default_layout), dimensions={1,0,2,3}\n transpose_minor_non_default = f32[10,1,20,1]{1,2,3,0} transpose(non_default_layout), dimensions={0,2,1,3}\n no_transpose_minor_non_default = f32[10,20,1,1]{1,2,0,3} transpose(non_default_layout), dimensions={0,1,3,2}\n transpose_major_non_default = f32[10,20,1,1]{1,2,3,0} transpose(non_default_layout), dimensions={0,1,3,2}\n ROOT r = tuple(transpose_minor_default, transpose_nontrivial_minor_default, no_transpose_minor_default, transpose_one_major_default, transpose_two_major_default,\n transpose_minor_non_default, no_transpose_minor_non_default, transpose_major_non_default)\n })\"));\n auto* tuple = (*module)->entry_computation()->root_instruction();\n EXPECT_FALSE(TransposesMinorDimension(tuple->operand(0)));\n EXPECT_FALSE(TransposesMinorDimension(tuple->operand(1)));\n EXPECT_FALSE(TransposesMinorDimension(tuple->operand(2)));\n 
EXPECT_TRUE(TransposesMinorDimension(tuple->operand(3)));\n EXPECT_TRUE(TransposesMinorDimension(tuple->operand(4)));\n EXPECT_FALSE(TransposesMinorDimension(tuple->operand(5)));\n EXPECT_FALSE(TransposesMinorDimension(tuple->operand(6)));\n EXPECT_FALSE(TransposesMinorDimension(tuple->operand(7)));\n}\nTEST_F(GpuFusibleTest, CopyTransposesMinorDimension) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n ENTRY entry {\n default_layout = f32[10,20,30,40]{3,2,1,0} parameter(0)\n non_default_layout = f32[10,20,30,40]{1,2,3,0} parameter(1)\n copy_transpose_minor_default = f32[10,20,30,40]{2,3,1,0} copy(default_layout)\n copy_no_transpose_minor_default = f32[10,20,30,40]{3,2,1,0} copy(default_layout)\n copy_transpose_minor_non_default = f32[10,20,30,40]{2,1,3,0} copy(non_default_layout)\n copy_no_transpose_minor_non_default = f32[10,20,30,40]{1,2,3,0} copy(non_default_layout)\n ROOT r = tuple(copy_transpose_minor_default, copy_no_transpose_minor_default,\n copy_transpose_minor_non_default, copy_no_transpose_minor_non_default)\n })\"));\n auto* tuple = (*module)->entry_computation()->root_instruction();\n EXPECT_TRUE(TransposesMinorDimension(tuple->operand(0)));\n EXPECT_FALSE(TransposesMinorDimension(tuple->operand(1)));\n EXPECT_TRUE(TransposesMinorDimension(tuple->operand(2)));\n EXPECT_FALSE(TransposesMinorDimension(tuple->operand(3)));\n}\nTEST_F(GpuFusibleTest, CopyTransposesMinorDimensionSkipTrivialDimensions) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n ENTRY entry {\n default_layout = f32[10,20,1,1]{3,2,1,0} parameter(0)\n non_default_layout = f32[10,20,1,1]{1,2,3,0} parameter(1)\n copy_transpose_minor_default = f32[10,20,1,1]{2,3,1,0} copy(default_layout)\n copy_no_transpose_minor_default = f32[10,20,1,1]{3,2,1,0} copy(default_layout)\n copy_transpose_minor_non_default = f32[10,20,1,1]{2,0,3,1} copy(non_default_layout)\n copy_no_transpose_minor_non_default = f32[10,20,1,1]{1,2,3,0} 
copy(non_default_layout)\n ROOT r = tuple(copy_transpose_minor_default, copy_no_transpose_minor_default,\n copy_transpose_minor_non_default, copy_no_transpose_minor_non_default)\n })\"));\n auto* tuple = (*module)->entry_computation()->root_instruction();\n EXPECT_FALSE(TransposesMinorDimension(tuple->operand(0)));\n EXPECT_FALSE(TransposesMinorDimension(tuple->operand(1)));\n EXPECT_TRUE(TransposesMinorDimension(tuple->operand(2)));\n EXPECT_FALSE(TransposesMinorDimension(tuple->operand(3)));\n}\nTEST_F(GpuFusibleTest, IsReduceInputFusion_ReductionToVector) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n ENTRY entry {\n c0 = f32[] parameter(0)\n p1 = f32[128,512,28,28]{3,2,1,0} parameter(1)\n ROOT reduce = f32[512]{0} reduce(p1, c0), dimensions={0,2,3}, to_apply=scalar_add\n })\"))\n .value();\n SCOPED_TRACE(module->ToString());\n const HloInstruction* reduce =\n module->entry_computation()->root_instruction();\n ASSERT_EQ(reduce->opcode(), HloOpcode::kReduce);\n EXPECT_FALSE(IsReduceInputFusion(*reduce));\n EXPECT_TRUE(IsInputFusibleReduction(*reduce));\n}\nTEST_F(GpuFusibleTest, IsReduceInputFusion_ElementalReduction) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n ENTRY entry {\n c0 = f32[] parameter(0)\n p1 = f32[8,512,5,16,1,1]{5,4,3,2,1,0} parameter(1)\n ROOT reduce = f32[512,5,1,1]{3,2,1,0} reduce(p1, c0), dimensions={3,0},\n to_apply=scalar_add\n })\"))\n .value();\n SCOPED_TRACE(module->ToString());\n const HloInstruction* reduce =\n module->entry_computation()->root_instruction();\n ASSERT_EQ(reduce->opcode(), HloOpcode::kReduce);\n EXPECT_FALSE(IsReduceInputFusion(*reduce));\n EXPECT_FALSE(IsInputFusibleReduction(*reduce));\n}\nTEST_F(GpuFusibleTest, IsReduceInputFusion_SingleOutputInputReduceFusion) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_reduction {\n c0 = f32[] constant(0)\n p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)\n ROOT reduce = 
f32[128,512]{1,0} reduce(p1, c0), dimensions={2,3}, to_apply=scalar_add\n }\n ENTRY entry {\n p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)\n ROOT fusion = f32[128,512]{1,0} fusion(p0), kind=kInput, calls=fused_reduction\n })\"))\n .value();\n const HloInstruction* reduce =\n module->entry_computation()->root_instruction();\n ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);\n EXPECT_TRUE(IsReduceInputFusion(*reduce));\n EXPECT_TRUE(IsInputFusibleReduction(*reduce));\n}\nTEST_F(GpuFusibleTest, IsReduceInputFusion_SingleOutputLoopReduceFusion) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_reduction {\n c0 = f32[] constant(0)\n p1 = f32[8,512,5,16,1,1]{5,4,3,2,1,0} parameter(0)\n ROOT reduce = f32[8,5,1,1]{3,2,1,0} reduce(p1, c0), dimensions={1,3}, to_apply=scalar_add\n }\n ENTRY entry {\n p0 = f32[8,512,5,16,1,1]{5,4,3,2,1,0} parameter(0)\n ROOT fusion = f32[8,5,1,1]{3,2,1,0} fusion(p0), kind=kLoop, calls=fused_reduction\n })\"))\n .value();\n const HloInstruction* reduce =\n module->entry_computation()->root_instruction();\n ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);\n EXPECT_FALSE(IsReduceInputFusion(*reduce));\n EXPECT_FALSE(IsInputFusibleReduction(*reduce));\n}\nTEST_F(GpuFusibleTest, IsReduceInputFusion_MultiOutputInputReduceFusion) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_reduction {\n c0 = f32[] constant(0)\n p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)\n reduce.0 = f32[128,512]{1,0} reduce(p1, c0), dimensions={2,3}, to_apply=scalar_add\n reduce.1 = f32[128,512]{1,0} reduce(p1, c0), dimensions={2,3}, to_apply=scalar_add\n ROOT root = (f32[128,512]{1,0}, f32[128,512]{1,0}) tuple(reduce.0, reduce.1)\n }\n ENTRY entry {\n p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)\n ROOT fusion = (f32[128,512]{1,0}, f32[128,512]{1,0}) fusion(p0), kind=kInput, calls=fused_reduction\n })\"))\n .value();\n const HloInstruction* reduce =\n 
module->entry_computation()->root_instruction();\n ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);\n EXPECT_TRUE(IsReduceInputFusion(*reduce));\n EXPECT_TRUE(IsInputFusibleReduction(*reduce));\n}\nTEST_F(GpuFusibleTest,\n IsReduceInputFusion_MultiOutputInputReduceFusionWithExtraOutputs) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_reduction {\n c0 = f32[] constant(0)\n p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)\n reduce = f32[128,512]{1,0} reduce(p1, c0), dimensions={2,3}, to_apply=scalar_add\n mul = f32[128,512,28,28]{3,2,1,0} multiply(p1, p1)\n ROOT root = (f32[128,512]{1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(reduce, mul)\n }\n ENTRY entry {\n p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)\n ROOT fusion = (f32[128,512]{1,0}, f32[128,512,28,28]{3,2,1,0}) fusion(p0), kind=kInput, calls=fused_reduction\n })\"))\n .value();\n const HloInstruction* reduce =\n module->entry_computation()->root_instruction();\n ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);\n EXPECT_TRUE(IsReduceInputFusion(*reduce));\n EXPECT_TRUE(IsInputFusibleReduction(*reduce));\n}\nTEST_F(GpuFusibleTest, IsReduceInputFusion_MultiOutputLoopReduceFusion) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_reduction {\n c0 = f32[] constant(0)\n p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)\n reduce.0 = f32[512,28]{1,0} reduce(p1, c0), dimensions={0,2}, to_apply=scalar_add\n reduce.1 = f32[512,28]{1,0} reduce(p1, c0), dimensions={0,2}, to_apply=scalar_add\n ROOT root = (f32[512,28]{1,0}, f32[512,28]{1,0}) tuple(reduce.0, reduce.1)\n }\n ENTRY entry {\n p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)\n ROOT fusion = (f32[512,28]{1,0}, f32[512,28]{1,0}) fusion(p0), kind=kLoop, calls=fused_reduction\n })\"))\n .value();\n const HloInstruction* reduce =\n module->entry_computation()->root_instruction();\n ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);\n EXPECT_FALSE(IsReduceInputFusion(*reduce));\n 
EXPECT_FALSE(IsInputFusibleReduction(*reduce));\n}\nTEST_F(GpuFusibleTest,\n IsReduceInputFusion_MultiOutputLoopFusionReduceAndElementwiseOp) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_reduction {\n c0 = f32[] constant(0)\n p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)\n reduce = f32[512,28]{1,0} reduce(p1, c0), dimensions={0,2}, to_apply=scalar_add\n mul = f32[128,512,28,28]{3,2,1,0} multiply(p1, p1)\n ROOT root = (f32[512,28]{1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(reduce, mul)\n }\n ENTRY entry {\n p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)\n ROOT fusion = (f32[512,28]{1,0}, f32[128,512,28,28]{3,2,1,0}) fusion(p0), kind=kLoop, calls=fused_reduction\n })\"))\n .value();\n const HloInstruction* reduce =\n module->entry_computation()->root_instruction();\n ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);\n EXPECT_FALSE(IsReduceInputFusion(*reduce));\n EXPECT_FALSE(IsInputFusibleReduction(*reduce));\n}\nTEST_F(GpuFusibleTest, CustomFusionIsNotFusibleAsConsumer) {\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(R\"(\ntriton_fusion {\n p = s32[20,3] parameter(0)\n ROOT neg = s32[20,3] negate(p)\n}\nENTRY e {\n p = s32[20,3] parameter(0)\n ROOT r = s32[20,3] fusion(p), kind=kCustom, calls=triton_fusion\n})\"));\n const HloInstruction* root = module->entry_computation()->root_instruction();\n EXPECT_FALSE(IsFusibleAsMultiOutputFusionRoot(*root));\n}\nTEST_F(GpuFusibleTest, FusionHeroesAreCompatible_TransposeFusionCompatible) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_computation_1 {\n p0.1 = f32[64,32]{1,0} parameter(0)\n neg = f32[64,32]{1,0} negate(p0.1)\n ROOT transpose = f32[32,64]{1,0} transpose(neg), dimensions={1,0}\n }\n fused_computation_2 {\n p0.2 = f32[32,64]{1,0} parameter(0)\n neg = f32[32,64]{1,0} negate(p0.2)\n ROOT add = f32[32,64]{1,0} add(neg, neg)\n }\n ENTRY entry {\n p0 = f32[64,32]{1,0} parameter(0)\n fusion.1 = 
f32[32,64]{1,0} fusion(p0), kind=kLoop, calls=fused_computation_1\n ROOT fusion.2 = f32[32,64]{1,0} fusion(fusion.1), kind=kLoop, calls=fused_computation_2\n })\"))\n .value();\n const HloInstruction* fusion_1 =\n module->entry_computation()->root_instruction();\n const HloInstruction* fusion_2 = fusion_1->operand(0);\n EXPECT_TRUE(FusionHeroesAreCompatible(fusion_1->fused_expression_root(),\n fusion_2->fused_expression_root()));\n EXPECT_TRUE(FusionHeroesAreCompatible(fusion_2->fused_expression_root(),\n fusion_1->fused_expression_root()));\n}\nTEST_F(GpuFusibleTest, FusionHeroesAreCompatible_TransposeFusionNotCompatible) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_computation_1 {\n p0.1 = f32[64,32]{1,0} parameter(0)\n neg = f32[64,32]{1,0} negate(p0.1)\n bc = f32[1,64,32]{2,1,0} bitcast(neg)\n transpose = f32[1,32,64]{2,1,0} transpose(bc), dimensions={0,2,1}\n ROOT bc2 = f32[32,64]{1,0} bitcast(transpose)\n }\n fused_computation_2 {\n p0.2 = f32[32,64]{1,0} parameter(0)\n broadcast = f32[32,64,4]{2,1,0} broadcast(p0.2), dimensions={0,1}\n ROOT add = f32[32,64,4]{2,1,0} add(broadcast, broadcast)\n }\n ENTRY entry {\n p0 = f32[64,32]{1,0} parameter(0)\n fusion.1 = f32[32,64]{1,0} fusion(p0), kind=kLoop, calls=fused_computation_1\n ROOT fusion.2 = f32[32,64,4]{2,1,0} fusion(fusion.1), kind=kLoop, calls=fused_computation_2\n })\"))\n .value();\n const HloInstruction* fusion_1 =\n module->entry_computation()->root_instruction();\n const HloInstruction* fusion_2 = fusion_1->operand(0);\n EXPECT_FALSE(\n FusionHeroesAreCompatible(fusion_1->fused_expression_root(),\n fusion_2->fused_expression_root()->operand(0)));\n EXPECT_FALSE(\n FusionHeroesAreCompatible(fusion_2->fused_expression_root()->operand(0),\n fusion_1->fused_expression_root()));\n}\nTEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_LoopFusions) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_computation_1 {\n p0.1 
= f32[6400]{0} parameter(0)\n ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)\n }\n fused_computation_2 {\n p0.2 = f32[6400]{0} parameter(0)\n const.2 = f32[] constant(1)\n broadcast = f32[6400]{0} broadcast(const.2), dimensions={}\n ROOT div = f32[6400]{0} divide(p0.2, broadcast)\n }\n ENTRY entry {\n p0 = f32[6400]{0} parameter(0)\n fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1\n fusion.2 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_2\n ROOT root = (f32[6400]{0}, f32[6400]{0}) tuple(fusion.1, fusion.2)\n })\"))\n .value();\n const HloInstruction* fusion_1 =\n module->entry_computation()->root_instruction()->operand(0);\n const HloInstruction* fusion_2 =\n module->entry_computation()->root_instruction()->operand(1);\n EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));\n}\nTEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_IgnoreFpPrecision) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_computation_1 {\n p0.1 = f32[6400]{0} parameter(0)\n ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)\n }\n fused_computation_2 {\n p0.2 = f32[6400]{0} parameter(0)\n ROOT convert = f16[6400]{0} convert(p0.2)\n }\n ENTRY entry {\n p0 = f32[6400]{0} parameter(0)\n fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1\n fusion.2 = f16[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_2\n ROOT root = (f32[6400]{0}, f16[6400]{0}) tuple(fusion.1, fusion.2)\n })\"))\n .value();\n const HloInstruction* fusion_1 =\n module->entry_computation()->root_instruction()->operand(0);\n const HloInstruction* fusion_2 =\n module->entry_computation()->root_instruction()->operand(1);\n EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));\n}\nTEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_BitcastCompatible) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_computation_1 {\n p0.1 = f32[6400]{0} 
parameter(0)\n ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)\n }\n fused_computation_2 {\n p0.2 = f32[6400]{0} parameter(0)\n bitcast = f32[1,6400]{1,0} bitcast(p0.2)\n ROOT convert = f16[1,6400]{1,0} convert(bitcast)\n }\n ENTRY entry {\n p0 = f32[6400]{0} parameter(0)\n fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1\n fusion.2 = f16[1,6400]{1,0} fusion(p0), kind=kLoop, calls=fused_computation_2\n ROOT root = (f32[6400]{0}, f16[1,6400]{1,0}) tuple(fusion.1, fusion.2)\n })\"))\n .value();\n const HloInstruction* fusion_1 =\n module->entry_computation()->root_instruction()->operand(0);\n const HloInstruction* fusion_2 =\n module->entry_computation()->root_instruction()->operand(1);\n EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));\n}\nTEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_Reduce) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_computation_1 {\n p0.1 = f32[6400]{0} parameter(0)\n ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)\n }\n ENTRY entry {\n p0 = f32[6400]{0} parameter(0)\n fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1\n const.2 = f32[] constant(0)\n reduce = f32[] reduce(p0, const.2), dimensions={0}, to_apply=scalar_add\n ROOT root = (f32[6400]{0}, f32[]) tuple(fusion.1, reduce)\n })\"))\n .value();\n const HloInstruction* fusion =\n module->entry_computation()->root_instruction()->operand(0);\n const HloInstruction* reduce =\n module->entry_computation()->root_instruction()->operand(1);\n EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion, *reduce));\n}\nTEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_Elementwise) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_computation_1 {\n p0.1 = f32[6400]{0} parameter(0)\n ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)\n }\n ENTRY entry {\n p0 = f32[6400]{0} parameter(0)\n fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, 
calls=fused_computation_1\n const.2 = f32[] constant(1)\n broadcast = f32[6400]{0} broadcast(const.2), dimensions={}\n div = f32[6400]{0} divide(p0, broadcast)\n ROOT root = (f32[6400]{0}, f32[6400]{0}) tuple(fusion.1, div)\n })\"))\n .value();\n const HloInstruction* fusion =\n module->entry_computation()->root_instruction()->operand(0);\n const HloInstruction* div =\n module->entry_computation()->root_instruction()->operand(1);\n EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion, *div));\n}\nTEST_F(GpuFusibleTest,\n ShapesCompatibleForMultiOutputFusion_MultiOutputLoopFusion) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_computation_1 {\n p0.1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)\n mul = f32[8,1,5,16,1,1]{5,4,3,2,1,0} multiply(p0.1, p0.1)\n exp = f32[8,1,5,16,1,1]{5,4,3,2,1,0} exponential(p0.1)\n ROOT tuple = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}) tuple(mul, exp)\n }\n fused_computation_2 {\n p0.2 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)\n const.2 = f32[] constant(0)\n broadcast = f32[8,1,5,16,1,1]{5,4,3,2,1,0} broadcast(const.2), dimensions={}\n ROOT add = f32[8,1,5,16,1,1]{5,4,3,2,1,0} add(p0.2, broadcast)\n }\n ENTRY entry {\n p0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)\n fusion.1 = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}) fusion(p0), kind=kLoop, calls=fused_computation_1\n fusion.2 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} fusion(p0), kind=kLoop, calls=fused_computation_2\n gte0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=0\n gte1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=1\n ROOT root = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}) tuple(gte0, gte1, fusion.2)\n })\"))\n .value();\n const HloInstruction* fusion_1 =\n module->entry_computation()->root_instruction()->operand(0)->operand(0);\n const HloInstruction* fusion_2 =\n 
module->entry_computation()->root_instruction()->operand(2);\n EXPECT_NE(fusion_1, fusion_2);\n EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));\n}\nTEST_F(GpuFusibleTest,\n ShapesCompatibleForMultiOutputFusion_DifferentElementType) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_computation_1 {\n p0.1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)\n mul = f32[8,1,5,16,1,1]{5,4,3,2,1,0} multiply(p0.1, p0.1)\n exp = f32[8,1,5,16,1,1]{5,4,3,2,1,0} exponential(p0.1)\n ROOT tuple = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}) tuple(mul, exp)\n }\n fused_computation_2 {\n p0.2 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)\n const.2 = f32[] constant(0)\n broadcast = f32[8,1,5,16,1,1]{5,4,3,2,1,0} broadcast(const.2), dimensions={}\n add = f32[8,1,5,16,1,1]{5,4,3,2,1,0} add(p0.2, broadcast)\n ROOT convert = s32[8,1,5,16,1,1]{5,4,3,2,1,0} convert(add)\n }\n ENTRY entry {\n p0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)\n fusion.1 = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}) fusion(p0), kind=kLoop, calls=fused_computation_1\n fusion.2 = s32[8,1,5,16,1,1]{5,4,3,2,1,0} fusion(p0), kind=kLoop, calls=fused_computation_2\n gte0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=0\n gte1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=1\n ROOT root = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}, s32[8,1,5,16,1,1]{5,4,3,2,1,0}) tuple(gte0, gte1, fusion.2)\n })\"))\n .value();\n const HloInstruction* fusion_1 =\n module->entry_computation()->root_instruction()->operand(0)->operand(0);\n const HloInstruction* fusion_2 =\n module->entry_computation()->root_instruction()->operand(2);\n EXPECT_NE(fusion_1, fusion_2);\n EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));\n}\nTEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_UnfusedOps) {\n auto module = 
ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n ENTRY reduce {\n p0 = f32[32,32,32]{2,1,0} parameter(0)\n c0 = f32[] constant(0)\n exp = f32[32,32,32]{2,1,0} exponential(p0)\n reduce = f32[32,32]{1,0} reduce(exp, c0), dimensions={2},\n to_apply=scalar_add\n ROOT root = (f32[32,32]{1,0}, f32[32,32,32]{2,1,0}) tuple(reduce, exp)\n })\"))\n .value();\n const HloInstruction* reduce =\n module->entry_computation()->root_instruction()->operand(0);\n const HloInstruction* exp =\n module->entry_computation()->root_instruction()->operand(1);\n EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*reduce, *exp));\n}\nTEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_DifferentLayouts) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n ENTRY reduce {\n p0 = f32[2,2,2]{2,1,0} parameter(0)\n p1 = f32[2,2,2]{0,1,2} parameter(1)\n c0 = f32[] constant(0)\n exp = f32[2,2,2]{2,1,0} exponential(p0)\n reduce = f32[2,2]{0,1} reduce(p1, c0), dimensions={2}, to_apply=scalar_add\n ROOT root = (f32[2,2]{0,1}, f32[2,2,2]{2,1,0}) tuple(reduce, exp)\n })\"))\n .value();\n const HloInstruction* reduce =\n module->entry_computation()->root_instruction()->operand(0);\n const HloInstruction* exp =\n module->entry_computation()->root_instruction()->operand(1);\n EXPECT_FALSE(ShapesCompatibleForMultiOutputFusion(*reduce, *exp));\n}\nTEST_F(\n GpuFusibleTest,\n ShapesCompatibleForMultiOutputFusion_SiblingTransposeFusionsNotCompatible) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_021_transpose {\n param_0 = f32[20,20,20]{2,1,0} parameter(0)\n transpose = f32[20,20,20]{2,1,0} transpose(param_0), dimensions={0,2,1}\n ROOT bitcast = f32[8000]{0} bitcast(transpose)\n }\n fused_220_transpose {\n param_0 = f32[20,20,20]{2,1,0} parameter(0)\n transpose = f32[20,20,20]{2,1,0} transpose(param_0), dimensions={2,1,0}\n ROOT bitcast = f32[8000]{0} bitcast(transpose)\n }\n ENTRY reduce {\n p0 = f32[20,20,20]{2,1,0} 
parameter(0)\n fusion = f32[8000]{0} fusion(p0), kind=kInput, calls=fused_021_transpose\n fusion.1 = f32[8000]{0} fusion(p0), kind=kInput, calls=fused_220_transpose\n ROOT root = (f32[8000]{0}, f32[8000]{0}) tuple(fusion, fusion.1)\n })\"))\n .value();\n const HloInstruction* fusion_1 =\n module->entry_computation()->root_instruction()->operand(0);\n const HloInstruction* fusion_2 =\n module->entry_computation()->root_instruction()->operand(1);\n EXPECT_FALSE(\n FusionHeroesAreCompatible(fusion_1->fused_expression_root()->operand(0),\n fusion_2->fused_expression_root()->operand(0)));\n EXPECT_FALSE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));\n}\nTEST_F(GpuFusibleTest,\n ShapesCompatibleForMultiOutputFusion_SiblingTransposeFusionsCompatible) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_1230_transpose {\n param_0 = f32[1,20,20]{2,1,0} parameter(0)\n bitcast.1 = f32[20,2,2,5]{3,2,1,0} bitcast(param_0)\n transpose = f32[2,2,5,20]{3,2,1,0} transpose(bitcast.1), dimensions={1,2,3,0}\n ROOT bitcast.2 = f32[400]{0} bitcast(transpose)\n }\n fused_021_transpose {\n param_0 = f32[1,20,20]{2,1,0} parameter(0)\n transpose = f32[1,20,20]{2,1,0} transpose(param_0), dimensions={0,2,1}\n ROOT bitcast = f32[400]{0} bitcast(transpose)\n }\n ENTRY reduce {\n p0 = f32[1,20,20]{2,1,0} parameter(0)\n fusion = f32[400]{0} fusion(p0), kind=kInput, calls=fused_1230_transpose\n fusion.1 = f32[400]{0} fusion(p0), kind=kInput, calls=fused_021_transpose\n ROOT root = (f32[400]{0}, f32[400]{0}) tuple(fusion, fusion.1)\n })\"))\n .value();\n const HloInstruction* fusion_1 =\n module->entry_computation()->root_instruction()->operand(0);\n const HloInstruction* fusion_2 =\n module->entry_computation()->root_instruction()->operand(1);\n EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));\n}\nTEST_F(GpuFusibleTest,\n ShapesCompatibleForMultiOutputFusion_MultiOutputReduceFusion) {\n auto module = 
ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_select {\n p1.1 = f32[2,2,2]{2,1,0} parameter(1)\n c0 = f32[] constant(0)\n broadcast = f32[2,2,2]{2,1,0} broadcast(f32[] c0), dimensions={}\n greater-than = pred[2,2,2]{2,1,0} compare(f32[2,2,2]{2,1,0} p1.1, f32[2,2,2]{2,1,0} broadcast), direction=GT\n p0.1 = f32[2,2,2]{2,1,0} parameter(0)\n ROOT select = f32[2,2,2]{2,1,0} select(pred[2,2,2]{2,1,0} greater-than, f32[2,2,2]{2,1,0} p0.1, f32[2,2,2]{2,1,0} broadcast)\n }\n fused_reduce {\n p0.2 = f32[2,2,2]{2,1,0} parameter(0)\n c1 = f32[] constant(0)\n r1 = f32[2,2]{1,0} reduce(p0.2, c1), dimensions={2}, to_apply=scalar_add\n mul = f32[2,2,2]{2,1,0} multiply(p0.2, p0.2)\n r2 = f32[2,2]{1,0} reduce(mul, c1), dimensions={2}, to_apply=scalar_add\n ROOT tuple = (f32[2,2]{1,0}, f32[2,2]{1,0}) tuple(r1, r2)\n }\n ENTRY reduce {\n p0 = f32[2,2,2]{2,1,0} parameter(0)\n p1 = f32[2,2,2]{2,1,0} parameter(1)\n select = f32[2,2,2]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_select\n fusion = (f32[2,2]{1,0}, f32[2,2]{1,0}) fusion(select), kind=kInput, calls=fused_reduce\n gte0 = f32[2,2]{1,0} get-tuple-element(fusion), index=0\n gte1 = f32[2,2]{1,0} get-tuple-element(fusion), index=1\n ROOT root = (f32[2,2]{1,0}, f32[2,2]{1,0}, f32[2,2,2]{2,1,0}) tuple(gte1, gte1, select)\n })\"))\n .value();\n const HloInstruction* fusion_1 =\n module->entry_computation()->root_instruction()->operand(0)->operand(0);\n const HloInstruction* fusion_2 =\n module->entry_computation()->root_instruction()->operand(1)->operand(0);\n EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));\n}\nTEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_ReduceFusions) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_reduce_1 {\n p0.1 = f32[2,2,2]{2,1,0} parameter(0)\n c0 = f32[] constant(0)\n ROOT reduce = f32[2,2]{1,0} reduce(f32[2,2,2]{2,1,0} p0.1, f32[] c0), dimensions={0}, to_apply=scalar_add\n }\n fused_reduce_2 {\n p0.2 = 
f32[2,2,2]{2,1,0} parameter(0)\n mul = f32[2,2,2]{2,1,0} multiply(f32[2,2,2]{2,1,0} p0.2, f32[2,2,2]{2,1,0} p0.2)\n c1 = f32[] constant(0)\n ROOT reduce = f32[2,2]{1,0} reduce(f32[2,2,2]{2,1,0} mul, f32[] c1), dimensions={0}, to_apply=scalar_add\n }\n ENTRY reduce {\n p0 = f32[2,2,2]{2,1,0} parameter(0)\n p1 = f32[2,2,2]{2,1,0} parameter(1)\n reduce_1 = f32[2,2]{1,0} fusion(p0), kind=kLoop, calls=fused_reduce_1\n reduce_2 = f32[2,2]{1,0} fusion(p1), kind=kLoop, calls=fused_reduce_2\n ROOT root = (f32[2,2]{1,0}, f32[2,2]{1,0}) tuple(reduce_1, reduce_2)\n })\"))\n .value();\n const HloInstruction* fusion_1 =\n module->entry_computation()->root_instruction()->operand(0);\n const HloInstruction* fusion_2 =\n module->entry_computation()->root_instruction()->operand(1);\n EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));\n}\nTEST_F(GpuFusibleTest,\n ShapesCompatibleForMultiOutputFusion_DifferentReduceDimensions) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_reduce_1 {\n p0.1 = f32[32,32,32]{2,1,0} parameter(0)\n c0 = f32[] constant(0)\n ROOT reduce = f32[32,32]{1,0} reduce(f32[32,32,32]{2,1,0} p0.1, f32[] c0),\n dimensions={0}, to_apply=scalar_add\n }\n fused_reduce_2 {\n p0.2 = f32[32,32,32]{2,1,0} parameter(0)\n mul = f32[32,32,32]{2,1,0} multiply(f32[32,32,32]{2,1,0} p0.2,\n f32[32,32,32]{2,1,0} p0.2)\n c1 = f32[] constant(0)\n ROOT reduce = f32[32,32]{1,0} reduce(f32[32,32,32]{2,1,0} mul, f32[] c1),\n dimensions={2}, to_apply=scalar_add\n }\n ENTRY reduce {\n p0 = f32[32,32,32]{2,1,0} parameter(0)\n p1 = f32[32,32,32]{2,1,0} parameter(1)\n reduce_1 = f32[32,32]{1,0} fusion(p0), kind=kLoop, calls=fused_reduce_1\n reduce_2 = f32[32,32]{1,0} fusion(p1), kind=kLoop, calls=fused_reduce_2\n ROOT root = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(reduce_1, reduce_2)\n })\"))\n .value();\n const HloInstruction* fusion_1 =\n module->entry_computation()->root_instruction()->operand(0);\n const HloInstruction* 
fusion_2 =\n module->entry_computation()->root_instruction()->operand(1);\n EXPECT_FALSE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));\n}\nTEST_F(GpuFusibleTest,\n ShapesCompatibleForMultiOutputFusion_NoReductionToVector) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_element_wise {\n p0.1 = f32[32,32,32]{2,1,0} parameter(0)\n p1.1 = f32[32,32,32]{2,1,0} parameter(1)\n ROOT add = f32[32,32,32]{2,1,0} add(p0.1, p1.1)\n }\n fused_reduce {\n p0.2 = f32[32,32,32]{2,1,0} parameter(0)\n mul = f32[32,32,32]{2,1,0} multiply(f32[32,32,32]{2,1,0} p0.2,\n f32[32,32,32]{2,1,0} p0.2)\n broadcast = f32[32,32,32,32]{3,2,1,0} broadcast(mul), dimensions={3,2,1}\n c1 = f32[] constant(0)\n ROOT reduce = f32[32,32]{1,0} reduce(f32[32,32,32,32]{3,2,1,0} broadcast,\n f32[] c1), dimensions={1,3}, to_apply=scalar_add\n }\n ENTRY reduce {\n p0 = f32[32,32,32]{2,1,0} parameter(0)\n p1 = f32[32,32,32]{2,1,0} parameter(1)\n element_wise = f32[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop,\n calls=fused_element_wise\n fusion = f32[32,32]{1,0} fusion(element_wise),\n kind=kLoop, calls=fused_reduce\n ROOT root = (f32[32,32]{1,0}, f32[32,32,32]{2,1,0})\n tuple(fusion, element_wise)\n })\"))\n .value();\n const HloInstruction* fusion_1 =\n module->entry_computation()->root_instruction()->operand(0);\n const HloInstruction* fusion_2 =\n module->entry_computation()->root_instruction()->operand(1);\n EXPECT_FALSE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));\n}\nTEST_F(GpuFusibleTest, IsFusibleAsMultiOutputFusionRoot) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule test_module\n ENTRY add {\n lhs = f32[] parameter(0)\n rhs = f32[] parameter(1)\n ROOT add = f32[] add(lhs, rhs)\n })\")\n .value();\n const HloInstruction* root = module->entry_computation()->root_instruction();\n EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*root));\n}\nTEST_F(GpuFusibleTest, ScatterIsNotFusibleAsMultiOutputFusionRoot) {\n auto module = 
ParseAndReturnVerifiedModule(R\"(\n HloModule test_module\n add {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT add = s32[] add(lhs, rhs)\n }\n ENTRY Scatter {\n p0 = s32[3,3] parameter(0)\n operand = s32[3,3] add(p0, p0)\n p1 = s32[2] parameter(1)\n indices = s32[2] add(p1, p1)\n p2 = s32[2,3] parameter(2)\n updates = s32[2,3] add(p2, p2)\n ROOT scatter = s32[3,3] scatter(operand, indices, updates),\n to_apply=add,\n update_window_dims={1},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=1\n })\")\n .value();\n const HloInstruction* scatter_inst =\n module->entry_computation()->root_instruction();\n EXPECT_FALSE(IsFusibleAsMultiOutputFusionRoot(*scatter_inst));\n}\nTEST_F(GpuFusibleTest, ProducerConsumerFusionElementwiseAndReduce) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n ENTRY reduce {\n p0 = f32[32,32,32]{2,1,0} parameter(0)\n c0 = f32[] constant(0)\n exp = f32[32,32,32]{2,1,0} exponential(p0)\n reduce = f32[32,32]{1,0} reduce(exp, c0), dimensions={2},\n to_apply=scalar_add\n ROOT root = (f32[32,32]{1,0}, f32[32,32,32]{2,1,0}) tuple(reduce, exp)\n })\"))\n .value();\n const HloInstruction* root = module->entry_computation()->root_instruction();\n const HloInstruction* consumer = root->operand(0);\n const HloInstruction* producer = root->operand(1);\n EXPECT_TRUE(IsProducerMultiOutputFusible(*producer));\n EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*consumer));\n EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*producer, *consumer));\n}\nTEST_F(GpuFusibleTest, ProducerConsumerFusionTransposeAndLoopFusion) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_add {\n p0.1 = f32[32,31,30]{2,1,0} parameter(0)\n p1.1 = f32[32,31,30]{2,1,0} parameter(1)\n neg = f32[32,31,30]{2,1,0} negate(p0.1)\n ROOT add = f32[32,31,30]{2,1,0} add(neg, p1.1)\n }\n ENTRY reduce {\n p0 = f32[32,31,30]{2,1,0} parameter(0)\n p1 = f32[32,30,31]{2,1,0} 
parameter(1)\n transpose = f32[32,31,30]{2,1,0} transpose(p1), dimensions={0,2,1}\n ROOT add = f32[32,31,30]{2,1,0} fusion(p0, transpose), kind=kLoop, calls=fused_add\n })\"))\n .value();\n const HloInstruction* root = module->entry_computation()->root_instruction();\n const HloInstruction* consumer = root;\n const HloInstruction* producer = root->operand(1);\n EXPECT_TRUE(IsProducerConsumerFusible(*producer, *consumer));\n}\nTEST_F(GpuFusibleTest, ProducerConsumerFusionReduceAndLoopFusion) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_add {\n p0.1 = f32[32,31,30]{2,1,0} parameter(0)\n p1.1 = f32[32,31,30]{2,1,0} parameter(1)\n neg = f32[32,31,30]{2,1,0} negate(p0.1)\n ROOT add = f32[32,31,30]{2,1,0} add(neg, p1.1)\n }\n ENTRY reduce {\n p0 = f32[32,31,30]{2,1,0} parameter(0)\n p1 = f32[32,31,30,29]{3,2,1,0} parameter(1)\n c0 = f32[] constant(0.0)\n reduce = f32[32,31,30]{2,1,0} reduce(p1, c0), dimensions={3}, to_apply=scalar_add\n ROOT add = f32[32,31,30]{2,1,0} fusion(p0, reduce), kind=kLoop, calls=fused_add\n })\"))\n .value();\n const HloInstruction* root = module->entry_computation()->root_instruction();\n const HloInstruction* consumer = root;\n const HloInstruction* producer = root->operand(1);\n EXPECT_TRUE(IsProducerConsumerFusible(*producer, *consumer));\n}\nTEST_F(GpuFusibleTest, ProducerConsumerFusionLoopFusionAndReduce) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_add {\n p0.1 = f32[32,32,32]{2,1,0} parameter(0)\n p1.1 = f32[32,32,32]{2,1,0} parameter(1)\n ROOT add = f32[32,32,32]{2,1,0} add(p0.1, p1.1)\n }\n ENTRY reduce {\n p0 = f32[32,32,32]{2,1,0} parameter(0)\n p1 = f32[32,32,32]{2,1,0} parameter(1)\n c0 = f32[] constant(0)\n add = f32[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_add\n reduce = f32[32,32]{1,0} reduce(add, c0), dimensions={2},\n to_apply=scalar_add\n ROOT root = (f32[32,32]{1,0}, f32[32,32,32]{2,1,0}) tuple(reduce, add)\n })\"))\n 
.value();\n const HloInstruction* root = module->entry_computation()->root_instruction();\n const HloInstruction* consumer = root->operand(0);\n const HloInstruction* producer = root->operand(1);\n EXPECT_TRUE(IsProducerMultiOutputFusible(*producer));\n EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*consumer));\n EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*producer, *consumer));\n}\nTEST_F(GpuFusibleTest, ProducerConsumerFusionLoopFusionAndReduceFusion) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_select {\n p1.1 = f32[32,32,32]{2,1,0} parameter(1)\n c0 = f32[] constant(0)\n broadcast = f32[32,32,32]{2,1,0} broadcast(f32[] c0), dimensions={}\n greater-than = pred[32,32,32]{2,1,0} compare(f32[32,32,32]{2,1,0} p1.1,\n f32[32,32,32]{2,1,0} broadcast), direction=GT\n p0.1 = f32[32,32,32]{2,1,0} parameter(0)\n ROOT select = f32[32,32,32]{2,1,0} select(pred[32,32,32]{2,1,0}\n greater-than, f32[32,32,32]{2,1,0} p0.1, f32[32,32,32]{2,1,0} broadcast)\n }\n fused_reduce {\n p0.2 = f32[32,32,32]{2,1,0} parameter(0)\n c1 = f32[] constant(0)\n r1 = f32[32,32]{1,0} reduce(p0.2, c1), dimensions={2},\n to_apply=scalar_add\n mul = f32[32,32,32]{2,1,0} multiply(p0.2, p0.2)\n r2 = f32[32,32]{1,0} reduce(mul, c1), dimensions={2},\n to_apply=scalar_add\n ROOT tuple = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(r1, r2)\n }\n ENTRY reduce {\n p0 = f32[32,32,32]{2,1,0} parameter(0)\n p1 = f32[32,32,32]{2,1,0} parameter(1)\n select = f32[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_select\n fusion = (f32[32,32]{1,0}, f32[32,32]{1,0}) fusion(select), kind=kInput,\n calls=fused_reduce\n ROOT root = ((f32[32,32]{1,0}, f32[32,32]{1,0}), f32[32,32,32]{2,1,0}) tuple(fusion, select)\n })\"))\n .value();\n const HloInstruction* root = module->entry_computation()->root_instruction();\n const HloInstruction* consumer = root->operand(0);\n const HloInstruction* producer = root->operand(1);\n 
EXPECT_TRUE(IsProducerMultiOutputFusible(*producer));\n EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*consumer));\n EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*producer, *consumer));\n}\nTEST_F(GpuFusibleTest, ProducerConsumerFusionDoNotFuseLoopReduceFusion) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_element_wise {\n p0.1 = f32[2,2,2]{2,1,0} parameter(0)\n p1.1 = f32[2,2,2]{2,1,0} parameter(1)\n ROOT root = f32[2,2,2]{2,1,0} add(p0.1, p1.1)\n }\n fused_reduce {\n p0.2 = f32[2,2,2]{2,1,0} parameter(0)\n mul = f32[2,2,2]{2,1,0} multiply(f32[2,2,2]{2,1,0} p0.2,\n f32[2,2,2]{2,1,0} p0.2)\n broadcast = f32[2,2,2,2]{3,2,1,0} broadcast(mul), dimensions={3,2,1}\n c1 = f32[] constant(0)\n ROOT reduce = f32[2,2]{1,0} reduce(f32[2,2,2,2]{3,2,1,0} broadcast,\n f32[] c1), dimensions={1,3}, to_apply=scalar_add\n }\n ENTRY reduce {\n p0 = f32[2,2,2]{2,1,0} parameter(0)\n p1 = f32[2,2,2]{2,1,0} parameter(1)\n element_wise = f32[2,2,2]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_element_wise\n fusion = f32[2,2]{1,0} fusion(element_wise), kind=kLoop, calls=fused_reduce\n ROOT root = (f32[2,2]{1,0}, f32[2,2,2]{2,1,0}) tuple(fusion, element_wise)\n })\"))\n .value();\n const HloInstruction* root = module->entry_computation()->root_instruction();\n const HloInstruction* consumer = root->operand(0);\n const HloInstruction* producer = root->operand(1);\n EXPECT_TRUE(IsProducerMultiOutputFusible(*producer));\n EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*consumer));\n EXPECT_FALSE(ShapesCompatibleForMultiOutputFusion(*producer, *consumer));\n}\nTEST_F(GpuFusibleTest, ProducerConsumerFusionReduceUnfriendlyLoopFusion) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n mixed_input_layouts_computation {\n p0.1 = f16[128,1024,32,32]{1,3,2,0} parameter(0)\n p1.1 = f16[128,1024,33,33]{3,2,1,0} parameter(1)\n copy = f16[128,1024,33,33]{1,3,2,0} copy(p1.1)\n slice = f16[128,1024,32,32]{1,3,2,0} slice(copy), 
slice={[0:128],[0:1024],[0:32],[0:32]}\n c0 = f16[] constant(0)\n broadcast = f16[128,1024,32,32]{1,3,2,0} broadcast(c0), dimensions={}\n greater-than = pred[128,1024,32,32]{1,3,2,0} compare(slice, broadcast), direction=GT\n ROOT root = f16[128,1024,32,32]{1,3,2,0} select(greater-than, p0.1, broadcast)\n }\n fused_reduce {\n p0.2 = f16[128,1024,32,32]{1,3,2,0} parameter(0)\n convert = f32[128,1024,32,32]{1,3,2,0} convert(p0.2)\n c0.2 = f32[] constant(0)\n ROOT reduce = f32[1024]{0} reduce(convert, c0.2), dimensions={0,2,3}, to_apply=scalar_add\n }\n ENTRY reduce {\n p0 = f16[128,1024,32,32]{1,3,2,0} parameter(0)\n p1 = f16[128,1024,33,33]{3,2,1,0} parameter(1)\n loop_fusion = f16[128,1024,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=mixed_input_layouts_computation\n reduce_fusion = f32[1024]{0} fusion(loop_fusion), kind=kInput, calls=fused_reduce\n ROOT root = (f32[1024]{0}, f16[128,1024,32,32]{1,3,2,0}) tuple(reduce_fusion, loop_fusion)\n })\"))\n .value();\n const HloInstruction* root = module->entry_computation()->root_instruction();\n const HloInstruction* consumer = root->operand(0);\n const HloInstruction* producer = root->operand(1);\n EXPECT_FALSE(IsProducerMultiOutputFusible(*producer));\n EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*consumer));\n EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*producer, *consumer));\n}\nTEST_F(GpuFusibleTest, ProducerConsumerFusionInPlaceOperation) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n %fusion {\n %param_0 = s32[4,4]{1,0} parameter(0)\n %copy = s32[4,4]{0,1} copy(%param_0)\n ROOT %transpose = s32[4,4]{1,0} transpose(%copy), dimensions={1,0}\n }\n ENTRY %main {\n %param_0 = s32[4,4]{1,0} parameter(0)\n %constant_0 = s32[] constant(0)\n %constant_1 = s32[] constant(1)\n %constant_1x1_1 = s32[1,1] constant({ {1} })\n %updated = s32[4,4]{1,0} dynamic-update-slice(%param_0, %constant_1x1_1, %constant_1, %constant_0)\n %transpose = s32[4,4]{0,1} fusion(%updated), kind=kLoop, 
calls=fusion\n ROOT %tuple = tuple(%updated, %transpose)\n })\"))\n .value();\n const HloInstruction* tuple = module->entry_computation()->root_instruction();\n EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple);\n const HloInstruction* dus = tuple->operand(0);\n EXPECT_EQ(dus->opcode(), HloOpcode::kDynamicUpdateSlice);\n const HloInstruction* transpose = tuple->operand(1);\n EXPECT_EQ(transpose->opcode(), HloOpcode::kFusion);\n EXPECT_FALSE(IsProducerMultiOutputFusible(*dus));\n EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*transpose));\n EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*dus, *transpose));\n}\nTEST_F(GpuFusibleTest, NonscalarConstantsNotFused) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule test_module\n add {\n lhs = f32[] parameter(0)\n rhs = f32[] parameter(1)\n ROOT add = f32[] add(lhs, rhs)\n }\n ENTRY BroadcastIntoReduce {\n constant = f32[16] constant({0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15})\n broadcast = f32[16,16,16,16]{3,2,1,0} broadcast(constant), dimensions={0}\n constant.1 = f32[] constant(0)\n reduce = f32[] reduce(broadcast, constant.1), dimensions={0,1,2,3},\n to_apply=add\n ROOT root = (f32[], f32[], f32[16,16,16,16], f32[16]) tuple(reduce, constant.1, broadcast, constant)\n })\")\n .value();\n const HloInstruction* root = module->entry_computation()->root_instruction();\n const HloInstruction* consumer = root->operand(0);\n const HloInstruction* producer = root->operand(1);\n const HloInstruction* consumer2 = root->operand(2);\n const HloInstruction* producer2 = root->operand(3);\n EXPECT_FALSE(\n static_cast(IsProducerConsumerFusible(*producer, *consumer)));\n EXPECT_FALSE(\n static_cast(IsProducerConsumerFusible(*producer2, *consumer2)));\n}\nTEST_F(GpuFusibleTest, FuseLayoutChangingOpWithElementwise) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule test_module\n ENTRY entry {\n p0 = f32[16,16,16,16]{3,2,1,0} parameter(0)\n copy = f32[16,16,16,16]{0,1,2,3} copy(p0)\n ROOT add = 
f32[16,16,16,16]{0,1,2,3} add(copy, copy)\n })\")\n .value();\n const HloInstruction* consumer =\n module->entry_computation()->root_instruction();\n const HloInstruction* producer = consumer->operand(0);\n EXPECT_TRUE(\n static_cast(IsProducerConsumerFusible(*producer, *consumer)));\n}\nTEST_F(GpuFusibleTest, FuseReduceWithUnaryElementwise) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n ENTRY main.12 {\n Arg_0.1 = f32[2048]{0} parameter(0)\n constant.4 = f32[] constant(0.0)\n reduce.10 = f32[] reduce(Arg_0.1, constant.4), dimensions={0}, to_apply=scalar_add\n ROOT exp = f32[] exponential(reduce.10)\n })\"))\n .value();\n const HloInstruction* consumer =\n module->entry_computation()->root_instruction();\n const HloInstruction* producer = consumer->operand(0);\n EXPECT_TRUE(\n static_cast(IsProducerConsumerFusible(*producer, *consumer)));\n}\nTEST_F(GpuFusibleTest, DoNotFuseReduceWithRacesWithUnaryElementwise) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n ENTRY main.12 {\n Arg_0.1 = f32[196608]{0} parameter(0)\n constant.4 = f32[] constant(0.0)\n reduce.10 = f32[] reduce(Arg_0.1, constant.4), dimensions={0}, to_apply=scalar_add\n ROOT exp = f32[] exponential(reduce.10)\n })\"))\n .value();\n const HloInstruction* consumer =\n module->entry_computation()->root_instruction();\n const HloInstruction* producer = consumer->operand(0);\n EXPECT_FALSE(\n static_cast(IsProducerConsumerFusible(*producer, *consumer)));\n}\nTEST_F(GpuFusibleTest, CreatesHeavyComputation_NonfusionInstr) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n ENTRY entry {\n p_0 = f32[20,50] parameter(0)\n constant_1 = f32[] constant(1)\n reduce-window_1 = f32[21,41] reduce-window(p_0, constant_1),\n window={size=20x10 pad=0_20x0_0}, to_apply=scalar_add\n constant_2 = f32[] constant(2)\n reduce-window_2 = f32[21,41] reduce-window(p_0, constant_2),\n window={size=20x10 pad=0_20x0_0}, 
to_apply=scalar_add\n ROOT root = (f32[21,41], f32[21,41])\n tuple(reduce-window_1, reduce-window_2)\n })\"))\n .value();\n const HloInstruction* root = module->entry_computation()->root_instruction();\n const HloInstruction* producer = root->operand(0);\n const HloInstruction* consumer = root->operand(1);\n EXPECT_TRUE(CreatesHeavyComputation(*producer, *consumer));\n}\nTEST_F(GpuFusibleTest, DoesNotCreateHeavyComputation_NonfusionInstr) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n ENTRY entry {\n p_0 = f32[3,5] parameter(0)\n constant = f32[] constant(1)\n broadcast = f32[3, 5] broadcast(f32[] constant), dimensions={}\n scaled_p_0 = f32[3,5] multiply(f32[3, 5] broadcast, f32[3,5]{1, 0} p_0)\n p_1 = f32[2,5] parameter(1)\n reduce-window = f32[3,5] reduce-window(p_1, constant),\n window={size=2x1 pad=0_2x0_0}, to_apply=scalar_add\n ROOT root = (f32[3,5], f32[3,5]) tuple(reduce-window, scaled_p_0)\n })\"))\n .value();\n const HloInstruction* root = module->entry_computation()->root_instruction();\n const HloInstruction* producer = root->operand(0);\n const HloInstruction* consumer = root->operand(1);\n EXPECT_FALSE(CreatesHeavyComputation(*producer, *consumer));\n}\nTEST_F(GpuFusibleTest,\n DoesNotCreateHeavyComputation_NonoverlappingReduceWindows) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n ENTRY entry {\n p_0 = f32[2,5] parameter(0)\n constant_1 = f32[] constant(1)\n reduce-window_1 = f32[3,5] reduce-window(p_0, constant_1),\n window={size=2x1 pad=0_2x0_0}, to_apply=scalar_add\n constant_2 = f32[] constant(2)\n reduce-window_2 = f32[2,3] reduce-window(p_0, constant_2),\n window={size=2x1 pad=0_2x0_0 stride=2x2}, to_apply=scalar_add\n ROOT root = (f32[3,5], f32[2,3]) tuple(reduce-window_1, reduce-window_2)\n })\"))\n .value();\n const HloInstruction* root = module->entry_computation()->root_instruction();\n const HloInstruction* producer = root->operand(0);\n const HloInstruction* consumer = 
root->operand(1);\n EXPECT_FALSE(CreatesHeavyComputation(*producer, *consumer));\n}\nTEST_F(GpuFusibleTest, CreatesHeavyComputation_ReduceWindowGather) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n ENTRY entry {\n p0 = s32[512,512,2] parameter(0)\n p1 = f32[1,1,512,512] parameter(1)\n constant_1 = f32[] constant(0)\n reduce-window.1 = reduce-window(p1, constant_1),\n window={size=1x1x16x16 stride=1x1x16x16}, to_apply=scalar_add\n ROOT ret = gather(reduce-window.1, p0), offset_dims={0,1,2,3},\n collapsed_slice_dims={}, start_index_map={1,2},\n index_vector_dim=2, slice_sizes={1,1,1,1}\n })\"))\n .value();\n auto gather = module->entry_computation()->root_instruction();\n auto reduce_window = gather->operand(0);\n EXPECT_EQ(gather->opcode(), HloOpcode::kGather);\n EXPECT_EQ(reduce_window->opcode(), HloOpcode::kReduceWindow);\n EXPECT_FALSE(IfFusedReadsElementsMultipleTimes(*reduce_window));\n EXPECT_TRUE(IsExpensiveToRepeat(*reduce_window));\n EXPECT_TRUE(IfFusedReadsElementsMultipleTimes(*gather));\n EXPECT_TRUE(CreatesHeavyComputation(*reduce_window, *gather));\n}\nTEST_F(GpuFusibleTest, CreatesHeavyComputation_FusionInstr) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_producer {\n operand = f32[20,20] parameter(0)\n constant = f32[] constant(1)\n ROOT reduce-window = f32[11,11] reduce-window(operand, constant),\n window={size=20x20 pad=0_10x0_10}, to_apply=scalar_add\n }\n fused_consumer {\n operand_0 = f32[11,11] parameter(0)\n operand_1 = f32[11,11] parameter(1)\n constant = f32[] constant(1)\n reduce-window = f32[11,11] reduce-window(operand_1, constant),\n window={size=2x2 pad=0_1x0_1}, to_apply=scalar_add\n ROOT scaled_operand_1 =\n f32[11,11] multiply(f32[11,11] operand_0, f32[11,11] reduce-window)\n }\n ENTRY entry {\n p0 = f32[20,20] parameter(0)\n p1 = f32[11,11] parameter(1)\n producer = f32[11,11] fusion(p0), kind=kLoop, calls=fused_producer\n consumer = f32[11,11] fusion(p1, 
producer), kind=kLoop, calls=fused_consumer\n ROOT root = (f32[11,11], f32[11,11]) tuple(producer, consumer)\n })\"))\n .value();\n const HloInstruction* root = module->entry_computation()->root_instruction();\n const HloInstruction* producer = root->operand(0);\n const HloInstruction* consumer = root->operand(1);\n EXPECT_TRUE(CreatesHeavyComputation(*producer, *consumer));\n}\nTEST_F(GpuFusibleTest, DoesNotCreateHeavyComputation_FusionInstr) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_producer {\n p_0 = f32[2,2] parameter(0)\n constant = f32[] constant(1)\n ROOT reduce-window = f32[2,2] reduce-window(p_0, constant),\n window={size=2x2 pad=0_1x0_1}, to_apply=scalar_add\n }\n fused_consumer {\n p_0 = f32[2,2] parameter(0)\n p_1 = f32[2,2] parameter(1)\n constant = f32[] constant(1)\n reduce-window = f32[2,2] reduce-window(p_1, constant),\n window={size=2x2 pad=0_1x0_1}, to_apply=scalar_add\n ROOT scaled_p_1 = f32[2,2] multiply(f32[2, 2] p_0, f32[2,2] reduce-window)\n }\n ENTRY entry {\n p_0 = f32[2,2] parameter(0)\n producer = f32[2,2] fusion(p_0), kind=kLoop, calls=fused_producer\n consumer = f32[2,2] fusion(producer, p_0), kind=kLoop, calls=fused_consumer\n ROOT root = (f32[2,2], f32[2,2]) tuple(producer, consumer)\n })\"))\n .value();\n const HloInstruction* root = module->entry_computation()->root_instruction();\n const HloInstruction* producer = root->operand(0);\n const HloInstruction* consumer = root->operand(1);\n EXPECT_FALSE(CreatesHeavyComputation(*producer, *consumer));\n}\nTEST_F(GpuFusibleTest, ChooseFusionKind) {\n auto module = ParseAndReturnVerifiedModule(R\"(\nHloModule module\nENTRY computation {\n p = f32[1,5000,6000]{2,1,0} parameter(0)\n c = f32[1,6000,5000]{2,1,0} transpose(p), dimensions={0,2,1}\n ROOT r = f32[300,20,5000]{2,1,0} reshape(c)\n}\n)\")\n .value();\n const HloInstruction* root = module->entry_computation()->root_instruction();\n const HloInstruction* producer = root->operand(0);\n 
EXPECT_EQ(ChooseFusionKind(*producer, *root),\n HloInstruction::FusionKind::kInput);\n}\nTEST_F(GpuFusibleTest, GetFusionRoots1) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule test_module\n fusion {\n p0 = s32[] parameter(0)\n custom-call = (bf16[], s32[]) custom-call(p0), custom_call_target=\"my_custom_call\"\n get-tuple-element.0 = bf16[] get-tuple-element(custom-call), index=0\n get-tuple-element.1 = s32[] get-tuple-element(custom-call), index=1\n ROOT tuple = (bf16[], s32[], s32[]) tuple(get-tuple-element.0, get-tuple-element.1, p0)\n }\n ENTRY entry{\n p0 = s32[] parameter(0)\n ROOT fusion = (bf16[], s32[], s32[]) fusion(p0), kind=kCustom, calls=fusion\n }\n )\")\n .value();\n auto called_computations =\n module->entry_computation()->root_instruction()->called_computations();\n ASSERT_EQ(called_computations.size(), 1);\n auto fusion = called_computations.front();\n auto roots = GetFusionRoots(*fusion);\n auto custom_call = fusion->root_instruction()->operand(0)->operand(0);\n auto parameter = fusion->root_instruction()->operand(2);\n std::vector expected_roots{custom_call, custom_call,\n parameter};\n EXPECT_EQ(roots, expected_roots);\n}\nTEST_F(GpuFusibleTest, GetFusionRoots2) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule test_module\n fusion {\n p0 = s32[] parameter(0)\n custom-call.1 = bf16[] custom-call(p0), custom_call_target=\"my_custom_call1\"\n custom-call.2 = bf16[] custom-call(p0), custom_call_target=\"my_custom_call2\"\n ROOT tuple = (bf16[], bf16[], s32[]) tuple(custom-call.1, custom-call.2, p0)\n }\n ENTRY entry{\n p0 = s32[] parameter(0)\n ROOT fusion = (bf16[], bf16[], s32[]) fusion(p0), kind=kCustom, calls=fusion\n }\n )\")\n .value();\n auto called_computations =\n module->entry_computation()->root_instruction()->called_computations();\n ASSERT_EQ(called_computations.size(), 1);\n auto fusion = called_computations.front();\n auto roots = GetFusionRoots(*fusion);\n auto custom_call1 = 
fusion->root_instruction()->operand(0);\n auto custom_call2 = fusion->root_instruction()->operand(1);\n auto parameter = fusion->root_instruction()->operand(2);\n std::vector expected_roots{custom_call1, custom_call2,\n parameter};\n EXPECT_EQ(roots, expected_roots);\n}\nTEST_F(GpuFusibleTest, GetFusionRoots3) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule test_module\n fusion {\n p0 = s32[] parameter(0)\n custom-call = (bf16[], s32[]) custom-call(p0), custom_call_target=\"my_custom_call\"\n get-tuple-element.0 = bf16[] get-tuple-element(custom-call), index=0\n custom-call.2 = bf16[] custom-call(p0), custom_call_target=\"my_custom_call2\"\n get-tuple-element.1 = s32[] get-tuple-element(custom-call), index=1\n ROOT tuple = (bf16[], bf16[], s32[], s32[]) tuple(get-tuple-element.0, custom-call.2, get-tuple-element.1, p0)\n }\n ENTRY entry{\n p0 = s32[] parameter(0)\n ROOT fusion = (bf16[], bf16[], s32[], s32[]) fusion(p0), kind=kCustom, calls=fusion\n }\n )\")\n .value();\n auto called_computations =\n module->entry_computation()->root_instruction()->called_computations();\n ASSERT_EQ(called_computations.size(), 1);\n auto fusion = called_computations.front();\n auto roots = GetFusionRoots(*fusion);\n auto custom_call1 = fusion->root_instruction()->operand(0)->operand(0);\n auto custom_call2 = fusion->root_instruction()->operand(1);\n auto parameter = fusion->root_instruction()->operand(3);\n std::vector expected_roots{custom_call1, custom_call2,\n custom_call1, parameter};\n EXPECT_EQ(roots, expected_roots);\n}\nTEST_F(GpuFusibleTest, GetFusionRootsWithGTEMakeTupleSequence) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule test_module\n fusion {\n p0 = s32[] parameter(0)\n p1 = s32[32] parameter(1)\n custom-call = (bf16[], s32[], u32[]) custom-call(p1), custom_call_target=\"my_custom_call\"\n get-tuple-element.0 = bf16[] get-tuple-element(custom-call), index=0\n get-tuple-element.1 = s32[] get-tuple-element(custom-call), index=1\n 
bitcast = s32[1] bitcast(get-tuple-element.1)\n dynamic-update-slice = s32[32] dynamic-update-slice(p1, bitcast, p0)\n get-tuple-element.2 = u32[] get-tuple-element(custom-call), index=2\n ROOT tuple = (bf16[], s32[32], u32[]) tuple(get-tuple-element.0, dynamic-update-slice, get-tuple-element.2)\n }\n ENTRY entry{\n p0 = s32[] parameter(0)\n bitcast = s32[32] bitcast(p0)\n ROOT fusion = (bf16[], s32[32], u32[]) fusion(p0, bitcast), kind=kCustom, calls=fusion\n }\n )\")\n .value();\n auto called_computations =\n module->entry_computation()->root_instruction()->called_computations();\n ASSERT_EQ(called_computations.size(), 1);\n auto fusion = called_computations.front();\n auto roots = GetFusionRoots(*fusion);\n auto custom_call = fusion->root_instruction()->operand(0)->operand(0);\n auto dus = fusion->root_instruction()->operand(1);\n std::vector expected_result{custom_call, dus,\n custom_call};\n EXPECT_EQ(roots, expected_result);\n}\nTEST_F(GpuFusibleTest, GetFusionRootsWithMakeTupleGTESequence) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule test_module\n fusion {\n p0 = s32[] parameter(0)\n p1 = s32[32] parameter(1)\n custom-call = (bf16[], s32[], u32[]) custom-call(p1), custom_call_target=\"my_custom_call\"\n get-tuple-element.0 = bf16[] get-tuple-element(custom-call), index=0\n get-tuple-element.1 = s32[] get-tuple-element(custom-call), index=1\n bitcast = s32[1] bitcast(get-tuple-element.1)\n dynamic-update-slice = s32[32] dynamic-update-slice(p1, bitcast, p0)\n get-tuple-element.2 = u32[] get-tuple-element(custom-call), index=2\n tuple = (bf16[], s32[32], u32[]) tuple(get-tuple-element.0, dynamic-update-slice, get-tuple-element.2)\n get-tuple-element.3 = bf16[] get-tuple-element(tuple), index=0\n get-tuple-element.4 = u32[] get-tuple-element(tuple), index=2\n ROOT tuple2 = (bf16[], s32[32], u32[]) tuple(get-tuple-element.3, dynamic-update-slice, get-tuple-element.4)\n }\n ENTRY entry{\n p0 = s32[] parameter(0)\n bitcast = s32[32] 
bitcast(p0)\n ROOT fusion = (bf16[], s32[32], u32[]) fusion(p0, bitcast), kind=kCustom, calls=fusion\n }\n )\")\n .value();\n auto called_computations =\n module->entry_computation()->root_instruction()->called_computations();\n ASSERT_EQ(called_computations.size(), 1);\n auto fusion = called_computations.front();\n auto roots = GetFusionRoots(*fusion);\n auto tuple_inst = fusion->root_instruction()->operand(0)->operand(0);\n auto custom_call = tuple_inst->operand(0)->operand(0);\n auto dus = fusion->root_instruction()->operand(1);\n std::vector expected_result{custom_call, dus,\n custom_call};\n EXPECT_EQ(roots, expected_result);\n}\nTEST_F(GpuFusibleTest, GetFusionRootsWithTupleMultipleSameOperands) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule test_module\n fusion {\n p1 = s32[32] parameter(0)\n add0 = s32[32] add(p1, p1)\n ROOT _ = (s32[32], s32[32]) tuple(add0, add0)\n }\n ENTRY entry {\n p0 = s32[32] parameter(0)\n ROOT fusion = (s32[32], s32[32]) fusion(p0), kind=kCustom, calls=fusion\n }\n )\")\n .value();\n auto called_computations =\n module->entry_computation()->root_instruction()->called_computations();\n ASSERT_EQ(called_computations.size(), 1);\n auto fusion = called_computations.front();\n auto roots = GetFusionRoots(*fusion);\n auto add0 = fusion->root_instruction()->operand(0);\n EXPECT_THAT(GetFusionRoots(*fusion), ElementsAre(add0, add0));\n}\nTEST_F(GpuFusibleTest, GetFusibleComputations) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_reduce {\n p0 = f32[128,1024] parameter(0)\n c0 = f32[] constant(0)\n ROOT reduce = f32[128]{0} reduce(p0, c0), dimensions={1}, to_apply=scalar_add\n }\n body_a {\n p0 = f32[128,1024] parameter(0)\n ROOT reduce_fusion = f32[128] fusion(p0), kind=kInput, calls=fused_reduce\n }\n body_b {\n p0 = f32[128,1024] parameter(0)\n c0 = f32[] constant(0)\n ROOT bc = f32[128] broadcast(c0), dimensions={}\n }\n ENTRY main {\n p0 = s32[] parameter(0)\n p1 = 
f32[128,1024] parameter(1)\n ROOT conditional = f32[128] conditional(p0, p1, p1),\n branch_computations={body_a, body_b}\n })\"))\n .value();\n auto fusible = GetFusibleComputations(*module, {});\n EXPECT_THAT(fusible, ElementsAre(module->GetComputationWithName(\"body_a\"),\n module->GetComputationWithName(\"body_b\"),\n module->entry_computation()));\n}\nTEST_F(GpuFusibleTest, GetSharedMemoryUsage) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n wrapped_transpose {\n p0 = f32[128,1024,2]{2,1,0} parameter(0)\n ROOT transpose = f32[1024,128,2]{2,1,0} transpose(p0), dimensions={1,0,2}\n }\n ENTRY main {\n p = f32[128,1024,2] parameter(0)\n ROOT res = f32[1024,128,2]{2,1,0} fusion(p), kind=kInput, calls=wrapped_transpose\n })\"))\n .value();\n auto& debug_options = module->mutable_config().mutable_debug_options();\n debug_options.set_xla_gpu_mlir_emitter_level(3);\n FusionInfoCache cache;\n auto fusion = module->entry_computation()->root_instruction();\n EXPECT_EQ(cache.GetSharedMemoryUsage(*fusion), 32 * 33 * 2 * 4);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_fusible.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_fusible_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1187,"cells":{"ID":{"kind":"string","value":"c987cf9e-49f9-4548-8911-f4a481f0a4b8"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"buffer_allocations"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/buffer_allocations.cc"},"File Path for Unit 
Test":{"kind":"string","value":"third_party/xla/xla/backends/cpu/runtime/buffer_allocations_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/buffer_allocations.h\"\n#include \n#include \n#include \"absl/status/status.h\"\n#include \"absl/types/span.h\"\n#include \"xla/service/buffer_assignment.h\"\n#include \"xla/stream_executor/device_memory.h\"\n#include \"tsl/platform/logging.h\"\nnamespace xla {\nnamespace gpu {\nabsl::Status BufferAllocations::TearDown(\n const std::set& live_addresses,\n absl::Span allocations) {\n absl::Status status;\n const int64_t num_buffers = allocations.size();\n for (BufferAllocation::Index i = 0; i < num_buffers; ++i) {\n const BufferAllocation& allocation = allocations[i];\n se::DeviceMemoryBase buffer_address = GetDeviceAddress(allocation.index());\n if ((allocation.maybe_live_out() &&\n !live_addresses.count(buffer_address)) ||\n allocation.IsPreallocatedTempBuffer()) {\n auto dealloc_result =\n memory_allocator_->Deallocate(device_ordinal_, buffer_address);\n if (!dealloc_result.ok() && status.ok()) {\n status = dealloc_result;\n }\n }\n }\n return status;\n}\nse::DeviceMemoryBase BufferAllocations::GetDeviceAddress(\n BufferAllocation::Index buffer_index) const {\n CHECK_GE(buffer_index, 0);\n CHECK_LT(buffer_index, buffers_.size());\n return buffers_[buffer_index];\n}\nse::DeviceMemoryBase& BufferAllocations::GetMutableDeviceAddress(\n BufferAllocation::Index buffer_index) {\n CHECK_GE(buffer_index, 0);\n CHECK_LT(buffer_index, buffers_.size());\n return buffers_[buffer_index];\n}\nse::DeviceMemoryBase BufferAllocations::GetDeviceAddress(\n const BufferAllocation::Slice& buffer_slice) const {\n int64_t index = buffer_slice.index();\n se::DeviceMemoryBase base = GetDeviceAddress(index);\n int64_t offset = buffer_slice.offset();\n CHECK_LE(buffer_slice.offset(), base.size())\n << \"slice offset \" << offset << \" must be smaller than buffer #\" << index\n << \" size \" << base.size();\n int64_t extent = 
offset + buffer_slice.size();\n CHECK_LE(extent, base.size())\n << \"slice extent \" << extent << \" must be smaller than buffer #\" << index\n << \" size \" << base.size();\n return base.GetByteSlice(buffer_slice.offset(), buffer_slice.size());\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/backends/cpu/runtime/buffer_allocations.h\"\n#include \n#include \n#include \"xla/service/buffer_assignment.h\"\n#include \"xla/service/maybe_owning_device_memory.h\"\n#include \"xla/stream_executor/device_memory.h\"\n#include \"tsl/platform/statusor.h\"\n#include \"tsl/platform/test.h\"\nnamespace xla::cpu {\nnamespace {\nTEST(BufferAllocationsTest, GetDeviceAddress) {\n std::vector buffers;\n std::vector data = {1.0, 2.0, 3.0, 4.0};\n size_t size_in_bytes = data.size() * sizeof(float);\n buffers.emplace_back(se::DeviceMemoryBase(data.data(), size_in_bytes));\n BufferAllocations allocations(buffers);\n BufferAllocation alloc(0, size_in_bytes, 0);\n BufferAllocation::Slice slice(&alloc, 2 * sizeof(float),\n sizeof(float));\n TF_ASSERT_OK_AND_ASSIGN(se::DeviceMemoryBase alloc_mem,\n allocations.GetDeviceAddress(0));\n EXPECT_EQ(alloc_mem.opaque(), &data[0]);\n TF_ASSERT_OK_AND_ASSIGN(se::DeviceMemoryBase slice_mem,\n allocations.GetDeviceAddress(slice));\n EXPECT_EQ(slice_mem.opaque(), &data[2]);\n}\nTEST(BufferAllocationsTest, GetDeviceAddressUnchecked) {\n std::vector buffers;\n std::vector data = {1.0, 2.0, 3.0, 4.0};\n size_t size_in_bytes = data.size() * sizeof(float);\n buffers.emplace_back(se::DeviceMemoryBase(data.data(), size_in_bytes));\n BufferAllocations allocations(buffers);\n BufferAllocation alloc(0, size_in_bytes, 0);\n BufferAllocation::Slice slice(&alloc, 2 * sizeof(float),\n sizeof(float));\n se::DeviceMemoryBase alloc_mem = allocations.GetDeviceAddressUnchecked(0);\n EXPECT_EQ(alloc_mem.opaque(), &data[0]);\n se::DeviceMemoryBase slice_mem = allocations.GetDeviceAddressUnchecked(slice);\n EXPECT_EQ(slice_mem.opaque(), 
&data[2]);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/buffer_allocations.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/buffer_allocations_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1188,"cells":{"ID":{"kind":"string","value":"9a2dfbbc-53b8-4988-9ab6-73c85b8aa912"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"cudnn_support_utils"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/cudnn_support_utils.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/cudnn_support_utils_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/cudnn_support_utils.h\"\n#include \n#include \n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/primitive_util.h\"\n#include \"xla/service/gpu/cublas_cudnn.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/stream_executor/device_description.h\"\n#include \"xla/util.h\"\n#include \"xla/window_util.h\"\n#include \"tsl/platform/logging.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace gpu {\nabsl::StatusOr CudnnSupportsOptimizedIntegerConvolution(\n const se::CudaComputeCapability& compute_capability,\n HloCustomCallInstruction& conv, int vector_size) {\n TF_ASSIGN_OR_RETURN(auto kind, GetCudnnConvKind(&conv));\n const Shape& input_shape = conv.operand(0)->shape();\n const Shape& kernel_shape = conv.operand(1)->shape();\n const Shape& result_shape = conv.shape().tuple_shapes(0);\n const auto& dnums = conv.convolution_dimension_numbers();\n if (vector_size != 4 && vector_size != 32) {\n VLOG(3) << 
\"Unsupported vector size for integer convolution: \"\n << vector_size;\n return false;\n }\n if ((vector_size == 32 && !compute_capability.IsAtLeast(7, 5)) ||\n !compute_capability.IsAtLeast(6, 1)) {\n VLOG(3) << \"Compute capability \" << compute_capability.ToString()\n << \" is not sufficent for int8x\" << vector_size\n << \" vectorization.\";\n return false;\n }\n if (kind != CudnnConvKind::kForward &&\n kind != CudnnConvKind::kForwardActivation) {\n VLOG(3) << \"Convolution kind is not forward or foward-activation: \"\n << conv.ToString();\n return false;\n }\n if (!primitive_util::IsIntegralType(input_shape.element_type()) ||\n !primitive_util::IsIntegralType(kernel_shape.element_type())) {\n VLOG(3) << \"Convolution does not accept integer inputs/weights: \"\n << conv.ToString();\n return false;\n }\n if (dnums.input_spatial_dimensions().size() != 2 ||\n dnums.kernel_spatial_dimensions().size() != 2 ||\n dnums.output_spatial_dimensions().size() != 2) {\n VLOG(3) << \"Convolution is not 2D: \" << conv.ToString();\n return false;\n }\n if (vector_size == 32 &&\n !primitive_util::IsIntegralType(result_shape.element_type())) {\n VLOG(3) << \"int8x32 convolutions only support integer output: \"\n << conv.ToString();\n return false;\n }\n if (vector_size == 32) {\n int64_t W = input_shape.dimensions(dnums.input_spatial_dimensions()[0]);\n int64_t H = input_shape.dimensions(dnums.input_spatial_dimensions()[1]);\n int64_t R = kernel_shape.dimensions(dnums.kernel_spatial_dimensions()[0]);\n int64_t S = kernel_shape.dimensions(dnums.kernel_spatial_dimensions()[1]);\n const int64_t dilationW = conv.window().dimensions()[0].base_dilation();\n const int64_t dilationH = conv.window().dimensions()[1].base_dilation();\n if ((W <= (R - 1) * dilationW) || (H <= (S - 1) * dilationH)) {\n VLOG(3) << \"Conv spatial filter/input dimensions are too small for \"\n \"vecotrized int8x32 convolution: \"\n << conv.ToString();\n return false;\n }\n }\n if 
(window_util::HasDilation(conv.window())) {\n VLOG(3) << \"Vectorized integer convolutions do not support dilation: \"\n << conv.ToString();\n return false;\n }\n return true;\n}\nabsl::StatusOr\nCudnnInferTransposeForFilterReordering(\n const Shape& shape, const ConvolutionDimensionNumbers& dimension_numbers) {\n if (shape.rank() != 4 && shape.rank() != 5) {\n return Internal(\"Filter shape has unexpected rank.\");\n }\n const int64_t dO = dimension_numbers.kernel_output_feature_dimension();\n const int64_t dI = dimension_numbers.kernel_input_feature_dimension();\n const int64_t dH = dimension_numbers.kernel_spatial_dimensions().at(0);\n const int64_t dW = dimension_numbers.kernel_spatial_dimensions().at(1);\n bool revectorize = shape.rank() == 5;\n const int64_t dZ = revectorize ? 10 - dO - dI - dH - dW : -1;\n const int64_t vsize = revectorize ? shape.dimensions(dZ) : 1;\n if (shape.dimensions(dO) % 32 != 0 ||\n shape.dimensions(dI) % (32 / vsize) != 0 ||\n (revectorize && vsize != 4 && vsize != 32)) {\n return Internal(\"Filter shape is not vectorizable.\");\n }\n std::vector output = {\n shape.dimensions(dO), shape.dimensions(dI) / (32 / vsize),\n shape.dimensions(dH), shape.dimensions(dW), 32};\n Shape output_shape = ShapeUtil::MakeShape(shape.element_type(), output);\n auto calc_index = [&](int dim) {\n bool split_v = vsize == 32;\n return (revectorize\n ? (dI < dim ? 2 - split_v : 0) + (dZ < dim ? 1 + split_v : 0)\n : (dI < dim ? 3 : 0)) +\n (dO < dim ? 3 : 0) + (dH < dim) + (dW < dim);\n };\n int idx_O = calc_index(dO);\n int idx_I = calc_index(dI);\n int idx_H = calc_index(dH);\n int idx_W = calc_index(dW);\n int idx_Y = vsize == 32 ? calc_index(dZ) : idx_I + 1;\n int idx_Z = vsize == 4 ? calc_index(dZ) : vsize == 32 ? 
idx_Y + 1 : idx_I + 2;\n std::vector dims(8);\n dims[idx_O] = shape.dimensions(dO) / 8;\n dims[idx_O + 1] = 4;\n dims[idx_O + 2] = 2;\n dims[idx_I] = shape.dimensions(dI) / (32 / vsize);\n dims[idx_Y] = 8;\n dims[idx_Z] = 4;\n dims[idx_H] = shape.dimensions(dH);\n dims[idx_W] = shape.dimensions(dW);\n Shape split_shape = ShapeUtil::MakeShape(shape.element_type(), dims);\n std::vector permutation = {idx_I, idx_H, idx_W, idx_O,\n idx_O + 2, idx_Y, idx_O + 1, idx_Z};\n return CudnnReorderTransposeConfig{split_shape, output_shape, permutation};\n}\nabsl::StatusOr\nCudnnInferTransposeForBiasReordering(const Shape& shape) {\n if (shape.rank() != 1) {\n return Internal(\"Bias shape has unexpected rank.\");\n }\n if (shape.dimensions(0) % 32 != 0) {\n return Internal(\"Bias shape is not vectorizable.\");\n }\n std::vector dims = {shape.dimensions(0) / 32, 4, 2, 4};\n Shape split_shape = ShapeUtil::MakeShape(shape.element_type(), dims);\n std::vector permutation = {0, 2, 1, 3};\n return CudnnReorderTransposeConfig{split_shape, shape, permutation};\n}\nbool IsWorkspaceAllocationRoot(const HloInstruction& root) {\n return root.IsRoot() && root.opcode() == HloOpcode::kTuple &&\n root.operand_count() == 2 &&\n root.operand(1)->IsCustomCall(kWorkspaceAllocationCustomCallTarget) &&\n root.operand(1)->operand_count() == 0;\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/gpu/cudnn_support_utils.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/strings/string_view.h\"\n#include \"absl/types/span.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/stream_executor/device_description.h\"\n#include \"xla/test.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include 
\"xla/tests/verified_hlo_module.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/logging.h\"\n#include \"tsl/platform/status_matchers.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nusing ::tsl::testing::IsOkAndHolds;\nclass CudnnSupportUtilsTest : public HloTestBase {\n public:\n absl::StatusOr GetCustomCall(\n xla::VerifiedHloModule* module, absl::string_view target) {\n HloCustomCallInstruction* call = nullptr;\n for (HloComputation* comp : module->MakeNonfusionComputations()) {\n for (HloInstruction* inst : comp->instructions()) {\n if (inst->IsCustomCall(target)) {\n VLOG(1) << inst->ToString();\n if (call != nullptr) {\n return tsl::errors::FailedPrecondition(\n \"Found more than one custom call.\");\n }\n call = Cast(inst);\n }\n }\n }\n if (call == nullptr) {\n return tsl::errors::FailedPrecondition(\n \"Did not find any matching custom call.\");\n }\n return call;\n }\n};\nTEST_F(CudnnSupportUtilsTest,\n CudnnSupportsOptimizedIntegerConvolutionCheckVectorSize) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[8,10,10,128] parameter(0)\n filter = s8[2,2,128,128] parameter(1)\n ROOT result = (s8[8,10,10,128], u8[0]) custom-call(input, filter),\n window={size=2x2}, dim_labels=b01f_01io->b01f,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n HloCustomCallInstruction* conv;\n TF_ASSERT_OK_AND_ASSIGN(conv,\n GetCustomCall(module.get(), \"__cudnn$convForward\"));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),\n IsOkAndHolds(true));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),\n IsOkAndHolds(true));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 7),\n IsOkAndHolds(false));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 1),\n IsOkAndHolds(false)); \n}\nTEST_F(CudnnSupportUtilsTest,\n 
CudnnSupportsOptimizedIntegerConvolutionCheckComputeCapability) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[8,10,10,128] parameter(0)\n filter = s8[2,2,128,128] parameter(1)\n ROOT result = (s8[8,10,10,128], u8[0]) custom-call(input, filter),\n window={size=2x2}, dim_labels=b01f_01io->b01f,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n HloCustomCallInstruction* conv;\n TF_ASSERT_OK_AND_ASSIGN(conv,\n GetCustomCall(module.get(), \"__cudnn$convForward\"));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({6, 0}, *conv, 4),\n IsOkAndHolds(false));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({6, 1}, *conv, 4),\n IsOkAndHolds(true));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 4}, *conv, 32),\n IsOkAndHolds(false));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),\n IsOkAndHolds(true));\n}\nTEST_F(CudnnSupportUtilsTest,\n CudnnSupportsOptimizedIntegerConvolutionCheckKind) {\n auto moduleFwd = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[32,10,10,64] parameter(0)\n filter = s8[2,2,64,128] parameter(1)\n ROOT result = (s8[32,10,10,128], u8[0]) custom-call(input, filter),\n window={size=2x2}, dim_labels=b01f_01io->b01f,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n HloCustomCallInstruction* conv;\n TF_ASSERT_OK_AND_ASSIGN(\n conv, GetCustomCall(moduleFwd.get(), \"__cudnn$convForward\"));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),\n IsOkAndHolds(true));\n auto moduleBwdFilter = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = f16[10,20,30,41] parameter(0)\n output = f16[10,20,30,40] parameter(1)\n result = (f16[2,2,41,40], u8[0]) custom-call(input, output),\n window={size=2x2}, dim_labels=b01f_01io->b01f,\n custom_call_target=\"__cudnn$convBackwardFilter\"\n ROOT gte = 
f16[2,2,41,40] get-tuple-element(result), index=0\n })\")\n .value();\n TF_ASSERT_OK_AND_ASSIGN(\n conv, GetCustomCall(moduleBwdFilter.get(), \"__cudnn$convBackwardFilter\"));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),\n IsOkAndHolds(false));\n auto moduleBwdInput = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n output = f16[10,20,30,40] parameter(0)\n filter = f16[2,2,41,40] parameter(1)\n result = (f16[10,20,30,41], u8[0]) custom-call(output, filter),\n window={size=2x2}, dim_labels=b01f_01io->b01f,\n custom_call_target=\"__cudnn$convBackwardInput\"\n ROOT gte = f16[10,20,30,41] get-tuple-element(result), index=0\n })\")\n .value();\n TF_ASSERT_OK_AND_ASSIGN(\n conv, GetCustomCall(moduleBwdInput.get(), \"__cudnn$convBackwardInput\"));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),\n IsOkAndHolds(false));\n}\nTEST_F(CudnnSupportUtilsTest,\n CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckTypes) {\n auto moduleS8InOut = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[32,10,10,64] parameter(0)\n filter = s8[2,2,64,128] parameter(1)\n ROOT result = (s8[32,10,10,128], u8[0]) custom-call(input, filter),\n window={size=2x2}, dim_labels=b01f_01io->b01f,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n HloCustomCallInstruction* conv;\n TF_ASSERT_OK_AND_ASSIGN(\n conv, GetCustomCall(moduleS8InOut.get(), \"__cudnn$convForward\"));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),\n IsOkAndHolds(true));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),\n IsOkAndHolds(true));\n auto moduleS8InF32Out = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[32,10,10,64] parameter(0)\n filter = s8[2,2,64,128] parameter(1)\n ROOT result = (f32[32,10,10,128], u8[0]) custom-call(input, filter),\n window={size=2x2}, 
dim_labels=b01f_01io->b01f,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n TF_ASSERT_OK_AND_ASSIGN(\n conv, GetCustomCall(moduleS8InF32Out.get(), \"__cudnn$convForward\"));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),\n IsOkAndHolds(true));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),\n IsOkAndHolds(false)); \n auto moduleF32InF32Out = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = f32[32,10,10,64] parameter(0)\n filter = f32[2,2,64,128] parameter(1)\n ROOT result = (f32[32,10,10,128], u8[0]) custom-call(input, filter),\n window={size=2x2}, dim_labels=b01f_01io->b01f,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n TF_ASSERT_OK_AND_ASSIGN(\n conv, GetCustomCall(moduleF32InF32Out.get(), \"__cudnn$convForward\"));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),\n IsOkAndHolds(false));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),\n IsOkAndHolds(false));\n}\nTEST_F(CudnnSupportUtilsTest,\n CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckDims) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[32,10,10,10,64] parameter(0)\n filter = s8[2,2,2,64,128] parameter(1)\n ROOT result = (s8[32,10,10,10,128], u8[0]) custom-call(input, filter),\n window={size=2x2}, dim_labels=b012f_012io->b012f,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n HloCustomCallInstruction* conv;\n TF_ASSERT_OK_AND_ASSIGN(conv,\n GetCustomCall(module.get(), \"__cudnn$convForward\"));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),\n IsOkAndHolds(false));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),\n IsOkAndHolds(false));\n}\nTEST_F(CudnnSupportUtilsTest,\n CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckDilation) {\n auto module = 
ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[32,10,10,64] parameter(0)\n filter = s8[2,2,64,128] parameter(1)\n ROOT result = (s8[32,20,20,128], u8[0]) custom-call(input, filter),\n window={size=2x2 rhs_dilate=2x2}, dim_labels=b01f_01io->b01f,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n HloCustomCallInstruction* conv;\n TF_ASSERT_OK_AND_ASSIGN(conv,\n GetCustomCall(module.get(), \"__cudnn$convForward\"));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),\n IsOkAndHolds(false));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),\n IsOkAndHolds(false));\n}\nTEST_F(CudnnSupportUtilsTest,\n CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckAlgo1Dims) {\n auto moduleFilterCoversInput = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[32,2,2,64] parameter(0)\n filter = s8[3,3,64,128] parameter(1)\n ROOT result = (s8[32,2,2,128], u8[0]) custom-call(input, filter),\n window={size=3x3}, dim_labels=b01f_01io->b01f,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n HloCustomCallInstruction* conv;\n TF_ASSERT_OK_AND_ASSIGN(conv, GetCustomCall(moduleFilterCoversInput.get(),\n \"__cudnn$convForward\"));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),\n IsOkAndHolds(true));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),\n IsOkAndHolds(false));\n auto moduleFilterAlmostCoversInput = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[32,3,3,64] parameter(0)\n filter = s8[3,3,64,128] parameter(1)\n ROOT result = (s8[32,3,3,128], u8[0]) custom-call(input, filter),\n window={size=3x3}, dim_labels=b01f_01io->b01f,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n TF_ASSERT_OK_AND_ASSIGN(conv,\n GetCustomCall(moduleFilterAlmostCoversInput.get(),\n \"__cudnn$convForward\"));\n 
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),\n IsOkAndHolds(true));\n EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),\n IsOkAndHolds(true));\n}\nclass ReorderFilterRank4Test : public ::testing::TestWithParam {};\nTEST_P(ReorderFilterRank4Test, InferTransposeRank4) {\n auto input_dims = GetParam();\n size_t dI = input_dims.find('i');\n size_t dO = input_dims.find('o');\n size_t dH = input_dims.find('0');\n size_t dW = input_dims.find('1');\n ConvolutionDimensionNumbers dnums;\n dnums.set_kernel_input_feature_dimension(dI);\n dnums.set_kernel_output_feature_dimension(dO);\n dnums.add_kernel_spatial_dimensions(dH);\n dnums.add_kernel_spatial_dimensions(dW);\n int64_t shape_dims[4] = {0, 0, 0, 0};\n shape_dims[dI] = 224;\n shape_dims[dO] = 96;\n shape_dims[dH] = 5;\n shape_dims[dW] = 3;\n Shape shape = ShapeUtil::MakeShape(U8, absl::MakeSpan(shape_dims));\n auto input = HloInstruction::CreateParameter(0, shape, \"input\");\n auto filter = HloInstruction::CreateParameter(1, shape, \"filter\");\n TF_ASSERT_OK_AND_ASSIGN(CudnnReorderTransposeConfig inferred_config,\n CudnnInferTransposeForFilterReordering(shape, dnums));\n EXPECT_THAT(inferred_config.result_shape.dimensions(),\n ::testing::ElementsAre(96, 7, 5, 3, 32));\n Shape reshaped = ShapeUtil::PermuteDimensions(\n inferred_config.permutation, inferred_config.transpose_shape);\n EXPECT_THAT(reshaped.dimensions(),\n ::testing::ElementsAre(7, 5, 3, 12, 2, 8, 4, 4));\n EXPECT_EQ(inferred_config.permutation[6], inferred_config.permutation[4] - 1);\n EXPECT_EQ(inferred_config.permutation[7], inferred_config.permutation[5] + 1);\n}\nstd::vector GeneratePermutations(std::string input_dims) {\n std::sort(input_dims.begin(), input_dims.end());\n std::vector permutations;\n do {\n permutations.push_back(input_dims);\n } while (std::next_permutation(input_dims.begin(), input_dims.end()));\n return permutations;\n}\nINSTANTIATE_TEST_SUITE_P(ReorderTestSuite, 
ReorderFilterRank4Test,\n ::testing::ValuesIn(GeneratePermutations(\"01io\")));\nclass ReorderFilterRank5Test\n : public ::testing::TestWithParam> {};\nTEST_P(ReorderFilterRank5Test, InferTransposeRank5) {\n auto [input_dims, vsize] = GetParam();\n size_t dI = input_dims.find('i');\n size_t dO = input_dims.find('o');\n size_t dH = input_dims.find('0');\n size_t dW = input_dims.find('1');\n ConvolutionDimensionNumbers dnums;\n dnums.set_kernel_input_feature_dimension(dI);\n dnums.set_kernel_output_feature_dimension(dO);\n dnums.add_kernel_spatial_dimensions(dH);\n dnums.add_kernel_spatial_dimensions(dW);\n int64_t shape_dims[5] = {vsize, vsize, vsize, vsize, vsize};\n shape_dims[dI] = 224 / vsize;\n shape_dims[dO] = 96;\n shape_dims[dH] = 5;\n shape_dims[dW] = 3;\n Shape shape = ShapeUtil::MakeShape(U8, absl::MakeSpan(shape_dims));\n auto input = HloInstruction::CreateParameter(0, shape, \"input\");\n auto filter = HloInstruction::CreateParameter(1, shape, \"filter\");\n TF_ASSERT_OK_AND_ASSIGN(CudnnReorderTransposeConfig inferred_config,\n CudnnInferTransposeForFilterReordering(shape, dnums));\n EXPECT_THAT(inferred_config.result_shape.dimensions(),\n ::testing::ElementsAre(96, 7, 5, 3, 32));\n Shape reshaped = ShapeUtil::PermuteDimensions(\n inferred_config.permutation, inferred_config.transpose_shape);\n EXPECT_THAT(reshaped.dimensions(),\n ::testing::ElementsAre(7, 5, 3, 12, 2, 8, 4, 4));\n EXPECT_EQ(inferred_config.permutation[6], inferred_config.permutation[4] - 1);\n}\nINSTANTIATE_TEST_SUITE_P(\n ReorderTestSuite, ReorderFilterRank5Test,\n ::testing::Combine(::testing::ValuesIn(GeneratePermutations(\"01?io\")),\n ::testing::Values(4, 32)));\nclass ReorderBiasTest : public ::testing::Test {};\nTEST_F(ReorderBiasTest, InferTranspose) {\n Shape shape = ShapeUtil::MakeShape(U8, {96});\n auto bias = HloInstruction::CreateParameter(2, shape, \"bias\");\n Shape unused = ShapeUtil::MakeNil();\n auto input = HloInstruction::CreateParameter(0, unused, \"input\");\n 
auto filter = HloInstruction::CreateParameter(1, unused, \"filter\");\n TF_ASSERT_OK_AND_ASSIGN(CudnnReorderTransposeConfig inferred_config,\n CudnnInferTransposeForBiasReordering(shape));\n Shape reshaped = ShapeUtil::PermuteDimensions(\n inferred_config.permutation, inferred_config.transpose_shape);\n EXPECT_THAT(reshaped.dimensions(), ::testing::ElementsAre(3, 2, 4, 4));\n EXPECT_EQ(inferred_config.permutation[2], 1);\n EXPECT_EQ(inferred_config.permutation[3], 3);\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/cudnn_support_utils.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/cudnn_support_utils_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1189,"cells":{"ID":{"kind":"string","value":"351b5a0b-c4a1-46de-8a0c-dd47539046d9"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"split_k_gemm_rewriter"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/split_k_gemm_rewriter.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/split_k_gemm_rewriter_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/split_k_gemm_rewriter.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/log/check.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/cord.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/span.h\"\n#include \"xla/autotuning.pb.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include 
\"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/ir/hlo_schedule.h\"\n#include \"xla/hlo/utils/hlo_query.h\"\n#include \"xla/layout.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/service/gpu/fusions/triton/triton_support_legacy.h\"\n#include \"xla/service/gpu/ir_emission_utils.h\"\n#include \"xla/service/gpu/matmul_utils.h\"\n#include \"xla/service/gpu/triton_fusion_analysis.h\"\n#include \"xla/service/gpu/triton_tiling_propagation.h\"\n#include \"xla/service/hlo_creation_utils.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace gpu {\nbool HasDivisibleSuffixAllowingSplit(const absl::Span span,\n const int64_t divisor) {\n CHECK_GE(divisor, 1);\n int64_t product = 1;\n for (auto it = span.crbegin(); it != span.crend(); ++it) {\n product *= *it;\n if (product % divisor == 0) {\n return true;\n }\n if (divisor % product != 0) {\n return false;\n }\n }\n return false;\n}\nnamespace {\nvoid CopyIncrementingAboveThreshold(\n const tsl::protobuf::RepeatedField& source,\n tsl::protobuf::RepeatedField& destination, const int threshold) {\n destination.Reserve(source.size());\n for (int64_t x : source) {\n if (x >= threshold) {\n ++x;\n }\n destination.Add(x);\n }\n}\nvoid CopyIncrementingAboveThreshold(absl::Span source,\n DimensionVector& destination,\n const int threshold) {\n destination.reserve(source.size());\n for (int64_t x : source) {\n if (x >= threshold) {\n ++x;\n }\n destination.push_back(x);\n }\n}\nabsl::Status UncompilableMatmul(absl::string_view explanation) {\n absl::Status s = absl::CancelledError(explanation);\n s.SetPayload(kUncompilableFusion, absl::Cord(explanation));\n return s;\n}\nabsl::StatusOr MakeSparseMetaOperand(\n HloDotInstruction& dot, const 
TritonGemmConfig& config) {\n CHECK_EQ(dot.sparse_operands(), 1);\n CHECK_EQ(dot.sparsity().front().index(), 0);\n HloInstruction* meta = dot.mutable_operand(2);\n const Shape& shape = meta->shape();\n if (shape.dimensions().back() % config.split_k != 0) {\n return UncompilableMatmul(\"Sparsity metadata has incorrect shape.\");\n }\n std::vector dimensions(shape.dimensions().begin(),\n shape.dimensions().end() - 1);\n dimensions.push_back(config.split_k);\n dimensions.push_back(shape.dimensions().back() / config.split_k);\n Shape new_shape = ShapeUtil::MakeShapeWithDescendingLayout(\n shape.element_type(), dimensions);\n return MakeBitcastHlo(meta, new_shape);\n}\n} \nabsl::StatusOr MakeSplitKOperand(\n HloInstruction& dot, const TritonFusionAnalysis& analysis,\n const TritonGemmConfig& config, const int64_t contracting_dim_idx,\n const int operand_number) {\n HloInstruction* operand = dot.mutable_operand(operand_number);\n const int64_t k = operand->shape().dimensions(contracting_dim_idx);\n const bool need_padding = k % config.split_k != 0;\n TritonFusionAnalysis::Scope scope = (operand_number == 0)\n ? 
TritonFusionAnalysis::Scope::LHS\n : TritonFusionAnalysis::Scope::RHS;\n auto check_if_supported = [&](const HloInstruction& hlo,\n bool check_divisibility) {\n const TensorIterationSpec::DimIterationSpec* spec =\n analysis.IterSpec(scope, &hlo, contracting_dim_idx);\n if (spec == nullptr) {\n return absl::OkStatus();\n }\n if (spec->size() != 1) {\n return UncompilableMatmul(\"Unsupported case.\");\n }\n const TensorIterationSpec::IterationSpecFragment& fragment = spec->at(0);\n if (fragment.is_sliced()) {\n return UncompilableMatmul(\n \"Sliced contracting dimension is not supported yet.\");\n }\n if (check_divisibility && !HasDivisibleSuffixAllowingSplit(\n fragment.subfragments, config.split_k)) {\n return UncompilableMatmul(\"Contracting dimension is too fragmented.\");\n }\n if (config.split_k > ceil(1.0 * fragment.count / config.block_k)) {\n return UncompilableMatmul(\n \"Too small divisible part of the contracting dimension.\");\n }\n return absl::OkStatus();\n };\n TF_RETURN_IF_ERROR(\n check_if_supported(*operand, !need_padding));\n for (const HloInstruction* param : analysis.ScopeParameters(scope)) {\n TF_RETURN_IF_ERROR(\n check_if_supported(*param, !need_padding));\n }\n if (need_padding) {\n HloInstruction* const zero =\n dot.parent()->AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::Zero(operand->shape().element_type())));\n PaddingConfig padding_config = MakeNoPaddingConfig(operand->shape().rank());\n padding_config.mutable_dimensions(contracting_dim_idx)\n ->set_edge_padding_high(config.split_k - k % config.split_k);\n TF_ASSIGN_OR_RETURN(HloInstruction * pad,\n MakePadHlo(operand, zero, padding_config));\n *pad->mutable_shape()->mutable_layout() = operand->shape().layout();\n operand = pad;\n }\n CHECK_GE(operand->shape().dimensions(contracting_dim_idx), config.split_k);\n const Shape& shape = operand->shape();\n Shape new_shape(shape.element_type(), {}, {}, {});\n for (int i = 0; i < shape.rank(); ++i) {\n const int64_t 
dimension_size = shape.dimensions(i);\n if (i == contracting_dim_idx) {\n new_shape.add_dimensions(config.split_k);\n new_shape.add_dimensions(dimension_size / config.split_k);\n } else {\n new_shape.add_dimensions(dimension_size);\n }\n }\n Layout* new_layout = new_shape.mutable_layout();\n for (int64_t logical_dim_idx : shape.layout().minor_to_major()) {\n if (logical_dim_idx >= contracting_dim_idx) {\n new_layout->add_minor_to_major(logical_dim_idx + 1);\n }\n if (logical_dim_idx <= contracting_dim_idx) {\n new_layout->add_minor_to_major(logical_dim_idx);\n }\n }\n return MakeBitcastHlo(operand, new_shape);\n}\nabsl::Status MakeDotComputationSplitKBatch(\n HloComputation* computation, const TritonGemmConfig& config,\n bool disable_reduced_precision_reduction) {\n HloDotInstruction* dot = Cast(\n hlo_query::GetFirstInstructionWithOpcode(*computation, HloOpcode::kDot));\n TF_ASSIGN_OR_RETURN(const auto analysis,\n TritonFusionAnalysis::Execute(*computation));\n const DotDimensionNumbers& old_dim_numbers = dot->dot_dimension_numbers();\n DotDimensionNumbers new_dim_numbers;\n TF_ASSIGN_OR_RETURN(const int64_t lhs_contracting_idx,\n ContractingDimensionIndex(*dot, 0));\n CopyIncrementingAboveThreshold(\n old_dim_numbers.lhs_contracting_dimensions(),\n *new_dim_numbers.mutable_lhs_contracting_dimensions(),\n lhs_contracting_idx);\n new_dim_numbers.mutable_lhs_batch_dimensions()->Add(lhs_contracting_idx);\n CopyIncrementingAboveThreshold(\n old_dim_numbers.lhs_batch_dimensions(),\n *new_dim_numbers.mutable_lhs_batch_dimensions(), lhs_contracting_idx);\n TF_ASSIGN_OR_RETURN(const int64_t rhs_contracting_idx,\n ContractingDimensionIndex(*dot, 1));\n CopyIncrementingAboveThreshold(\n old_dim_numbers.rhs_contracting_dimensions(),\n *new_dim_numbers.mutable_rhs_contracting_dimensions(),\n rhs_contracting_idx);\n new_dim_numbers.mutable_rhs_batch_dimensions()->Add(rhs_contracting_idx);\n CopyIncrementingAboveThreshold(\n old_dim_numbers.rhs_batch_dimensions(),\n 
*new_dim_numbers.mutable_rhs_batch_dimensions(), rhs_contracting_idx);\n if (dot->sparse_operands()) {\n if (dot->sparsity().size() != 1 || dot->sparsity().front().index() != 0) {\n return UncompilableMatmul(\"Sparsity is only supported on left operand.\");\n }\n }\n std::stack to_process;\n absl::flat_hash_set to_process_set;\n HloInstruction* current = dot;\n do {\n to_process.push(current);\n CHECK(to_process_set.insert(current).second);\n if (current->users().empty()) {\n break;\n }\n CHECK_EQ(current->user_count(), 1);\n current = current->users()[0];\n if (!legacy_triton::IsDistributiveOverAddition(*current)) {\n return Cancelled(\"Operation non-distributive over addition after dot.\");\n }\n } while (true);\n bool did_pad = false;\n while (!to_process.empty()) {\n HloInstruction* current = to_process.top();\n to_process.pop();\n HloInstruction* expanded;\n if (current == dot) {\n TF_ASSIGN_OR_RETURN(\n HloInstruction * lhs,\n MakeSplitKOperand(*dot, analysis, config, lhs_contracting_idx, 0));\n TF_ASSIGN_OR_RETURN(\n HloInstruction * rhs,\n MakeSplitKOperand(*dot, analysis, config, rhs_contracting_idx, 1));\n if (lhs->operand(0)->opcode() == HloOpcode::kPad) {\n CHECK_EQ(rhs->operand(0)->opcode(), HloOpcode::kPad);\n did_pad = true;\n }\n std::vector sparsity(dot->sparsity().begin(),\n dot->sparsity().end());\n std::vector sparse_meta(sparsity.size());\n for (int i = 0; i < sparsity.size(); ++i) {\n sparsity[i].set_dimension(sparsity[i].dimension() + 1);\n TF_ASSIGN_OR_RETURN(sparse_meta[i],\n MakeSparseMetaOperand(*dot, config));\n }\n expanded = MakeDotHlo(lhs, rhs, new_dim_numbers, dot->precision_config(),\n dot->shape().element_type(), sparsity, sparse_meta)\n .value();\n expanded->mutable_shape()->mutable_layout()->clear_minor_to_major();\n CopyIncrementingAboveThreshold(dot->shape().layout().minor_to_major(),\n *expanded->mutable_shape()\n ->mutable_layout()\n ->mutable_minor_to_major(),\n 0);\n 
expanded->mutable_shape()->mutable_layout()->add_minor_to_major(0);\n dot->SetupDerivedInstruction(expanded);\n } else {\n expanded = computation->AddInstruction(current->CloneWithNewShape(\n ShapeUtil::PrependMajorDimension(config.split_k, current->shape())));\n if (expanded->opcode() == HloOpcode::kTranspose) {\n const auto* old_transpose = Cast(current);\n auto* new_transpose = Cast(expanded);\n new_transpose->mutable_dimensions()->clear();\n new_transpose->mutable_dimensions()->reserve(\n new_transpose->shape().rank());\n new_transpose->mutable_dimensions()->push_back(0);\n for (const int64_t dim : old_transpose->dimensions()) {\n new_transpose->mutable_dimensions()->push_back(dim + 1);\n }\n }\n }\n TF_RETURN_IF_ERROR(current->ReplaceAllUsesWithDifferentShape(expanded));\n TF_RETURN_IF_ERROR(computation->RemoveInstruction(current));\n if (current == dot) {\n continue;\n }\n for (int i = 0; i < expanded->operands().size(); ++i) {\n HloInstruction* operand = expanded->mutable_operand(i);\n if (!to_process_set.contains(operand)) {\n std::vector broadcast_dimensions(operand->shape().rank());\n absl::c_iota(broadcast_dimensions, 1);\n TF_RETURN_IF_ERROR(expanded->ReplaceOperandWithDifferentShape(\n i, MakeBroadcastHlo(operand, broadcast_dimensions,\n ShapeUtil::PrependMajorDimension(\n config.split_k, operand->shape()))));\n }\n }\n }\n if (disable_reduced_precision_reduction) {\n PrimitiveType output_type =\n computation->root_instruction()->shape().element_type();\n PrimitiveType accumulator_type = output_type == PrimitiveType::F64\n ? 
PrimitiveType::F64\n : PrimitiveType::F32;\n computation->root_instruction()->mutable_shape()->set_element_type(\n accumulator_type);\n }\n if (did_pad) {\n TF_RETURN_IF_ERROR(\n TritonFusionAnalysis::Execute(*computation, config.split_k).status());\n }\n return absl::OkStatus();\n}\nabsl::Status MakeDotSplitKBatch(HloInstruction* dot_fusion,\n const TritonGemmConfig& config) {\n CHECK_EQ(dot_fusion->opcode(), HloOpcode::kFusion);\n if (dot_fusion->shape().IsTuple()) {\n return Unimplemented(\"Tuple output is not supported with split-K yet.\");\n }\n const bool disable_reduced_precision_reduction =\n dot_fusion->GetModule()\n ->config()\n .debug_options()\n .xla_gpu_triton_gemm_disable_reduced_precision_reduction();\n const PrimitiveType output_type = dot_fusion->shape().element_type();\n const Layout output_layout = dot_fusion->shape().layout();\n TF_RETURN_IF_ERROR(MakeDotComputationSplitKBatch(\n dot_fusion->fused_instructions_computation(), config,\n disable_reduced_precision_reduction));\n const HloInstruction* root = dot_fusion->fused_expression_root();\n *dot_fusion->mutable_shape() = root->shape();\n HloInstruction* zero =\n dot_fusion->parent()->AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::Zero(root->shape().element_type())));\n TF_ASSIGN_OR_RETURN(HloInstruction * reduce,\n MakeReduceHlo(dot_fusion, zero, {0},\n HloOpcode::kAdd, &dot_fusion->metadata()));\n *reduce->mutable_shape()->mutable_layout() = output_layout;\n if (dot_fusion->IsRoot()) {\n dot_fusion->parent()->set_root_instruction(reduce,\n true);\n } else {\n TF_RETURN_IF_ERROR(dot_fusion->ReplaceAllUsesWithDifferentShape(reduce));\n }\n if (disable_reduced_precision_reduction) {\n HloInstruction* convert = MakeConvertToHlo(reduce, output_type);\n if (reduce->IsRoot()) {\n reduce->parent()->set_root_instruction(convert,\n true);\n } else {\n TF_RETURN_IF_ERROR(reduce->ReplaceAllUsesWithDifferentShape(convert));\n }\n }\n return absl::OkStatus();\n}\n} \n} "},"Unit Test - (Ground 
Truth)":{"kind":"string","value":"#include \"xla/service/gpu/split_k_gemm_rewriter.h\"\n#include \n#include \n#include \n#include \n#include \"absl/strings/str_format.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/autotuning.pb.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/layout.h\"\n#include \"xla/service/gpu/matmul_utils.h\"\n#include \"xla/service/gpu/triton_fusion_analysis.h\"\n#include \"xla/service/hlo_verifier.h\"\n#include \"xla/service/layout_assignment.h\"\n#include \"xla/service/pattern_matcher.h\"\n#include \"xla/service/pattern_matcher_gmock.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tests/verified_hlo_module.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\n#include \"xla/xla.pb.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/status_matchers.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nusing ::testing::ElementsAre;\nusing ::testing::FieldsAre;\nnamespace m = ::xla::match;\nTEST(HasDivisibleSuffixAllowingSplitTest, AllTests) {\n EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({1}, 1));\n EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({2}, 2));\n EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({2, 2}, 2));\n EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({3, 2}, 6));\n EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({2, 3, 2}, 6));\n EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({15, 2}, 6));\n EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({3, 15, 2}, 6));\n EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({}, 1));\n EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({1}, 2));\n EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({3}, 2));\n EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({2, 3}, 2));\n}\nusing SplitKTest = HloTestBase;\nTEST_F(SplitKTest, MakeSplitK) {\n const std::string 
hlo_text = R\"(\nHloModule t\ntriton_gemm_dot {\n parameter_0 = s8[3,128,5,32]{3,2,1,0} parameter(0)\n bitcast.1 = s8[3,5,32,128]{2,1,3,0} bitcast(parameter_0)\n copy.1 = s8[3,5,32,128]{3,2,1,0} copy(bitcast.1)\n reshape.5 = s8[480,128]{1,0} reshape(copy.1)\n convert.8 = bf16[480,128]{1,0} convert(reshape.5)\n parameter_1 = bf16[16,128]{1,0} parameter(1)\n ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1),\n lhs_contracting_dims={1}, rhs_contracting_dims={1}\n}\nENTRY e {\n p0 = s8[3,128,5,32]{3,2,1,0} parameter(0)\n p1 = bf16[16,128]{1,0} parameter(1)\n ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),\n kind=kCustom, calls=triton_gemm_dot, backend_config=\"__triton_gemm\",\n metadata={op_name=\"foo\"}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n TritonGemmConfig config(16, 16, 16, 4, 1, 4);\n TF_EXPECT_OK(MakeDotSplitKBatch(\n module->entry_computation()->root_instruction(), config));\n const HloInstruction* root = module->entry_computation()->root_instruction();\n EXPECT_EQ(root->opcode(), HloOpcode::kReduce);\n EXPECT_EQ(root->metadata().op_name(), \"foo\");\n}\nTEST_F(SplitKTest, MakeSplitKWithOutputFusion) {\n const std::string hlo_text = R\"(\nHloModule t\ntriton_gemm_dot {\n p0 = f16[480,128]{1,0} parameter(0)\n p1 = f16[16,128]{1,0} parameter(1)\n d = f16[480,16]{1,0} dot(p0, p1),\n lhs_contracting_dims={1}, rhs_contracting_dims={1}\n c = bf16[] constant(123)\n n = bf16[] negate(c)\n bc = bf16[480,16]{1,0} broadcast(n)\n cv = bf16[480,16]{1,0} convert(d)\n ROOT a = bf16[480,16]{1,0} multiply(bc, cv)\n}\nENTRY e {\n p0 = f16[480,128]{1,0} parameter(0)\n p1 = f16[16,128]{1,0} parameter(1)\n ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),\n kind=kCustom, calls=triton_gemm_dot, backend_config=\"__triton_gemm\"\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n TritonGemmConfig config(16, 16, 16, 4, 1, 4);\n TF_EXPECT_OK(MakeDotSplitKBatch(\n 
module->entry_computation()->root_instruction(), config));\n EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),\n HloOpcode::kReduce);\n}\nTEST_F(SplitKTest, PreventSplitKWithNonDistributiveOperations) {\n const std::string hlo_text = R\"(\nHloModule t\ntriton_gemm_dot {\n p0 = f16[480,128]{1,0} parameter(0)\n p1 = f16[16,128]{1,0} parameter(1)\n d = f16[480,16]{1,0} dot(p0, p1),\n lhs_contracting_dims={1}, rhs_contracting_dims={1}\n c = f32[480,16]{1,0} convert(d)\n ROOT s = f32[480,16]{1,0} tanh(c)\n}\nENTRY e {\n p0 = f16[480,128]{1,0} parameter(0)\n p1 = f16[16,128]{1,0} parameter(1)\n ROOT fusion = f32[480,16]{1,0} fusion(p0, p1),\n kind=kCustom, calls=triton_gemm_dot, backend_config=\"__triton_gemm\"\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n TritonGemmConfig config(16, 16, 16, 4, 1, 4);\n EXPECT_THAT(MakeDotSplitKBatch(\n module->entry_computation()->root_instruction(), config),\n tsl::testing::StatusIs(\n tsl::error::CANCELLED,\n absl::StrFormat(\n \"Operation non-distributive over addition after dot.\")));\n}\nTEST_F(SplitKTest, MakeSplitKWithNonDivisibleDimensionSize) {\n constexpr absl::string_view kHloText = R\"(\nt {\n c1 = s32[] constant(1)\n bc1 = s32[31]{0} broadcast(c1), dimensions={}\n p0 = s32[31]{0} parameter(0)\n cmp = pred[31]{0} compare(bc1, p0), direction=EQ\n cvt = f32[31]{0} convert(cmp)\n bc2 = f32[17,31]{1,0} broadcast(cvt), dimensions={1}\n c0 = f32[] constant(0)\n bc0 = f32[17,16]{1,0} broadcast(c0), dimensions={}\n ROOT dot = f32[31,16]{1,0} dot(bc2, bc0),\n lhs_contracting_dims={0}, rhs_contracting_dims={0}\n}\nENTRY e {\n p0 = s32[31]{0} parameter(0)\n ROOT r = f32[31,16]{1,0} fusion(p0),\n kind=kCustom, calls=t, backend_config=\"__triton_gemm\"\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloText));\n TritonGemmConfig config(16, 16, 16, 2, 1, 2);\n TF_EXPECT_OK(MakeDotSplitKBatch(\n 
module->entry_computation()->root_instruction(), config));\n}\nTEST_F(SplitKTest, AvoidSplitKWithSlicedContractingDimension) {\n const std::string hlo_text = R\"(\nt {\n p0 = f16[32,1234] parameter(0)\n s0 = f16[32,256] slice(p0), slice={[0:32], [41:297]}\n p1 = f16[256,768] parameter(1)\n ROOT d = f16[32,768] dot(s0, p1),\n lhs_contracting_dims={1}, rhs_contracting_dims={0}\n}\nENTRY e {\n p0 = f16[32,1234] parameter(0)\n p1 = f16[256,768] parameter(1)\n ROOT r = f16[32,768] fusion(p0, p1),\n kind=kCustom, calls=t, backend_config=\"__triton_gemm\"\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n TritonGemmConfig config(16, 16, 16, 2, 1, 2);\n EXPECT_THAT(MakeDotSplitKBatch(\n module->entry_computation()->root_instruction(), config),\n tsl::testing::StatusIs(\n tsl::error::CANCELLED,\n absl::StrFormat(\n \"Sliced contracting dimension is not supported yet.\")));\n}\nTEST_F(SplitKTest, MakeSplitKWithNonStandardOutputLayout) {\n const std::string kHloText = R\"(\nHloModule t\ntriton_gemm_dot {\n parameter_0 = s8[3,128,5,32]{3,2,1,0} parameter(0)\n bitcast.1 = s8[3,5,32,128]{2,1,3,0} bitcast(parameter_0)\n copy.1 = s8[3,5,32,128]{3,2,1,0} copy(bitcast.1)\n reshape.5 = s8[480,128]{1,0} reshape(copy.1)\n convert.8 = bf16[480,128]{1,0} convert(reshape.5)\n parameter_1 = bf16[16,128]{1,0} parameter(1)\n ROOT dot.0 = bf16[480,16]{0,1} dot(convert.8, parameter_1),\n lhs_contracting_dims={1}, rhs_contracting_dims={1}\n}\nENTRY e {\n p0 = s8[3,128,5,32]{3,2,1,0} parameter(0)\n p1 = bf16[16,128]{1,0} parameter(1)\n ROOT fusion = bf16[480,16]{0,1} fusion(p0, p1),\n kind=kCustom, calls=triton_gemm_dot, backend_config=\"__triton_gemm\"\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloText));\n TritonGemmConfig config(16, 16, 16, 4, 1, 4);\n TF_EXPECT_OK(MakeDotSplitKBatch(\n module->entry_computation()->root_instruction(), config));\n 
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),\n HloOpcode::kReduce);\n EXPECT_EQ(module->entry_computation()->root_instruction()->shape().layout(),\n Layout({0, 1}));\n}\nTEST_F(SplitKTest, MakeSplitKWithExistingBatchDim) {\n const std::string hlo_text = R\"(\nHloModule m\ntriton_gemm_dot.24 {\n parameter_1 = bf16[1,1,800,5,128]{4,3,2,1,0} parameter(1)\n bitcast.3 = bf16[800,5,128]{2,1,0} bitcast(parameter_1)\n convert.3 = f32[800,5,128]{2,1,0} convert(bitcast.3)\n parameter_0 = f32[1,5,700,800]{3,2,1,0} parameter(0)\n bitcast.2 = f32[5,700,800]{2,1,0} bitcast(parameter_0)\n ROOT dot.26 = f32[5,128,700]{2,1,0} dot(convert.3, bitcast.2),\n lhs_batch_dims={1}, lhs_contracting_dims={0},\n rhs_batch_dims={0}, rhs_contracting_dims={2}\n}\nENTRY e {\n tmp_3 = f32[1,5,700,800]{3,2,1,0} parameter(0)\n tmp_0 = bf16[1,1,800,5,128]{4,3,2,1,0} parameter(1)\n ROOT triton_gemm_dot.24 = f32[5,128,700]{2,1,0} fusion(tmp_3, tmp_0),\n kind=kCustom, calls=triton_gemm_dot.24,\n backend_config=\"__triton_gemm\"\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n TritonGemmConfig config(32, 64, 64, 8, 1, 4);\n TF_EXPECT_OK(MakeDotSplitKBatch(\n module->entry_computation()->root_instruction(), config));\n EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),\n HloOpcode::kReduce);\n}\nTEST_F(SplitKTest, SupportsIndivisible) {\n constexpr absl::string_view kHloText = R\"(\nHloModule t\ntriton_gemm_dot {\n parameter_0 = s8[3,129,5,32]{3,2,1,0} parameter(0)\n bitcast.1 = s8[3,5,32,129]{2,1,3,0} bitcast(parameter_0)\n copy.1 = s8[3,5,32,129]{3,2,1,0} copy(bitcast.1)\n reshape.5 = s8[480,129]{1,0} reshape(copy.1)\n convert.8 = bf16[480,129]{1,0} convert(reshape.5)\n parameter_1 = bf16[16,129]{1,0} parameter(1)\n ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1),\n lhs_contracting_dims={1}, rhs_contracting_dims={1}\n}\nENTRY e {\n p0 = s8[3,129,5,32]{3,2,1,0} parameter(0)\n p1 = bf16[16,129]{1,0} 
parameter(1)\n ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),\n kind=kCustom, calls=triton_gemm_dot, backend_config=\"__triton_gemm\"\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloText));\n TritonGemmConfig config(16, 16, 16, 4, 1, 4);\n TF_EXPECT_OK(MakeDotSplitKBatch(\n module->entry_computation()->root_instruction(), config));\n}\nTEST_F(SplitKTest, SupportsIndivisibleSimpleSplitK4) {\n constexpr absl::string_view kHloText = R\"(\nHloModule t\ntriton_gemm_dot {\n parameter_0 = s8[480,129]{1,0} parameter(0)\n convert_0 = bf16[480,129]{1,0} convert(parameter_0)\n parameter_1 = bf16[16,129]{1,0} parameter(1)\n ROOT dot.0 = bf16[480,16]{1,0} dot(convert_0, parameter_1),\n lhs_contracting_dims={1}, rhs_contracting_dims={1}\n}\nENTRY e {\n p0 = s8[480,129]{1,0} parameter(0)\n p1 = bf16[16,129]{1,0} parameter(1)\n ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),\n kind=kCustom, calls=triton_gemm_dot, backend_config=\"__triton_gemm\"\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloText));\n TritonGemmConfig config(16, 16, 16, 4, 1, 4);\n TF_EXPECT_OK(MakeDotSplitKBatch(\n module->entry_computation()->root_instruction(), config));\n}\nTEST_F(SplitKTest, SupportsIndivisibleWithCustomLayout) {\n constexpr absl::string_view kHloText = R\"(\nHloModule t\ntriton_gemm_dot {\n parameter_0 = s8[480,129]{0,1} parameter(0)\n convert_0 = bf16[480,129]{0,1} convert(parameter_0)\n parameter_1 = bf16[16,129]{0,1} parameter(1)\n ROOT dot.0 = bf16[480,16]{1,0} dot(convert_0, parameter_1),\n lhs_contracting_dims={1}, rhs_contracting_dims={1}\n}\nENTRY e {\n p0 = s8[480,129]{0,1} parameter(0)\n p1 = bf16[16,129]{0,1} parameter(1)\n ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),\n kind=kCustom, calls=triton_gemm_dot, backend_config=\"__triton_gemm\"\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloText));\n constexpr TritonGemmConfig kConfig(16, 16, 16, 4, 
1, 4);\n TF_EXPECT_OK(MakeDotSplitKBatch(\n module->entry_computation()->root_instruction(), kConfig));\n TF_EXPECT_OK(HloVerifier(true,\n true,\n LayoutAssignment::InstructionCanChangeLayout)\n .Run(module.get())\n .status());\n}\nTEST_F(SplitKTest, SupportsIndivisibleSimpleSplitK16) {\n constexpr absl::string_view kHloText = R\"(\nHloModule t\ntriton_gemm_dot {\n parameter_0 = s8[480,255]{1,0} parameter(0)\n convert_0 = bf16[480,255]{1,0} convert(parameter_0)\n parameter_1 = bf16[16,255]{1,0} parameter(1)\n ROOT dot.0 = bf16[480,16]{1,0} dot(convert_0, parameter_1),\n lhs_contracting_dims={1}, rhs_contracting_dims={1}\n}\nENTRY e {\n p0 = s8[480,255]{1,0} parameter(0)\n p1 = bf16[16,255]{1,0} parameter(1)\n ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),\n kind=kCustom, calls=triton_gemm_dot, backend_config=\"__triton_gemm\"\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloText));\n TritonGemmConfig config(16, 16, 16, 16, 1, 4);\n TF_EXPECT_OK(MakeDotSplitKBatch(\n module->entry_computation()->root_instruction(), config));\n}\nTEST_F(SplitKTest, SupportsIndivisibleWithTranspose) {\n constexpr absl::string_view kHloText = R\"(\nHloModule t\ntriton_gemm_dot {\n parameter_0 = s8[480,255]{1,0} parameter(0)\n convert_0 = bf16[480,255]{1,0} convert(parameter_0)\n transpose_0 = bf16[255,480]{1,0} transpose(convert_0), dimensions={1,0}\n parameter_1 = bf16[16,255]{1,0} parameter(1)\n ROOT dot.0 = bf16[480,16]{1,0} dot(transpose_0, parameter_1),\n lhs_contracting_dims={0}, rhs_contracting_dims={1}\n}\nENTRY e {\n p0 = s8[480,255]{1,0} parameter(0)\n p1 = bf16[16,255]{1,0} parameter(1)\n ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),\n kind=kCustom, calls=triton_gemm_dot, backend_config=\"__triton_gemm\"\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloText));\n TritonGemmConfig config(16, 16, 16, 16, 1, 4);\n TF_EXPECT_OK(MakeDotSplitKBatch(\n 
module->entry_computation()->root_instruction(), config));\n}\nTEST_F(SplitKTest, SupportIndivisibleWithBroadcast) {\n constexpr absl::string_view kHloText = R\"(\nHloModule t\ntriton_gemm_dot {\n parameter_0 = s8[] parameter(0)\n convert_0 = bf16[] convert(parameter_0)\n broadcast_0 = bf16[480,255]{1,0} broadcast(convert_0)\n parameter_1 = bf16[16,255]{1,0} parameter(1)\n ROOT dot.0 = bf16[480,16]{1,0} dot(broadcast_0, parameter_1),\n lhs_contracting_dims={1}, rhs_contracting_dims={1}\n}\nENTRY e {\n p0 = s8[] parameter(0)\n p1 = bf16[16,255]{1,0} parameter(1)\n ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),\n kind=kCustom, calls=triton_gemm_dot, backend_config=\"__triton_gemm\"\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloText));\n TritonGemmConfig config(16, 16, 16, 16, 1, 4);\n TF_EXPECT_OK(MakeDotSplitKBatch(\n module->entry_computation()->root_instruction(), config));\n}\nTEST_F(SplitKTest, SupportsIndivisibleWithBitcast) {\n constexpr absl::string_view kHloText = R\"(\nHloModule t\ntriton_gemm_dot {\n parameter_0 = s8[3,5,480,17]{3,0,1,2} parameter(0)\n convert_0 = bf16[3,5,480,17]{3,0,1,2} convert(parameter_0)\n bitcast_0 = bf16[480,255]{1,0} bitcast(convert_0)\n parameter_1 = bf16[16,255]{1,0} parameter(1)\n ROOT dot.0 = bf16[480,16]{1,0} dot(bitcast_0, parameter_1),\n lhs_contracting_dims={1}, rhs_contracting_dims={1}\n}\nENTRY e {\n p0 = s8[3,5,480,17]{3,0,1,2} parameter(0)\n p1 = bf16[16,255]{1,0} parameter(1)\n ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),\n kind=kCustom, calls=triton_gemm_dot, backend_config=\"__triton_gemm\"\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloText));\n TritonGemmConfig config(16, 16, 16, 16, 1, 4);\n TF_EXPECT_OK(MakeDotSplitKBatch(\n module->entry_computation()->root_instruction(), config));\n}\nTEST_F(SplitKTest, SkipSmallK) {\n const std::string hlo_text = R\"(\nHloModule t\ntriton_gemm_dot {\n parameter_0 = 
s8[3,64,5,32]{3,2,1,0} parameter(0)\n bitcast.1 = s8[3,5,32,64]{2,1,3,0} bitcast(parameter_0)\n copy.1 = s8[3,5,32,64]{3,2,1,0} copy(bitcast.1)\n reshape.5 = s8[480,64]{1,0} reshape(copy.1)\n convert.8 = bf16[480,64]{1,0} convert(reshape.5)\n parameter_1 = bf16[16,64]{1,0} parameter(1)\n ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1),\n lhs_contracting_dims={1}, rhs_contracting_dims={1}\n}\nENTRY e {\n p0 = s8[3,64,5,32]{3,2,1,0} parameter(0)\n p1 = bf16[16,64]{1,0} parameter(1)\n ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),\n kind=kCustom, calls=triton_gemm_dot, backend_config=\"__triton_gemm\"\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n TritonGemmConfig config(16, 16, 128, 4, 1, 4);\n EXPECT_THAT(MakeDotSplitKBatch(\n module->entry_computation()->root_instruction(), config),\n tsl::testing::StatusIs(\n tsl::error::CANCELLED,\n \"Too small divisible part of the contracting dimension.\"));\n}\nTEST_F(SplitKTest, FragmentedKSupported) {\n const std::string hlo_text = R\"(\nHloModule t\ntriton_gemm_dot {\n p0 = f16[7,2,16,4,20] parameter(0)\n t0 = f16[2,16,4,20,7] transpose(p0), dimensions={1,2,3,4,0}\n b0 = f16[2560,7] bitcast(t0)\n a1 = f16[2560,5] parameter(1)\n ROOT r = f16[7,5] dot(b0, a1),\n lhs_contracting_dims={0}, rhs_contracting_dims={0}\n}\nENTRY e {\n p0 = f16[7,2,16,4,20] parameter(0)\n p1 = f16[2560,5] parameter(1)\n ROOT fusion = f16[7,5] fusion(p0, p1),\n kind=kCustom, calls=triton_gemm_dot, backend_config=\"__triton_gemm\"\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n TritonGemmConfig config(32, 32, 16, 1, 1, 4);\n config.split_k = 5;\n EXPECT_THAT(\n MakeDotSplitKBatch(module->entry_computation()->root_instruction(),\n config),\n tsl::testing::StatusIs(tsl::error::CANCELLED,\n \"Contracting dimension is too fragmented.\"));\n config.split_k = 8;\n TF_EXPECT_OK(MakeDotSplitKBatch(\n 
module->entry_computation()->root_instruction(), config));\n const HloInstruction* root = module->entry_computation()->root_instruction();\n EXPECT_EQ(root->opcode(), HloOpcode::kReduce);\n const HloComputation* dot_computation = module->entry_computation()\n ->root_instruction()\n ->operand(0)\n ->called_computations()[0];\n const HloInstruction* p0 = dot_computation->parameter_instruction(0);\n TF_ASSERT_OK_AND_ASSIGN(\n const auto analysis,\n TritonFusionAnalysis::Execute(*dot_computation, config.split_k));\n EXPECT_EQ(dot_computation->root_instruction()->shape(),\n ShapeUtil::MakeShapeWithDescendingLayout(F16, {8, 7, 5}));\n EXPECT_THAT(\n *analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),\n ElementsAre(FieldsAre(1, 2560, 0,\n 2560,\n ElementsAre(20, 4, 4, 4, 2))));\n}\nTEST_F(SplitKTest, FragmentedKUnsupported) {\n const std::string hlo_text = R\"(\nHloModule t\ntriton_gemm_dot {\n p0 = f32[3,128,77] parameter(0)\n b0 = f32[384,77] bitcast(p0)\n a1 = f32[384,25] parameter(1)\n ROOT r = f32[77,25] dot(b0, a1),\n lhs_contracting_dims={0}, rhs_contracting_dims={0}\n}\nENTRY e {\n p0 = f32[3,128,77] parameter(0)\n p1 = f32[384,25] parameter(1)\n ROOT fusion = f32[77,25] fusion(p0, p1),\n kind=kCustom, calls=triton_gemm_dot, backend_config=\"__triton_gemm\"\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n TritonGemmConfig config(16, 16, 16, 4, 1, 4);\n EXPECT_THAT(\n MakeDotSplitKBatch(module->entry_computation()->root_instruction(),\n config),\n tsl::testing::StatusIs(tsl::error::CANCELLED,\n \"Contracting dimension is too fragmented.\"));\n}\nTEST_F(SplitKTest, MakeSplitKWithNonDefaultOutputLayout) {\n const std::string kHloText = R\"(\ntriton_gemm_dot.4842_computation {\n parameter_0 = bf16[96,96]{1,0} parameter(0)\n parameter_1 = bf16[96,7]{1,0} parameter(1)\n dot.0 = bf16[96,7]{0,1} dot(parameter_0, parameter_1),\n lhs_contracting_dims={1}, rhs_contracting_dims={0}\n ROOT bitcast.2 = 
bf16[7,3,32]{2,1,0} bitcast(dot.0)\n}\nENTRY e {\n parameter_0.91 = bf16[96,96]{1,0} parameter(0)\n parameter_1.86 = bf16[96,7]{1,0} parameter(1)\n ROOT triton_gemm_dot.4842 = bf16[7,3,32]{2,1,0}\n fusion(parameter_0.91, parameter_1.86), kind=kCustom,\n calls=triton_gemm_dot.4842_computation\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloText));\n TritonGemmConfig config(16, 16, 16, 2, 1, 4);\n TF_EXPECT_OK(MakeDotSplitKBatch(\n module->entry_computation()->root_instruction(), config));\n EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),\n HloOpcode::kReduce);\n const HloComputation* dot_computation = module->entry_computation()\n ->root_instruction()\n ->operand(0)\n ->called_computations()[0];\n TF_ASSERT_OK_AND_ASSIGN(const auto analysis,\n TritonFusionAnalysis::Execute(*dot_computation));\n}\nTEST_F(SplitKTest, SparseDotWithLhsSparseOperandIsRewritten) {\n const std::string hlo_text = R\"(\nHloModule test\ntriton_gemm {\n lhs = f16[2,5,1600] parameter(0)\n rhs = f16[2,3200,10] parameter(1)\n meta = u16[2,5,200] parameter(2)\n ROOT dot = f32[2,5,10] dot(lhs, rhs, meta),\n lhs_batch_dims={0}, rhs_batch_dims={0},\n lhs_contracting_dims={2}, rhs_contracting_dims={1}, sparsity=L.2@2:4\n}\nENTRY e {\n lhs = f16[2,5,1600] parameter(0)\n rhs = f16[2,3200,10] parameter(1)\n meta = u16[2,5,200] parameter(2)\n ROOT fusion = f32[2,5,10] fusion(lhs, rhs, meta),\n kind=kCustom, calls=triton_gemm, backend_config=\"__triton_gemm\"\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n TritonGemmConfig config(16, 16, 16, 4, 1, 1);\n TF_EXPECT_OK(MakeDotSplitKBatch(\n module->entry_computation()->root_instruction(), config));\n const HloInstruction* root = module->entry_computation()->root_instruction();\n EXPECT_EQ(root->opcode(), HloOpcode::kReduce);\n HloInstruction* dot =\n module->GetComputationWithName(\"triton_gemm\")->root_instruction();\n 
EXPECT_EQ(dot->operand(0)->shape(),\n ShapeUtil::MakeShapeWithDescendingLayout(F16, {2, 5, 4, 400}));\n EXPECT_EQ(dot->operand(1)->shape(),\n ShapeUtil::MakeShapeWithDescendingLayout(F16, {2, 4, 800, 10}));\n EXPECT_EQ(dot->operand(2)->shape(),\n ShapeUtil::MakeShapeWithDescendingLayout(U16, {2, 5, 4, 50}));\n}\nTEST_F(SplitKTest, SparseDotWithRhsSparseOperandTriggersError) {\n const std::string hlo_text = R\"(\nHloModule test\ntriton_gemm {\n lhs = f16[2,5,3200] parameter(0)\n rhs = f16[2,1600,10] parameter(1)\n meta = u16[2,200,10] parameter(2)\n ROOT dot = f32[2,5,10] dot(lhs, rhs, meta),\n lhs_batch_dims={0}, rhs_batch_dims={0},\n lhs_contracting_dims={2}, rhs_contracting_dims={1}, sparsity=R.1@2:4\n}\nENTRY e {\n lhs = f16[2,5,3200] parameter(0)\n rhs = f16[2,1600,10] parameter(1)\n meta = u16[2,200,10] parameter(2)\n ROOT fusion = f32[2,5,10] fusion(lhs, rhs, meta),\n kind=kCustom, calls=triton_gemm, backend_config=\"__triton_gemm\"\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n TritonGemmConfig config(16, 16, 16, 4, 1, 1);\n auto result = MakeDotSplitKBatch(\n module->entry_computation()->root_instruction(), config);\n EXPECT_FALSE(result.ok());\n}\nclass SplitKTestWithMorePreciseReduction\n : public HloTestBase,\n public ::testing::WithParamInterface {\n public:\n DebugOptions GetDebugOptionsForTest() override {\n DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();\n debug_options.set_xla_gpu_triton_gemm_disable_reduced_precision_reduction(\n true);\n return debug_options;\n }\n};\nTEST_F(SplitKTestWithMorePreciseReduction, MakeSplitK) {\n constexpr absl::string_view kHloText = R\"(\nHloModule t\ntriton_gemm_dot {\n parameter_0 = s8[3,128,5,32]{3,2,1,0} parameter(0)\n bitcast.1 = s8[3,5,32,128]{2,1,3,0} bitcast(parameter_0)\n copy.1 = s8[3,5,32,128]{3,2,1,0} copy(bitcast.1)\n reshape.5 = s8[480,128]{1,0} reshape(copy.1)\n convert.8 = bf16[480,128]{1,0} convert(reshape.5)\n parameter_1 = 
bf16[16,128]{1,0} parameter(1)\n ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1),\n lhs_contracting_dims={1}, rhs_contracting_dims={1}\n}\nENTRY e {\n p0 = s8[3,128,5,32]{3,2,1,0} parameter(0)\n p1 = bf16[16,128]{1,0} parameter(1)\n ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),\n kind=kCustom, calls=triton_gemm_dot, backend_config=\"__triton_gemm\"\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloText));\n TritonGemmConfig config(16, 16, 16, 4, 1, 4);\n TF_EXPECT_OK(MakeDotSplitKBatch(\n module->entry_computation()->root_instruction(), config));\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::Convert(m::Reduce(m::Fusion(), m::Constant()))));\n}\nTEST_F(SplitKTestWithMorePreciseReduction, MakeSplitKWithOutputFusion) {\n const std::string hlo_text = R\"(\nHloModule t\ntriton_gemm_dot {\n p0 = f16[480,128]{1,0} parameter(0)\n p1 = f16[16,128]{1,0} parameter(1)\n d = f16[480,16]{1,0} dot(p0, p1),\n lhs_contracting_dims={1}, rhs_contracting_dims={1}\n c = bf16[] constant(123)\n n = bf16[] negate(c)\n bc = bf16[480,16]{1,0} broadcast(n)\n cv = bf16[480,16]{1,0} convert(d)\n ROOT a = bf16[480,16]{1,0} multiply(bc, cv)\n}\nENTRY e {\n p0 = f16[480,128]{1,0} parameter(0)\n p1 = f16[16,128]{1,0} parameter(1)\n ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),\n kind=kCustom, calls=triton_gemm_dot, backend_config=\"__triton_gemm\"\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n TritonGemmConfig config(16, 16, 16, 4, 1, 4);\n TF_EXPECT_OK(MakeDotSplitKBatch(\n module->entry_computation()->root_instruction(), config));\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::Convert(m::Reduce(m::Fusion(), m::Constant()))));\n}\nTEST_F(SplitKTest, MakeSplitKWithTransposeAfterDot) {\n const std::string hlo_text = R\"(\ntriton_gemm_dot {\n p0 = f16[8,288,288]{2,1,0} parameter(0)\n p1 = f16[8,288,32]{2,0,1} parameter(1)\n d 
= f16[8,288,32]{2,1,0} dot(p0, p1),\n lhs_batch_dims={0}, lhs_contracting_dims={2},\n rhs_batch_dims={0}, rhs_contracting_dims={1}\n ROOT t = f16[288,8,32]{2,1,0} transpose(d), dimensions={1,0,2}\n}\nENTRY e {\n p0 = f16[8,288,288]{2,1,0} parameter(0)\n p1 = f16[8,288,32]{2,0,1} parameter(1)\n ROOT fusion = f16[288,8,32]{2,1,0} fusion(p0, p1),\n kind=kCustom, calls=triton_gemm_dot\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n TritonGemmConfig config(16, 128, 32, 8, 1, 4);\n TF_EXPECT_OK(MakeDotSplitKBatch(\n module->entry_computation()->root_instruction(), config));\n const auto* transpose =\n Cast(module->entry_computation()\n ->root_instruction()\n ->operand(0)\n ->fused_instructions_computation()\n ->root_instruction());\n EXPECT_THAT(transpose->dimensions(), ElementsAre(0, 2, 1, 3));\n}\nTEST_F(SplitKTest, MakeSplitKWithTrivialDimension) {\n const std::string hlo_text = R\"(\ntriton_gemm_dot {\n parameter_0 = f32[1001,1]{1,0} parameter(0)\n parameter_1 = f32[1001,2048]{1,0} parameter(1)\n ROOT dot = f32[1,2048]{1,0} dot(parameter_0, parameter_1),\n lhs_contracting_dims={0}, rhs_contracting_dims={0}\n}\nENTRY %entry_computation {\n p0 = f32[1001,1]{1,0} parameter(0)\n p1 = f32[1001,2048]{1,0} parameter(1)\n ROOT fusion = f32[1,2048]{1,0} fusion(p0, p1), kind=kCustom,\n calls=triton_gemm_dot\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n TritonGemmConfig config(16, 128, 64, 4, 1, 4);\n TF_EXPECT_OK(MakeDotSplitKBatch(\n module->entry_computation()->root_instruction(), config));\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(m::Reduce(m::Fusion(), m::Constant())));\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/split_k_gemm_rewriter.cc"},"Test Code 
Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/split_k_gemm_rewriter_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1190,"cells":{"ID":{"kind":"string","value":"9c586ff2-998f-46bb-8cd1-365898f411cf"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"triton_fusion_analysis"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/triton_fusion_analysis.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/triton_fusion_analysis_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/triton_fusion_analysis.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/status.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_join.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_query.h\"\n#include \"xla/service/gpu/cudnn_support_utils.h\"\n#include \"xla/service/gpu/matmul_utils.h\"\n#include \"xla/service/gpu/triton_tiling_propagation.h\"\n#include \"xla/service/instruction_fusion.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/status_macros.h\"\n#include \"xla/tools/hlo_decomposer.h\"\n#include \"xla/util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nusing triton_fusion::DimOrdersAndReqs;\nusing triton_fusion::DimOrdersAndReqsOrError;\nusing 
triton_fusion::DotRequirements;\nusing triton_fusion::FusionContext;\nusing triton_fusion::GetPropagatedDimOrdersAndRequirements;\nusing triton_fusion::kNoSplitRequirement;\nusing triton_fusion::TransformDirection;\n} \nnamespace triton_fusion {\n absl::StatusOr FusionContext::FromDotOperand(\n const HloInstruction& dot, const int operand_number, const int split_k) {\n const int num_split_k_batch_dims = split_k > 1;\n int split_k_dimension_index = kNoDimensionIndex;\n TF_ASSIGN_OR_RETURN(int contracting_dimension_index,\n ContractingDimensionIndex(dot, operand_number));\n TF_ASSIGN_OR_RETURN(int non_contracting_dimension_index,\n NonContractingDimensionIndex(dot, operand_number));\n if (split_k > 1) {\n split_k_dimension_index = contracting_dimension_index - 1;\n }\n int splittable_dimension_index = kNoDimensionIndex;\n if (operand_number == 0 &&\n dot.dot_dimension_numbers().lhs_batch_dimensions_size() -\n num_split_k_batch_dims ==\n 0) {\n splittable_dimension_index = non_contracting_dimension_index;\n }\n FusionContext context(DotProperties{non_contracting_dimension_index,\n splittable_dimension_index},\n DotRequirements(kNoSplitRequirement));\n context.dim_orders_[dot.operand(operand_number)] =\n DimensionOrder::FromDotOperandOrOutput(*dot.operand(operand_number),\n split_k_dimension_index);\n return context;\n}\n FusionContext FusionContext::FromDotOutput(\n const HloInstruction& dot, const int split_k,\n DotRequirements requirements) {\n int splittable_dimension_index = kNoDimensionIndex;\n if (requirements.splittable_dimension_major_part_size > 1) {\n splittable_dimension_index = (split_k > 1) ? 
1 : 0;\n }\n FusionContext context(DotProperties{-1,\n splittable_dimension_index},\n std::move(requirements));\n context.dim_orders_[&dot] = DimensionOrder::FromDotOperandOrOutput(dot);\n return context;\n}\nnamespace {\nint64_t NumAddedParameters(const HloInstruction& hlo) {\n if (hlo.opcode() == HloOpcode::kConstant &&\n !ShapeUtil::IsScalar(hlo.shape())) {\n return 0;\n }\n return hlo.operand_count() - 1;\n}\n} \nbool FusionContext::CombineDimOrdersAndReqs(const DimOrdersAndReqs& update) {\n for (const auto& [key, value] : update.dim_orders) {\n auto it = dim_orders_.find(key);\n if (it != dim_orders_.cend() && !it->second.IsPhysicallyEquivalent(value)) {\n return false;\n }\n }\n DotRequirementsOrError requirements_or_error =\n CombineDotRequirements(requirements_, update.requirements);\n if (std::holds_alternative(requirements_or_error)) {\n return false;\n }\n requirements_ = std::move(std::get(requirements_or_error));\n dim_orders_.insert(update.dim_orders.begin(), update.dim_orders.end());\n return true;\n}\nabsl::Status FusionContext::PropagateDimensionOrdersToParameters(\n const HloInstruction& origin, ConstHloInstructionSet& parameters,\n ConstHloInstructionMap& iter_specs) {\n absl::flat_hash_set visited;\n std::queue to_process;\n visited.insert(&origin);\n to_process.push(&origin);\n while (!to_process.empty()) {\n const HloInstruction* hlo = to_process.front();\n to_process.pop();\n if (hlo->opcode() == HloOpcode::kParameter) {\n if (!parameters.insert(hlo).second) {\n return FailedPrecondition(\n \"A parameter is read differently by different users. 
hlo: %s\",\n hlo->ToString());\n }\n VLOG(5) << hlo->ToString();\n }\n DimOrdersAndReqsOrError result = GetPropagatedDimOrdersAndRequirements(\n *hlo, dim_orders_.at(hlo), TransformDirection::kOutputToInput,\n properties_);\n if (!std::holds_alternative(result)) {\n return FailedPrecondition(\n \"Can not propagate dim orders and requirements.\");\n }\n if (!CombineDimOrdersAndReqs(std::get(result))) {\n return FailedPrecondition(\"Can not combine dim orders and requirements.\");\n }\n iter_specs[hlo] = dim_orders_.at(hlo).ToTensorIterationSpec();\n for (const HloInstruction* operand : hlo->operands()) {\n if (!visited.insert(operand).second) {\n continue;\n }\n if (operand->opcode() == HloOpcode::kDot) {\n continue;\n }\n to_process.push(operand);\n }\n }\n return absl::OkStatus();\n}\n} \nabsl::StatusOr TritonFusionAnalysis::Execute(\n const HloComputation& computation, const int split_k) {\n VLOG(5) << computation.ToString(HloPrintOptions::ShortParsable());\n TritonFusionAnalysis analysis;\n const HloInstruction* dot =\n hlo_query::GetFirstInstructionWithOpcode(computation, HloOpcode::kDot);\n TF_RET_CHECK(dot != nullptr);\n TF_RETURN_IF_ERROR(analysis.ExecuteForDotFusion(*dot, split_k));\n return analysis;\n}\nabsl::StatusOr TritonFusionAnalysis::Execute(\n const HloDotInstruction& dot, int split_k) {\n TritonFusionAnalysis analysis;\n TF_RETURN_IF_ERROR(analysis.ExecuteForDotFusion(dot, split_k));\n return analysis;\n}\nabsl::Status TritonFusionAnalysis::ExecuteForProducerConsumer(\n const HloInstruction& producer, const HloInstruction& consumer,\n int split_k) {\n std::unique_ptr new_module =\n ExtractProducerConsumerIntoNewModule(producer, consumer);\n auto* new_producer =\n new_module->entry_computation()->GetInstructionWithName(producer.name());\n auto* new_consumer =\n new_module->entry_computation()->GetInstructionWithName(consumer.name());\n std::unique_ptr fusion_instruction_holder;\n HloInstruction* fusion_instruction;\n if (new_consumer->opcode() == 
HloOpcode::kFusion) {\n fusion_instruction = new_consumer;\n } else {\n fusion_instruction_holder = HloInstruction::CreateFusion(\n new_consumer->shape(), new_producer->fusion_kind(), new_consumer);\n fusion_instruction = fusion_instruction_holder.get();\n }\n if (new_producer->opcode() == HloOpcode::kFusion) {\n fusion_instruction->MergeFusionInstruction(new_producer);\n } else {\n fusion_instruction->FuseInstruction(new_producer);\n }\n auto* fused_computation =\n fusion_instruction->fused_instructions_computation();\n return Execute(*fused_computation, split_k).status();\n}\nbool TritonFusionAnalysis::IsBatchDimMinorForInt4Parameter(\n const HloInstruction& dot, Scope scope) const {\n CHECK(scope == Scope::LHS || scope == Scope::RHS);\n const auto& dims = dot.dot_dimension_numbers();\n const auto& batch_dims = (scope == Scope::LHS) ? dims.lhs_batch_dimensions()\n : dims.rhs_batch_dimensions();\n if (batch_dims.empty()) return true;\n int32_t batch_dim = batch_dims.Get(0);\n CHECK_EQ(batch_dims.size(), 1);\n const auto& params = parameters_.at(scope);\n for (const auto& param : params) {\n if (param->shape().element_type() != S4) continue;\n const auto* strides = IterSpec(scope, param, batch_dim);\n if (strides == nullptr) continue;\n if (strides->front().stride == 1) return false;\n }\n return true;\n}\nabsl::Status TritonFusionAnalysis::ExecuteForDotFusion(\n const HloInstruction& dot, const int split_k) {\n DotRequirements lhs_requirements(kNoSplitRequirement);\n for (const Scope scope : {Scope::LHS, Scope::RHS, Scope::META}) {\n const int operand_number = static_cast(scope);\n if (dot.operand_count() < operand_number + 1) {\n continue; \n }\n TF_ASSIGN_OR_RETURN(auto context, FusionContext::FromDotOperand(\n dot, operand_number, split_k));\n TF_RETURN_IF_ERROR(context.PropagateDimensionOrdersToParameters(\n *dot.operand(operand_number), parameters_[scope], iter_specs_[scope]));\n if (scope == Scope::LHS) {\n lhs_requirements = context.requirements();\n }\n 
}\n auto context = FusionContext::FromDotOutput(dot, split_k, lhs_requirements);\n const HloInstruction* output = &dot;\n while (!output->IsRoot()) {\n TF_RET_CHECK(output->user_count() == 1);\n const HloInstruction* input = output;\n if (IsWorkspaceAllocationRoot(*output->users()[0])) {\n break;\n }\n output = output->users()[0];\n DimOrdersAndReqsOrError result = GetPropagatedDimOrdersAndRequirements(\n *output, context.dim_orders().at(input),\n TransformDirection::kInputToOutput, context.dot_properties());\n if (std::holds_alternative(result)) {\n auto decision = std::get(result);\n return FailedPrecondition(\"Failed to propagate tiling with error: %s\",\n decision.Explain());\n }\n TF_RET_CHECK(\n context.CombineDimOrdersAndReqs(std::get(result)));\n }\n TF_RET_CHECK(\n iter_specs_[Scope::OUTPUT]\n .insert(\n {output, context.dim_orders().at(output).ToTensorIterationSpec()})\n .second);\n parameters_[Scope::OUTPUT] = {};\n if (output != &dot) {\n TF_RETURN_IF_ERROR(context.PropagateDimensionOrdersToParameters(\n *output, parameters_[Scope::OUTPUT], iter_specs_[Scope::OUTPUT]));\n }\n return absl::OkStatus();\n}\nstd::optional\nTritonFusionAnalysis::QueryInstructionScope(const HloInstruction& hlo) const {\n for (const Scope& scope : {Scope::LHS, Scope::RHS, Scope::OUTPUT}) {\n if (iter_specs_.at(scope).count(&hlo) > 0) {\n return scope;\n }\n }\n LOG(WARNING) << \"No scope for hlo: \" << hlo.ToString();\n return std::nullopt;\n}\nconst TensorIterationSpec::DimIterationSpec* TritonFusionAnalysis::IterSpec(\n const TritonFusionAnalysis::Scope scope, const HloInstruction* hlo,\n const int dimension) const {\n auto hlo_spec = iter_specs_.at(scope).find(hlo);\n if (hlo_spec != iter_specs_.at(scope).cend()) {\n return hlo_spec->second.Find(dimension);\n }\n return nullptr;\n}\nnamespace {\nstd::string IterationSpecByInstructionMapToString(\n const TritonFusionAnalysis::IterationSpecByInstructionMap& m) {\n return absl::StrCat(\"IterSpec{\",\n absl::StrJoin(m, \", 
\",\n [&](std::string* s, const auto& kv) {\n absl::StrAppend(s, kv.first->name(), \": \",\n kv.second.ToString());\n }),\n \"}\");\n}\nstd::string ScopeToString(TritonFusionAnalysis::Scope s) {\n switch (s) {\n case TritonFusionAnalysis::Scope::LHS:\n return \"LHS\";\n case TritonFusionAnalysis::Scope::RHS:\n return \"RHS\";\n case TritonFusionAnalysis::Scope::META:\n return \"META\";\n case TritonFusionAnalysis::Scope::OUTPUT:\n return \"OUTPUT\";\n }\n}\n} \nstd::string TritonFusionAnalysis::ToString() const {\n return absl::StrCat(\n \"TritonFusionAnalysis{\\n\",\n absl::StrJoin(iter_specs_, \",\\n\",\n [&](std::string* s, const auto& kv) {\n absl::StrAppend(\n s, ScopeToString(kv.first), \": \",\n IterationSpecByInstructionMapToString(kv.second));\n }),\n \"\\n}\");\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/gpu/triton_fusion_analysis.h\"\n#include \n#include \n#include \n#include \n#include \"absl/status/statusor.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/gpu/transforms/gemm_fusion.h\"\n#include \"xla/stream_executor/device_description.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tests/verified_hlo_module.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nusing ::testing::ElementsAre;\nusing ::testing::FieldsAre;\nusing TritonDotAnalysisTest = HloTestBase;\nTEST_F(TritonDotAnalysisTest, QueryingOutputScopeParametersAlwaysWorks) {\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(R\"(\ntriton_dot {\n p0 = f32[8,8] parameter(0)\n ROOT dot = f32[8,8] dot(p0, p0),\n lhs_contracting_dims={1}, rhs_contracting_dims={0}\n}\nENTRY e {\n p0 = f32[8,8] parameter(0)\n ROOT r = f32[8,8] fusion(p0), kind=kCustom, calls=triton_dot\n})\"));\n TF_ASSERT_OK_AND_ASSIGN(\n const auto analysis,\n TritonFusionAnalysis::Execute(*module->entry_computation()\n ->root_instruction()\n 
->called_computations()[0]));\n EXPECT_TRUE(\n analysis.ScopeParameters(TritonFusionAnalysis::Scope::OUTPUT).empty());\n}\nTEST_F(TritonDotAnalysisTest, NopBitcasts) {\n const std::string hlo_text = R\"(\nHloModule t\ntriton_dot {\n param_0.1 = s8[48,4]{1,0} parameter(0)\n bitcast.18 = s8[1,48,4]{2,1,0} bitcast(param_0.1)\n bitcast.19 = s8[48,4]{1,0} bitcast(bitcast.18)\n convert.4 = bf16[48,4]{1,0} convert(bitcast.19)\n param_1.1 = bf16[4,3]{1,0} parameter(1)\n ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1),\n lhs_contracting_dims={1}, rhs_contracting_dims={0}\n}\nENTRY e {\n p0 = s8[48,4]{1,0} parameter(0)\n p1 = bf16[4,3]{1,0} parameter(1)\n custom-call = bf16[48,3]{1,0} custom-call(p0, p1),\n custom_call_target=\"__triton\",\n called_computations={triton_dot}\n ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n const HloComputation* dot_computation = module->entry_computation()\n ->root_instruction()\n ->operand(0)\n ->called_computations()[0];\n const HloInstruction* p0 = dot_computation->parameter_instruction(0);\n const HloInstruction* p1 = dot_computation->parameter_instruction(1);\n TF_ASSERT_OK_AND_ASSIGN(const auto analysis,\n TritonFusionAnalysis::Execute(*dot_computation));\n EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),\n p0);\n EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),\n p1);\n EXPECT_THAT(\n *analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),\n ElementsAre(FieldsAre(4, 48, 0,\n 48, ElementsAre(48))));\n EXPECT_THAT(\n *analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),\n ElementsAre(FieldsAre(1, 4, 0,\n 4, ElementsAre(4))));\n EXPECT_THAT(\n *analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),\n ElementsAre(FieldsAre(3, 4, 0,\n 4, ElementsAre(4))));\n EXPECT_THAT(\n *analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),\n 
ElementsAre(FieldsAre(1, 3, 0,\n 3, ElementsAre(3))));\n}\nTEST_F(TritonDotAnalysisTest, DoNotRemoveTrivialDimensionForDot) {\n const std::string hlo_text = R\"(\nHloModule t, is_scheduled=true\ntriton_dot {\n param_0.1 = f32[137,115]{1,0} parameter(0)\n param_1.1 = f32[1,115]{1,0} parameter(1)\n ROOT dot = f32[137,1]{1,0} dot(param_0.1, param_1.1),\n lhs_contracting_dims={1}, rhs_contracting_dims={1}\n}\nENTRY e {\n p0 = f32[137,115]{1,0} parameter(0)\n p1 = f32[1,115]{1,0} parameter(1)\n ROOT custom-call = f32[137,1]{1,0} fusion(p0, p1), kind=kCustom,\n calls=triton_dot,\n backend_config={\"fusion_backend_config\": {kind: \"__triton_gemm\",\n triton_gemm_config: {\"block_m\":16,\"block_n\":64,\"block_k\":32,\n \"split_k\":1,\"num_stages\":1,\"num_warps\":2,\n \"num_ctas\":1}}}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n const HloComputation* dot_computation =\n module->entry_computation()->root_instruction()->called_computations()[0];\n const HloInstruction* p0 = dot_computation->parameter_instruction(0);\n const HloInstruction* p1 = dot_computation->parameter_instruction(1);\n TF_ASSERT_OK_AND_ASSIGN(const auto analysis,\n TritonFusionAnalysis::Execute(*dot_computation));\n EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),\n p0);\n EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),\n p1);\n EXPECT_THAT(\n *analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),\n ElementsAre(FieldsAre(115, 137, 0,\n 137, ElementsAre(137))));\n EXPECT_THAT(\n *analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),\n ElementsAre(FieldsAre(1, 115, 0,\n 115, ElementsAre(115))));\n EXPECT_THAT(\n *analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),\n ElementsAre(FieldsAre(115, 1, 0,\n 1, ElementsAre(1))));\n EXPECT_THAT(\n *analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),\n ElementsAre(FieldsAre(1, 115, 0,\n 115, 
ElementsAre(115))));\n}\nTEST_F(TritonDotAnalysisTest, Merge) {\n const std::string hlo_text = R\"(\nHloModule t\ntriton_dot {\n param_0.1 = s8[1,8,6,4]{3,2,1,0} parameter(0)\n bitcast.18 = s8[48,4]{1,0} bitcast(param_0.1)\n convert.4 = bf16[48,4]{1,0} convert(bitcast.18)\n param_1.1 = bf16[4,3]{1,0} parameter(1)\n ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1),\n lhs_contracting_dims={1}, rhs_contracting_dims={0}\n}\nENTRY e {\n p0 = s8[1,8,6,4]{3,2,1,0} parameter(0)\n p1 = bf16[4,3]{1,0} parameter(1)\n custom-call = bf16[48,3]{1,0} custom-call(p0, p1),\n custom_call_target=\"__triton\",\n called_computations={triton_dot}\n ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n const HloComputation* dot_computation = module->entry_computation()\n ->root_instruction()\n ->operand(0)\n ->called_computations()[0];\n const HloInstruction* p0 = dot_computation->parameter_instruction(0);\n const HloInstruction* p1 = dot_computation->parameter_instruction(1);\n TF_ASSERT_OK_AND_ASSIGN(const auto analysis,\n TritonFusionAnalysis::Execute(*dot_computation));\n EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),\n p0);\n EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),\n p1);\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),\n ElementsAre(FieldsAre(4, 6 * 8,\n 0, 6 * 8,\n ElementsAre(6, 8))));\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),\n ElementsAre(FieldsAre(1, 4,\n 0, 4,\n ElementsAre(4))));\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),\n ElementsAre(FieldsAre(3, 4,\n 0, 4,\n ElementsAre(4))));\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),\n ElementsAre(FieldsAre(1, 3,\n 0, 3,\n ElementsAre(3))));\n}\nTEST_F(TritonDotAnalysisTest, Split) {\n const std::string hlo_text = R\"(\nHloModule 
t\ntriton_dot {\n %parameter_1 = f32[24000,2]{1,0} parameter(1)\n %convert.15 = f16[24000,2]{1,0} convert(%parameter_1)\n %parameter_0 = f16[4]{0} parameter(0)\n %bitcast.45 = f16[2,2]{1,0} bitcast(%parameter_0)\n ROOT %dot.26 = f16[24000,2]{1,0} dot(%convert.15, %bitcast.45),\n lhs_contracting_dims={1}, rhs_contracting_dims={0}\n}\nENTRY e {\n p0 = f16[4]{0} parameter(0)\n p1 = f32[24000,2]{1,0} parameter(1)\n ROOT r = f16[24000,2]{1,0} custom-call(p0, p1),\n custom_call_target=\"__triton\",\n called_computations={triton_dot}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n const HloComputation* dot_computation =\n module->entry_computation()->root_instruction()->called_computations()[0];\n const HloInstruction* p0 = dot_computation->parameter_instruction(0);\n const HloInstruction* p1 = dot_computation->parameter_instruction(1);\n TF_ASSERT_OK_AND_ASSIGN(const auto analysis,\n TritonFusionAnalysis::Execute(*dot_computation));\n EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),\n p1);\n EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),\n p0);\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p1, 0),\n ElementsAre(FieldsAre(2, 24000,\n 0, 24000,\n ElementsAre(24000))));\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p1, 1),\n ElementsAre(FieldsAre(1, 2,\n 0, 2,\n ElementsAre(2))));\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p0, 0),\n ElementsAre(FieldsAre(2, 2,\n 0, 2,\n ElementsAre(2))));\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p0, 1),\n ElementsAre(FieldsAre(1, 2,\n 0, 2,\n ElementsAre(2))));\n}\nTEST_F(TritonDotAnalysisTest, TransposeMerge) {\n const std::string hlo_text = R\"(\nHloModule t\ntriton_dot {\n param_0.1 = s8[1,4,8,6]{3,2,1,0} parameter(0)\n transpose.3 = s8[1,8,6,4]{3,2,1,0} transpose(param_0.1), dimensions={0,2,3,1}\n bitcast.18 = s8[48,4]{1,0} 
bitcast(transpose.3)\n convert.4 = bf16[48,4]{1,0} convert(bitcast.18)\n param_1.1 = bf16[4,3]{1,0} parameter(1)\n ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1),\n lhs_contracting_dims={1}, rhs_contracting_dims={0}\n}\nENTRY e {\n p0 = s8[1,4,8,6]{3,2,1,0} parameter(0)\n p1 = bf16[4,3]{1,0} parameter(1)\n custom-call = bf16[48,3]{1,0} custom-call(p0, p1),\n custom_call_target=\"__triton\",\n called_computations={triton_dot}\n ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n const HloComputation* dot_computation = module->entry_computation()\n ->root_instruction()\n ->operand(0)\n ->called_computations()[0];\n const HloInstruction* p0 = dot_computation->parameter_instruction(0);\n const HloInstruction* p1 = dot_computation->parameter_instruction(1);\n TF_ASSERT_OK_AND_ASSIGN(const auto analysis,\n TritonFusionAnalysis::Execute(*dot_computation));\n EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),\n p0);\n EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),\n p1);\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),\n ElementsAre(FieldsAre(1, 8 * 6,\n 0, 8 * 6,\n ElementsAre(6, 8))));\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),\n ElementsAre(FieldsAre(8 * 6, 4,\n 0, 4,\n ElementsAre(4))));\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),\n ElementsAre(FieldsAre(3, 4,\n 0, 4,\n ElementsAre(4))));\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),\n ElementsAre(FieldsAre(1, 3,\n 0, 3,\n ElementsAre(3))));\n}\nTEST_F(TritonDotAnalysisTest, CopyMerge) {\n const std::string hlo_text = R\"(\nHloModule t\ntriton_dot {\n param_0.1 = s8[1,4,8,6]{3,2,1,0} parameter(0)\n bitcast.99 = s8[1,8,6,4]{2,1,3,0} bitcast(param_0.1)\n copy.3 = s8[1,8,6,4]{3,2,1,0} copy(bitcast.99)\n bitcast.18 = s8[48,4]{1,0} 
bitcast(copy.3)\n convert.4 = bf16[48,4]{1,0} convert(bitcast.18)\n param_1.1 = bf16[4,3]{1,0} parameter(1)\n ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1),\n lhs_contracting_dims={1}, rhs_contracting_dims={0}\n}\nENTRY e {\n p0 = s8[1,4,8,6]{3,2,1,0} parameter(0)\n p1 = bf16[4,3]{1,0} parameter(1)\n custom-call = bf16[48,3]{1,0} custom-call(p0, p1),\n custom_call_target=\"__triton\",\n called_computations={triton_dot}\n ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n const HloComputation* dot_computation = module->entry_computation()\n ->root_instruction()\n ->operand(0)\n ->called_computations()[0];\n const HloInstruction* p0 = dot_computation->parameter_instruction(0);\n const HloInstruction* p1 = dot_computation->parameter_instruction(1);\n TF_ASSERT_OK_AND_ASSIGN(const auto analysis,\n TritonFusionAnalysis::Execute(*dot_computation));\n EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),\n p0);\n EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),\n p1);\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),\n ElementsAre(FieldsAre(1, 8 * 6,\n 0, 8 * 6,\n ElementsAre(6, 8))));\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),\n ElementsAre(FieldsAre(8 * 6, 4,\n 0, 4,\n ElementsAre(4))));\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),\n ElementsAre(FieldsAre(3, 4,\n 0, 4,\n ElementsAre(4))));\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),\n ElementsAre(FieldsAre(1, 3,\n 0, 3,\n ElementsAre(3))));\n}\nTEST_F(TritonDotAnalysisTest, TransposeMergeNCN) {\n const std::string hlo_text = R\"(\nHloModule t\ntriton_dot {\n param_0.1 = bf16[3,4,8,1]{3,2,1,0} parameter(0)\n transpose.3 = bf16[3,8,1,4]{3,2,1,0} transpose(param_0.1), dimensions={0,2,3,1}\n bitcast.18 = bf16[24,4]{1,0} 
bitcast(transpose.3)\n param_1.1 = bf16[4,3]{1,0} parameter(1)\n ROOT dot = bf16[24,3]{1,0} dot(bitcast.18, param_1.1),\n lhs_contracting_dims={1}, rhs_contracting_dims={0}\n}\nENTRY e {\n p0 = bf16[3,4,8,1]{3,2,1,0} parameter(0)\n p1 = bf16[4,3]{1,0} parameter(1)\n custom-call = bf16[24,3]{1,0} custom-call(p0, p1),\n custom_call_target=\"__triton\", called_computations={triton_dot}\n ROOT bitcast.2 = bf16[3,8,1,3]{3,2,1,0} bitcast(custom-call)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n const HloComputation* dot_computation = module->entry_computation()\n ->root_instruction()\n ->operand(0)\n ->called_computations()[0];\n const HloInstruction* p0 = dot_computation->parameter_instruction(0);\n const HloInstruction* p1 = dot_computation->parameter_instruction(1);\n TF_ASSERT_OK_AND_ASSIGN(const auto analysis,\n TritonFusionAnalysis::Execute(*dot_computation));\n EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),\n p0);\n EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),\n p1);\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),\n ElementsAre(FieldsAre(1, 8,\n 0, 8,\n ElementsAre(8)),\n FieldsAre(4 * 8, 3,\n 0, 3,\n ElementsAre(3))));\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),\n ElementsAre(FieldsAre(8, 4,\n 0, 4,\n ElementsAre(4))));\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),\n ElementsAre(FieldsAre(3, 4,\n 0, 4,\n ElementsAre(4))));\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),\n ElementsAre(FieldsAre(1, 3,\n 0, 3,\n ElementsAre(3))));\n}\nTEST_F(TritonDotAnalysisTest, TransposeOutput) {\n const std::string hlo_text = R\"(\nHloModule t\ntriton_dot {\n p0 = bf16[24,4]{1,0} parameter(0)\n p1 = bf16[4,3]{1,0} parameter(1)\n dot = bf16[24,3]{1,0} dot(p0, p1),\n lhs_contracting_dims={1}, rhs_contracting_dims={0}\n bc = bf16[12,2,3]{2,1,0} 
bitcast(dot)\n ROOT t = bf16[3,12,2]{2,1,0} transpose(bc), dimensions={2,0,1}\n}\nENTRY e {\n p0 = bf16[24,4]{1,0} parameter(0)\n p1 = bf16[4,3]{1,0} parameter(1)\n ROOT r = bf16[3,12,2]{2,1,0} fusion(p0, p1), kind=kCustom,\n calls=triton_dot\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n const HloComputation* dot_computation =\n module->entry_computation()->root_instruction()->called_computations()[0];\n const HloInstruction* dot_output = dot_computation->root_instruction();\n TF_ASSERT_OK_AND_ASSIGN(const auto analysis,\n TritonFusionAnalysis::Execute(*dot_computation));\n EXPECT_THAT(\n *analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, dot_output, 0),\n ElementsAre(FieldsAre(1, 24, 0,\n 24,\n ElementsAre(2, 12))));\n EXPECT_THAT(\n *analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, dot_output, 1),\n ElementsAre(FieldsAre(24, 3, 0,\n 3,\n ElementsAre(3))));\n}\nTEST_F(TritonDotAnalysisTest, OutputParameterIsHandled) {\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(R\"(\nHloModule t\ntriton_dot {\n p0 = bf16[24,4]{1,0} parameter(0)\n p1 = bf16[4,3]{1,0} parameter(1)\n dot = bf16[24,3]{1,0} dot(p0, p1),\n lhs_contracting_dims={1}, rhs_contracting_dims={0}\n p2 = f16[3,24]{1,0} parameter(2)\n p2t = f16[24,3]{1,0} transpose(p2), dimensions={1,0}\n p2tc = bf16[24,3]{1,0} convert(p2t)\n ROOT r = bf16[24,3]{1,0} divide(p2tc, dot)\n}\nENTRY e {\n p0 = bf16[24,4]{1,0} parameter(0)\n p1 = bf16[4,3]{1,0} parameter(1)\n p2 = f16[3,24]{1,0} parameter(2)\n ROOT r = bf16[24,3]{1,0} fusion(p0, p1, p2), kind=kCustom,\n calls=triton_dot\n})\"));\n const HloComputation* dot_computation =\n module->entry_computation()->root_instruction()->called_computations()[0];\n const HloInstruction* output_param =\n dot_computation->parameter_instruction(2);\n TF_ASSERT_OK_AND_ASSIGN(const auto analysis,\n TritonFusionAnalysis::Execute(*dot_computation));\n EXPECT_EQ(\n 
analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 0)\n ->size(),\n 1);\n EXPECT_THAT(\n *analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 0),\n ElementsAre(FieldsAre(1, 24, 0,\n 24,\n ElementsAre(24))));\n EXPECT_EQ(\n analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 1)\n ->size(),\n 1);\n EXPECT_THAT(\n *analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 1),\n ElementsAre(FieldsAre(24, 3, 0,\n 3,\n ElementsAre(3))));\n}\nTEST_F(TritonDotAnalysisTest, InputBroadcastFromScalarIsHandled) {\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(R\"(\nHloModule t\ntriton_dot {\n p0 = bf16[24,4]{1,0} parameter(0)\n p1 = bf16[] parameter(1)\n p1b = bf16[4,3] broadcast(p1)\n ROOT dot = bf16[24,3]{1,0} dot(p0, p1b),\n lhs_contracting_dims={1}, rhs_contracting_dims={0}\n}\nENTRY e {\n p0 = bf16[24,4]{1,0} parameter(0)\n p1 = bf16[] parameter(1)\n ROOT r = bf16[24,3]{1,0} fusion(p0, p1), kind=kCustom,\n calls=triton_dot\n})\"));\n const HloComputation* dot_computation =\n module->entry_computation()->root_instruction()->called_computations()[0];\n const HloInstruction* scalar = dot_computation->parameter_instruction(1);\n TF_ASSERT_OK_AND_ASSIGN(const auto analysis,\n TritonFusionAnalysis::Execute(*dot_computation));\n EXPECT_EQ(analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, scalar, 0),\n nullptr);\n EXPECT_EQ(analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, scalar, 1),\n nullptr);\n}\nTEST_F(TritonDotAnalysisTest, InputBroadcastFromVectorIsHandled) {\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(R\"(\nHloModule t\ntriton_dot {\n p0 = bf16[24,4]{1,0} parameter(0)\n p1 = bf16[4] parameter(1)\n p1b = bf16[4,3] broadcast(p1), dimensions={0}\n ROOT dot = bf16[24,3]{1,0} dot(p0, p1b),\n lhs_contracting_dims={1}, rhs_contracting_dims={0}\n}\nENTRY e {\n p0 = bf16[24,4]{1,0} parameter(0)\n p1 = bf16[4] parameter(1)\n ROOT r = 
bf16[24,3]{1,0} fusion(p0, p1), kind=kCustom,\n calls=triton_dot\n})\"));\n const HloComputation* dot_computation =\n module->entry_computation()->root_instruction()->called_computations()[0];\n const HloInstruction* vector = dot_computation->parameter_instruction(1);\n TF_ASSERT_OK_AND_ASSIGN(const auto analysis,\n TritonFusionAnalysis::Execute(*dot_computation));\n EXPECT_EQ(\n analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, vector, 0)->size(),\n 1);\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, vector, 0),\n ElementsAre(FieldsAre(1, 4,\n 0, 4,\n ElementsAre(4))));\n}\nTEST_F(TritonDotAnalysisTest, OutputBroadcastIsNotAccepted) {\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(R\"(\nHloModule t\nENTRY e {\n p0 = f16[2,35] parameter(0)\n p0c = bf16[2,35] convert(p0)\n p1 = bf16[35,2] parameter(1)\n dot = bf16[2,2] dot(p0c, p1),\n lhs_contracting_dims={1}, rhs_contracting_dims={0}\n ROOT bc = bf16[2,2,100] broadcast(dot), dimensions={0,1}\n})\"));\n EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{\n se::CudaComputeCapability::AMPERE, 0})\n .Run(module.get())\n .value());\n EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),\n HloOpcode::kBroadcast);\n}\nTEST_F(TritonDotAnalysisTest, DegenerateSplitFragmentIsHandled) {\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(R\"(\ntriton_gemm_r {\n Arg_0.1 = s8[30,913,8,21]{3,2,1,0} parameter(0)\n bitcast.6 = s8[30,8,21,913]{2,1,3,0} bitcast(Arg_0.1)\n copy.7 = s8[30,8,21,913]{3,2,1,0} copy(bitcast.6)\n bitcast.8 = s8[5040,913]{1,0} bitcast(copy.7)\n convert.9 = bf16[5040,913]{1,0} convert(bitcast.8)\n bitcast.32 = bf16[58,913]{1,0} parameter(1)\n dot.33 = bf16[5040,58]{1,0} dot(convert.9, bitcast.32),\n lhs_contracting_dims={1}, rhs_contracting_dims={1}\n bitcast.34 = bf16[30,8,21,58]{3,2,1,0} bitcast(dot.33)\n copy.35 = bf16[30,8,21,58]{2,1,3,0} copy(bitcast.34)\n ROOT bitcast.41 = bf16[30,1,58,8,21]{4,3,2,1,0} 
bitcast(copy.35)\n}\nENTRY e {\n Arg_0.1 = s8[30,913,8,21]{3,2,1,0} parameter(0)\n Arg_1.2 = bf16[58,913]{1,0} parameter(1)\n ROOT r = bf16[30,1,58,8,21]{4,3,2,1,0} fusion(Arg_0.1, Arg_1.2), kind=kCustom,\n calls=triton_gemm_r,\n backend_config={kind: \"__triton_gemm\"}\n})\"));\n const HloComputation* dot_computation =\n module->entry_computation()->root_instruction()->called_computations()[0];\n TF_ASSERT_OK_AND_ASSIGN(const auto analysis,\n TritonFusionAnalysis::Execute(*dot_computation));\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT,\n dot_computation->root_instruction(), 0),\n ElementsAre(FieldsAre(1, 8 * 21,\n 0, 8 * 21,\n ElementsAre(21, 8)),\n FieldsAre(8 * 21 * 58, 30,\n 0, 30,\n ElementsAre(30))));\n}\nTEST_F(TritonDotAnalysisTest,\n HandlesFurtherPropagationFromTrivialSizedTensorGracefully) {\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(R\"(\ntriton_gemm_r {\n a = f32[3,3]{1,0} parameter(0)\n constant = f32[1,1]{1,0} constant({ {0} })\n broadcast = f32[1,1]{1,0} broadcast(constant), dimensions={0,1}\n reshape = f32[] reshape(broadcast)\n broadcast2 = f32[3,3]{1,0} broadcast(reshape), dimensions={}\n ROOT dot = f32[3,3]{1,0} dot(a, broadcast2),\n lhs_contracting_dims={0}, rhs_contracting_dims={0}\n}\nENTRY e {\n a = f32[3,3]{1,0} parameter(0)\n ROOT dot = f32[3,3]{1,0} fusion(a), kind=kCustom, calls=triton_gemm_r,\n backend_config={kind: \"__triton_gemm\"}\n}\n)\"));\n const HloComputation* dot_computation =\n module->entry_computation()->root_instruction()->called_computations()[0];\n absl::StatusOr analysis =\n TritonFusionAnalysis::Execute(*dot_computation);\n (void)analysis;\n}\nTEST_F(TritonDotAnalysisTest, DynamicSliceIsSupported) {\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(R\"(\ntriton_gemm {\n dot_lhs = f32[2,18]{1,0} parameter(0)\n dynamic_slice_input = f32[96,2]{1,0} parameter(1)\n start_index0 = s32[] parameter(2)\n start_index1 = s32[] 
parameter(3)\n dynamic_slice = f32[64,2]{1,0} dynamic-slice(dynamic_slice_input,\n start_index0, start_index1),\n dynamic_slice_sizes={64,2}\n ROOT dot = f32[18,64]{1,0} dot(dot_lhs, dynamic_slice),\n lhs_contracting_dims={0}, rhs_contracting_dims={1}\n}\nENTRY e {\n dot_lhs = f32[2,18]{1,0} parameter(0)\n dynamic_slice_input = f32[96,2]{1,0} parameter(1)\n start_index0 = s32[] parameter(2)\n start_index1 = s32[] parameter(3)\n ROOT triton_gemm_d = f32[18,64]{1,0} fusion(dot_lhs, dynamic_slice_input,\n start_index0, start_index1),\n kind=kCustom,\n calls=triton_gemm,\n backend_config={\"kind\":\"__triton_gemm\"}\n}\n)\"));\n const HloComputation* dot_computation =\n module->entry_computation()->root_instruction()->called_computations()[0];\n TF_ASSERT_OK_AND_ASSIGN(const auto analysis,\n TritonFusionAnalysis::Execute(*dot_computation));\n const HloInstruction* p0 = dot_computation->parameter_instruction(0);\n const HloInstruction* p1 = dot_computation->parameter_instruction(1);\n EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),\n p0);\n EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),\n p1);\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),\n ElementsAre(FieldsAre(18, 2,\n 0, 2,\n ElementsAre(2))));\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),\n ElementsAre(FieldsAre(1, 18,\n 0, 18,\n ElementsAre(18))));\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),\n ElementsAre(FieldsAre(2, 96,\n 0, 96,\n ElementsAre(96))));\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),\n ElementsAre(FieldsAre(1, 2,\n 0, 2,\n ElementsAre(2))));\n}\nTEST_F(TritonDotAnalysisTest, SparseDot) {\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(R\"(\ntriton_gemm {\n lhs = bf16[5,16] parameter(0)\n rhs = bf16[32,10] parameter(1)\n meta = u16[5,2] parameter(2)\n ROOT dot = f32[5,10] dot(lhs, rhs, 
meta),\n lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4\n}\nENTRY main {\n lhs = bf16[5,16] parameter(0)\n rhs = bf16[32,10] parameter(1)\n meta = u16[5,2] parameter(2)\n ROOT out = f32[5,10] fusion(lhs, rhs, meta),\n kind=kCustom, calls=triton_gemm, backend_config={kind:\"__triton_gemm\"}\n}\n)\"));\n const HloComputation* dot_computation =\n module->entry_computation()->root_instruction()->called_computations()[0];\n TF_ASSERT_OK_AND_ASSIGN(const auto analysis,\n TritonFusionAnalysis::Execute(*dot_computation));\n EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::META,\n dot_computation->parameter_instruction(2), 0),\n ::testing::SizeIs(1));\n}\nTEST_F(TritonDotAnalysisTest, QueryScopeAlwaysWorks) {\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(R\"(\ntriton_gemm_r {\n Arg_0.1 = s8[30,913,8,21]{3,2,1,0} parameter(0)\n bitcast.6 = s8[30,8,21,913]{2,1,3,0} bitcast(Arg_0.1)\n copy.7 = s8[30,8,21,913]{3,2,1,0} copy(bitcast.6)\n bitcast.8 = s8[5040,913]{1,0} bitcast(copy.7)\n convert.9 = bf16[5040,913]{1,0} convert(bitcast.8)\n bitcast.32 = bf16[58,913]{1,0} parameter(1)\n dot.33 = bf16[5040,58]{1,0} dot(convert.9, bitcast.32),\n lhs_contracting_dims={1}, rhs_contracting_dims={1}\n bitcast.34 = bf16[30,8,21,58]{3,2,1,0} bitcast(dot.33)\n copy.35 = bf16[30,8,21,58]{2,1,3,0} copy(bitcast.34)\n ROOT bitcast.41 = bf16[30,1,58,8,21]{4,3,2,1,0} bitcast(copy.35)\n}\nENTRY e {\n Arg_0.1 = s8[30,913,8,21]{3,2,1,0} parameter(0)\n Arg_1.2 = bf16[58,913]{1,0} parameter(1)\n ROOT r = bf16[30,1,58,8,21]{4,3,2,1,0} fusion(Arg_0.1, Arg_1.2), kind=kCustom,\n calls=triton_gemm_r,\n backend_config={kind: \"__triton_gemm\"}\n})\"));\n const HloComputation* dot_computation =\n module->entry_computation()->root_instruction()->called_computations()[0];\n TF_ASSERT_OK_AND_ASSIGN(const auto analysis,\n TritonFusionAnalysis::Execute(*dot_computation));\n for (const auto& hlo : dot_computation->instructions()) {\n if 
(hlo->opcode() != HloOpcode::kDot) {\n EXPECT_TRUE(analysis.QueryInstructionScope(*hlo).has_value());\n }\n }\n}\nTEST_F(TritonDotAnalysisTest, PadWithTrivialDimension) {\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(R\"(\nHloModule t\ntriton_gemm_dot {\n parameter_0 = f32[1001,1]{1,0} parameter(0)\n constant = f32[] constant(0)\n pad = f32[1004,1]{1,0} pad(parameter_0, constant), padding=0_3x0_0\n bitcast = f32[4,251,1]{2,1,0} bitcast(pad)\n parameter_1 = f32[4,251,2048]{2,1,0} parameter(1)\n ROOT dot = f32[4,1,2048]{2,1,0} dot(bitcast, parameter_1),\n lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0},\n rhs_contracting_dims={1}\n})\"));\n const HloComputation* dot_computation = *module->computations().begin();\n TF_ASSERT_OK_AND_ASSIGN(\n TritonFusionAnalysis analysis,\n TritonFusionAnalysis::Execute(*dot_computation, 4));\n const HloInstruction* p0 = dot_computation->parameter_instruction(0);\n const HloInstruction* p1 = dot_computation->parameter_instruction(1);\n EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),\n p0);\n EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),\n p1);\n EXPECT_THAT(\n *analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),\n ElementsAre(FieldsAre(1, 1001, 0,\n 1001, ElementsAre(1001))));\n EXPECT_THAT(\n *analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 2),\n ElementsAre(FieldsAre(1, 1, 0,\n 1, ElementsAre(1))));\n EXPECT_THAT(\n *analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),\n ElementsAre(FieldsAre(2048, 1004, 0,\n 1004, ElementsAre(251, 4))));\n EXPECT_THAT(\n *analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 2),\n ElementsAre(FieldsAre(1, 2048, 0,\n 2048, ElementsAre(2048))));\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/triton_fusion_analysis.cc"},"Test Code 
Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/triton_fusion_analysis_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1191,"cells":{"ID":{"kind":"string","value":"873af691-a7ed-4983-a8cd-f970eb8fa7af"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"gpu_latency_hiding_scheduler"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/gpu_latency_hiding_scheduler.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/gpu_latency_hiding_scheduler_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/gpu_latency_hiding_scheduler.h\"\n#include \n#include \n#include \n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_query.h\"\n#include \"xla/service/collective_ops_utils.h\"\n#include \"xla/service/gpu/backend_configs.pb.h\"\n#include \"xla/service/gpu/cublas_cudnn.h\"\n#include \"xla/service/latency_hiding_scheduler.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nstatic constexpr int64_t kCostlyAllReduceThreshold = 30 * 1024 * 1024;\nstatic constexpr int64_t kCostlyAllReduceMultiplier = 4;\nbool IsNopInstruction(const HloInstruction& hlo) {\n HloOpcode op = hlo.opcode();\n return op == HloOpcode::kGetTupleElement || op == HloOpcode::kBitcast ||\n op == HloOpcode::kConstant || op == HloOpcode::kParameter ||\n op == HloOpcode::kTuple || op == HloOpcode::kPartitionId ||\n op == HloOpcode::kReplicaId || hlo.IsEffectiveBitcast() ||\n op == HloOpcode::kOptimizationBarrier;\n}\nbool IsAsyncComputeOp(const HloInstruction& 
hlo) {\n return (hlo.opcode() == HloOpcode::kAsyncStart ||\n hlo.opcode() == HloOpcode::kAsyncDone) &&\n !hlo_query::IsCollectiveCommunicationOp(hlo.async_wrapped_opcode()) &&\n hlo.async_execution_thread() != hlo.parent()->execution_thread();\n}\nint64_t GetPipelineStream(const HloInstruction& start) {\n auto it = start.frontend_attributes().map().find(kSendRecvPipelineAttr);\n if (it != start.frontend_attributes().map().end() && it->second == \"1\") {\n return 1;\n }\n return 0;\n}\nstd::pair GetP2PResourceAndUsage(\n const HloInstruction& instr, const CanonicalAsyncOp& op) {\n ResourceUsageType usage = op.outer == HloOpcode::kAsyncStart\n ? ResourceUsageType::kResourceRelease\n : ResourceUsageType::kResourceOccupy;\n int64_t pipeline = GetPipelineStream(instr);\n HloOpcode opcode = op.inner;\n GpuResourceType resource;\n if (pipeline == 0) {\n resource = opcode == HloOpcode::kSend\n ? GpuResourceType::kGpuAsyncStreamSend0\n : GpuResourceType::kGpuAsyncStreamRecv0;\n } else {\n resource = opcode == HloOpcode::kSend\n ? 
GpuResourceType::kGpuAsyncStreamSend1\n : GpuResourceType::kGpuAsyncStreamRecv1;\n }\n return {resource, usage};\n}\nbool IsGpuAsyncStart(const HloInstruction& hlo) {\n return (hlo_query::IsAsyncCollectiveStartOp(&hlo,\n true) &&\n !IsSyncCollective(&hlo)) ||\n IsAsyncComputeOp(hlo);\n}\nbool IsGpuAsyncDone(const HloInstruction& hlo) {\n return (hlo_query::IsAsyncCollectiveDoneOp(&hlo,\n true) &&\n !IsSyncCollective(hlo.operand(0))) ||\n IsAsyncComputeOp(hlo);\n}\nbool IsAsyncPair(const HloInstruction& from, const HloInstruction& target) {\n return IsGpuAsyncStart(from) && IsGpuAsyncDone(target);\n}\n} \nint64_t GetSizeOfShape(const Shape& shape, int pointer_size) {\n int64_t size = ShapeUtil::ByteSizeOf(shape, pointer_size);\n if (shape.IsTuple() || shape.is_static()) {\n return size;\n }\n int64_t metadata_size = sizeof(int32_t) * shape.dimensions_size();\n return size + metadata_size;\n}\nCanonicalAsyncOp GpuGetCanonicalAsyncOp(const HloInstruction& hlo) {\n switch (hlo.opcode()) {\n case HloOpcode::kSend:\n return {HloOpcode::kAsyncStart, HloOpcode::kSend};\n case HloOpcode::kSendDone:\n return {HloOpcode::kAsyncDone, HloOpcode::kSend};\n case HloOpcode::kRecv:\n return {HloOpcode::kAsyncStart, HloOpcode::kRecv};\n case HloOpcode::kRecvDone:\n return {HloOpcode::kAsyncDone, HloOpcode::kRecv};\n default:\n return DefaultGetCanonicalAsyncOp(hlo);\n }\n}\nGpuAsyncTrackerBase::GpuAsyncTrackerBase(const SchedulerConfig& config,\n GetCanonicalAsyncOpFunc func)\n : AsyncTracker(config, func) {}\nbool GpuAsyncTrackerBase::IsSupportedAsyncDone(\n const HloInstruction& hlo) const {\n return IsGpuAsyncDone(hlo);\n}\nbool GpuAsyncTrackerBase::IsSupportedAsyncStart(\n const HloInstruction& hlo) const {\n return IsGpuAsyncStart(hlo);\n}\nvoid GpuAsyncTrackerBase::PostProcessScheduleGraph(\n HloScheduleGraph* schedule_graph,\n const LatencyEstimator* latency_estimator) const {\n for (auto inst : schedule_graph->GetOriginalInstrList()) {\n if (inst->opcode() == 
HloOpcode::kRecv) {\n if (inst->frontend_attributes().map().count(kSendRecvPipelineAttr) > 0) {\n HloGraphNode& node = schedule_graph->GetNode(inst);\n node.SetForceEarly(true);\n VLOG(5) << \"Setting force early for instruction: \" << inst->ToString();\n }\n }\n if (inst->has_backend_config()) {\n auto gpu_config = inst->backend_config();\n if (gpu_config.ok()) {\n HloGraphNode& node = schedule_graph->GetNode(inst);\n node.SetForceDelay(gpu_config->force_earliest_schedule());\n VLOG(5) << \"Setting force delay for instruction: \" << inst->ToString();\n }\n }\n }\n}\nGpuAsyncTracker::GpuAsyncTracker(const SchedulerConfig& config)\n : GpuAsyncTrackerBase(config) {}\nResourcesVector GpuAsyncTracker::GetResourcesFromInstruction(\n const HloInstruction& instr) const {\n CanonicalAsyncOp op = GetCanonicalAsyncOp(instr);\n if (op.outer == HloOpcode::kAsyncStart || op.outer == HloOpcode::kAsyncDone) {\n ResourceUsageType usage;\n GpuResourceType resource;\n if (op.inner == HloOpcode::kSend || op.inner == HloOpcode::kRecv) {\n std::tie(resource, usage) = GetP2PResourceAndUsage(instr, op);\n } else {\n usage = op.outer == HloOpcode::kAsyncStart\n ? ResourceUsageType::kResourceRelease\n : ResourceUsageType::kResourceOccupy;\n resource = hlo_query::IsCollectiveCommunicationOp(op.inner)\n ? 
GpuResourceType::kGpuAsyncStreamCollectives\n : GpuResourceType::kGpuAsyncStreamComputes;\n }\n return {std::make_pair(\n GetFirstTargetDefinedResource() + static_cast(resource),\n usage)};\n }\n return GpuAsyncTrackerBase::GetResourcesFromInstruction(instr);\n}\nint64_t GpuAsyncTracker::GetNumTargetDefinedResources() const {\n return static_cast(GpuResourceType::kNumTargetResources);\n};\nint64_t GpuAsyncTracker::GetNumAvailableResources(int64_t resource_type) const {\n const int64_t first_target_resource = GetFirstTargetDefinedResource();\n if (resource_type < first_target_resource) {\n return GpuAsyncTrackerBase::GetNumAvailableResources(resource_type);\n }\n CHECK_LT(resource_type,\n first_target_resource +\n static_cast(GpuResourceType::kNumTargetResources));\n if ((resource_type - first_target_resource) ==\n static_cast(GpuResourceType::kGpuAsyncStreamComputes)) {\n return 2;\n }\n return 1;\n}\nabsl::string_view GpuAsyncTracker::GetResourceName(\n int64_t resource_type) const {\n const int64_t first_target_resource = GetFirstTargetDefinedResource();\n if (resource_type < first_target_resource) {\n return GpuAsyncTrackerBase::GetResourceName(resource_type);\n }\n CHECK_LE(resource_type,\n first_target_resource + GetNumTargetDefinedResources());\n switch (static_cast(resource_type - first_target_resource)) {\n case GpuResourceType::kGpuAsyncStreamSend0:\n return \"kGpuAsyncStreamSend0\";\n case GpuResourceType::kGpuAsyncStreamSend1:\n return \"kGpuAsyncStreamSend1\";\n case GpuResourceType::kGpuAsyncStreamRecv0:\n return \"kGpuAsyncStreamRecv0\";\n case GpuResourceType::kGpuAsyncStreamRecv1:\n return \"kGpuAsyncStreamRecv1\";\n case GpuResourceType::kGpuAsyncStreamCollectives:\n return \"kGpuAsyncStreamCollectives\";\n case GpuResourceType::kGpuAsyncStreamComputes:\n return \"kGpuAsyncStreamComputes\";\n default:\n return \"kUnsupportedResource\";\n }\n}\nResourceHazardType GpuAsyncTracker::GetResourceHazardType(\n int64_t resource_type) const {\n const 
int64_t first_target_resource = GetFirstTargetDefinedResource();\n if (resource_type < first_target_resource) {\n return GpuAsyncTrackerBase::GetResourceHazardType(resource_type);\n }\n CHECK_LE(resource_type,\n first_target_resource + GetNumTargetDefinedResources());\n return ResourceHazardType::kUnshareable;\n}\nint64_t GpuAsyncTracker::GetNumResourcesPerInstruction(\n int64_t resource_type, const HloInstruction& instr) const {\n int64_t num_resources =\n GpuAsyncTrackerBase::GetNumResourcesPerInstruction(resource_type, instr);\n if (num_resources <= 0 || instr.opcode() != HloOpcode::kWhile) {\n return num_resources;\n }\n int64_t first_p2p_resource =\n GetFirstTargetDefinedResource() +\n static_cast(GpuResourceType::kGpuAsyncStreamSend0);\n if (resource_type < first_p2p_resource ||\n resource_type > first_p2p_resource + 4) {\n return num_resources;\n }\n auto find_instruction_for_pipeline = [&](HloOpcode opcode, int64_t pipeline) {\n for (auto user1 : instr.users()) {\n if (user1->opcode() == HloOpcode::kGetTupleElement) {\n for (auto user2 : user1->users()) {\n if (user2->opcode() == opcode) {\n if (GetPipelineStream(*user2) == pipeline) {\n return true;\n }\n }\n }\n }\n }\n return false;\n };\n bool found;\n if (resource_type == first_p2p_resource) {\n found = find_instruction_for_pipeline(HloOpcode::kSendDone, 0);\n } else if (resource_type == first_p2p_resource + 1) {\n found = find_instruction_for_pipeline(HloOpcode::kSendDone, 1);\n } else if (resource_type == first_p2p_resource + 2) {\n found = find_instruction_for_pipeline(HloOpcode::kRecvDone, 0);\n } else {\n found = find_instruction_for_pipeline(HloOpcode::kRecvDone, 1);\n }\n return num_resources - (found ? 
1 : 0);\n}\nGpuLatencyEstimator::GpuLatencyEstimator(int64_t pointer_size,\n GetCanonicalAsyncOpFunc func)\n : ApproximateLatencyEstimator(func), pointer_size_(pointer_size) {}\nApproximateLatencyEstimator::TimeCost GpuLatencyEstimator::NodeCost(\n const HloInstruction* instr) const {\n if (IsNopInstruction(*instr)) {\n return 0.0;\n }\n if (instr->opcode() == HloOpcode::kCustomCall) {\n if (IsCublasGemm(*instr) || IsCustomCallToDnnConvolution(*instr)) {\n return ApproximateLatencyEstimator::kMediumCost;\n }\n return ApproximateLatencyEstimator::kMediumCost;\n }\n return ApproximateLatencyEstimator::NodeCost(instr);\n}\nApproximateLatencyEstimator::TimeCost GpuLatencyEstimator::GetLatencyBetween(\n const HloGraphNode& from, const HloGraphNode& to) const {\n if (IsAsyncPair(from, to)) {\n if (from.GetInstr().opcode() == HloOpcode::kRecv) {\n return ApproximateLatencyEstimator::kLowLatency;\n } else if (from.GetInstr().opcode() == HloOpcode::kSend) {\n return ApproximateLatencyEstimator::kHighLatency * 10;\n }\n bool enable_approx_collectives =\n from.GetInstr()\n .GetModule()\n ->config()\n .debug_options()\n .xla_gpu_enable_approx_costly_collectives();\n bool is_all_reduce = from.GetInstr().opcode() == HloOpcode::kAllReduceStart;\n bool collective_size_exceeds_threshold =\n GetSizeOfShape(from.GetInstr().shape(), pointer_size_) >\n kCostlyAllReduceThreshold;\n if (enable_approx_collectives && is_all_reduce &&\n collective_size_exceeds_threshold) {\n return ApproximateLatencyEstimator::kHighLatency *\n kCostlyAllReduceMultiplier;\n }\n return ApproximateLatencyEstimator::kHighLatency;\n }\n return ApproximateLatencyEstimator::kLowLatency;\n}\nvoid GPUProfileStatisticsAggregator::HandleMissingInstructionCost(\n const HloInstruction& instruction) {\n if (!IsNopInstruction(instruction) &&\n instruction.opcode() != HloOpcode::kWhile) {\n missing_instructions_.insert(&instruction);\n }\n}\nvoid GPUProfileStatisticsAggregator::HandleFoundInstructionCost(\n const 
HloInstruction& instruction) {\n found_instructions_count_++;\n}\nvoid GPUProfileStatisticsAggregator::HandleMissingInstructionLatency(\n const HloInstruction& from, const HloInstruction& to) {\n if (IsAsyncPair(from, to)) {\n missing_instructions_.insert(&from);\n }\n}\nvoid GPUProfileStatisticsAggregator::HandleFoundInstructionLatency(\n const HloInstruction& from, const HloInstruction& to) {\n found_instructions_count_++;\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/gpu/gpu_latency_hiding_scheduler.h\"\n#include \n#include \n#include \n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/gpu/gpu_hlo_schedule.h\"\n#include \"xla/service/hlo_module_config.h\"\n#include \"xla/service/profile_guided_latency_estimator.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla::gpu {\nnamespace {\nusing ::testing::Property;\nusing ::testing::UnorderedElementsAre;\nusing ::tsl::testing::StatusIs;\nclass GpuLatencyHidingSchedulerBaseTest : public HloTestBase {\n protected:\n absl::StatusOr ScheduleModule(HloModule* module) {\n auto& test_backend = backend();\n const auto& gpu_device_info =\n test_backend.default_stream_executor()->GetDeviceDescription();\n HloModuleConfig config(module->config());\n DebugOptions dboptions(config.debug_options());\n dboptions.set_xla_gpu_enable_pgle_accuracy_checker(true);\n config.set_debug_options(dboptions);\n module->set_config(config);\n TF_RETURN_IF_ERROR(\n ScheduleGpuModule(module, 8, gpu_device_info)\n .status());\n return module;\n }\n HloModuleConfig GetModuleConfig(absl::string_view fdo_profile) {\n HloModuleConfig config;\n DebugOptions debug_options = GetDebugOptionsForTest();\n 
debug_options.set_xla_gpu_enable_latency_hiding_scheduler(true);\n debug_options.set_xla_gpu_lhs_enable_gpu_async_tracker(true);\n config.set_debug_options(debug_options);\n *config.mutable_fdo_profile() = fdo_profile;\n return config;\n }\n};\nTEST_F(GpuLatencyHidingSchedulerBaseTest,\n GPUProfileStatisticsAggregatorDoesNotCountMissingNoops) {\n GPUProfileStatisticsAggregator aggregator;\n ProfileStatisticsAggregator::Statistics before_stats = aggregator.GetStats();\n ASSERT_EQ(before_stats.missing_instructions.size(), 0);\n ASSERT_EQ(before_stats.found_instructions_count, 0);\n absl::string_view kFdoProfile = \"\";\n absl::string_view kHloModule = R\"(\n HloModule m\n ENTRY main {\n parameter0 = f32[] parameter(0)\n parameter1 = f32[32] parameter(1)\n const0 = f32[] constant(42)\n bitcast0 = f32[2,16] bitcast(parameter1)\n partition-id0 = u32[] partition-id()\n replica-id0 = u32[] replica-id()\n tuple0 = (f32[], f32[2,16], u32[], u32[]) tuple(parameter0, bitcast0, partition-id0, replica-id0)\n opt-barrier = (f32[], f32[2,16], u32[], u32[]) opt-barrier(tuple0)\n ROOT _ = get-tuple-element(opt-barrier), index=0\n }\n )\";\n auto config = GetModuleConfig(kFdoProfile);\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kHloModule, config));\n for (const HloInstruction* instr :\n module->entry_computation()->instructions()) {\n aggregator.HandleMissingInstructionCost(*instr);\n ProfileStatisticsAggregator::Statistics after_stats = aggregator.GetStats();\n EXPECT_EQ(after_stats.missing_instructions.size(), 0);\n EXPECT_EQ(after_stats.found_instructions_count, 0);\n }\n}\nTEST_F(GpuLatencyHidingSchedulerBaseTest,\n GPUProfileStatisticsAggregatorCountsMissingInstruction) {\n GPUProfileStatisticsAggregator aggregator;\n ProfileStatisticsAggregator::Statistics before_stats = aggregator.GetStats();\n ASSERT_EQ(before_stats.missing_instructions.size(), 0);\n ASSERT_EQ(before_stats.found_instructions_count, 0);\n absl::string_view kFdoProfile = R\"pb(\n 
costs { name: \"dot0\" cost_us: 100.0 }\n )pb\";\n absl::string_view kHloModule = R\"(\n HloModule m\n ENTRY main {\n parameter0 = f32[] parameter(0)\n parameter1 = f32[32] parameter(1)\n const0 = f32[] constant(42)\n add0 = f32[] add(parameter0, const0)\n bitcast0 = f32[2,16] bitcast(parameter1)\n tuple0 = (f32[], f32[2,16]) tuple(add0, bitcast0)\n ROOT _ = get-tuple-element(tuple0), index=0\n }\n )\";\n auto config = GetModuleConfig(kFdoProfile);\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kHloModule, config));\n for (const HloInstruction* instr :\n module->entry_computation()->instructions()) {\n aggregator.HandleMissingInstructionCost(*instr);\n }\n ProfileStatisticsAggregator::Statistics after_stats = aggregator.GetStats();\n EXPECT_EQ(after_stats.missing_instructions.size(), 1);\n EXPECT_EQ((*after_stats.missing_instructions.begin())->opcode(),\n HloOpcode::kAdd);\n EXPECT_EQ(after_stats.found_instructions_count, 0);\n}\nTEST_F(GpuLatencyHidingSchedulerBaseTest,\n GPUProfileStatisticsAggregatorCountsMissingAsyncPairs) {\n GPUProfileStatisticsAggregator aggregator;\n ProfileStatisticsAggregator::Statistics before_stats = aggregator.GetStats();\n ASSERT_EQ(before_stats.missing_instructions.size(), 0);\n ASSERT_EQ(before_stats.found_instructions_count, 0);\n absl::string_view kFdoProfile = \"\";\n absl::string_view kHloModule = R\"(\n HloModule m\n reduce {\n x = f32[] parameter(0)\n y = f32[] parameter(1)\n ROOT _ = f32[] add(x, y)\n }\n ENTRY main {\n p0 = f32[] parameter(0)\n p1 = f32[2] parameter(1)\n ar_0 = f32[] all-reduce-start(p0), to_apply=reduce\n ar_1 = f32[] all-reduce-done(ar_0)\n rs_0 = ((f32[2]), f32[1]) reduce-scatter-start(p1), to_apply=reduce, dimensions={0}\n rs_1 = f32[1] reduce-scatter-done(rs_0)\n ag_0 = (f32[2], f32[4]) all-gather-start(p1), replica_groups={{0,1}}, dimensions={0}\n ag_1 = f32[4] all-gather-done(ag_0)\n ROOT _ = (f32[], f32[1], f32[4]) tuple(ar_1, rs_1, ag_1)\n }\n )\";\n auto config = 
GetModuleConfig(kFdoProfile);\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kHloModule, config));\n for (const HloInstruction* instr :\n module->entry_computation()->instructions()) {\n for (const HloInstruction* user : instr->users()) {\n aggregator.HandleMissingInstructionLatency(*instr, *user);\n }\n }\n ProfileStatisticsAggregator::Statistics after_stats = aggregator.GetStats();\n EXPECT_EQ(after_stats.found_instructions_count, 0);\n EXPECT_EQ(after_stats.missing_instructions.size(), 3);\n EXPECT_THAT(\n after_stats.missing_instructions,\n UnorderedElementsAre(\n Property(&HloInstruction::opcode, HloOpcode::kAllReduceStart),\n Property(&HloInstruction::opcode, HloOpcode::kAsyncStart),\n Property(&HloInstruction::opcode, HloOpcode::kAllGatherStart)));\n}\nTEST_F(GpuLatencyHidingSchedulerBaseTest,\n ScheduleGpuModuleErrorsOutOnMissingInstrucitonsForAWhileLoopBody) {\n absl::string_view kFdoProfile = R\"pb(\n costs { name: \"dot0\" cost_us: 100.0 }\n )pb\";\n absl::string_view kHloModule = R\"(\n HloModule m\n loop_body {\n p = (u32[], f32[1]) parameter(0)\n t0 = u32[] get-tuple-element(p), index=0\n t1 = f32[1] get-tuple-element(p), index=1\n add0 = f32[1] add(t1, t1)\n ROOT _ = (u32[],f32[1]) tuple(t0,t1)\n }\n loop_cond {\n p1 = (u32[], f32[1]) parameter(0)\n count = u32[] get-tuple-element(p1), index=0\n ub = u32[] constant(2)\n ROOT _ = pred[] compare(count, ub), direction=LT\n }\n ENTRY main {\n p2 = f32[1] parameter(0)\n ind = u32[] constant(1)\n t = (u32[],f32[1]) tuple(ind,p2)\n w = (u32[],f32[1]) while(t), body=loop_body, condition=loop_cond\n ROOT _ = f32[1] get-tuple-element(w), index=1\n }\n )\";\n auto config = GetModuleConfig(kFdoProfile);\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloModule, config));\n EXPECT_THAT(ScheduleModule(module.get()),\n StatusIs(absl::StatusCode::kInvalidArgument));\n}\nTEST_F(GpuLatencyHidingSchedulerBaseTest,\n 
ScheduleGpuModuleErrorsOutOnMissingInstrucitonsForAnEntryComputation) {\n absl::string_view kFdoProfile = R\"pb(\n costs { name: \"dot0\" cost_us: 100.0 }\n )pb\";\n absl::string_view kHloModule = R\"(\n HloModule m\n ENTRY main {\n p0 = f32[1] parameter(0)\n ROOT add0 = f32[1] add(p0,p0)\n }\n )\";\n auto config = GetModuleConfig(kFdoProfile);\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloModule, config));\n EXPECT_THAT(ScheduleModule(module.get()),\n StatusIs(absl::StatusCode::kInvalidArgument));\n}\nTEST_F(GpuLatencyHidingSchedulerBaseTest,\n ScheduleGpuModulePassesOnFullFDOProfile) {\n absl::string_view kFdoProfile = R\"pb(\n costs { name: \"add0\" cost_us: 100.0 }\n )pb\";\n absl::string_view kHloModule = R\"(\n HloModule m\n ENTRY main {\n p0 = f32[1] parameter(0)\n ROOT add0 = f32[1] add(p0,p0)\n }\n )\";\n auto config = GetModuleConfig(kFdoProfile);\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloModule, config));\n TF_EXPECT_OK(ScheduleModule(module.get()));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_latency_hiding_scheduler.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_latency_hiding_scheduler_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1192,"cells":{"ID":{"kind":"string","value":"ae38afae-cea4-480b-bc81-c353972f7926"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"reduction_utils"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/reduction_utils.cc"},"File Path for Unit 
Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/reduction_utils_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/reduction_utils.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/base/const_init.h\"\n#include \"absl/strings/str_join.h\"\n#include \"absl/synchronization/mutex.h\"\n#include \"absl/types/span.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/layout_util.h\"\n#include \"xla/service/gpu/ir_emission_utils.h\"\n#include \"xla/service/hlo_module_config.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/stream_executor/semantic_version.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/logging.h\"\n#ifdef GOOGLE_CUDA\n#include \"xla/service/gpu/gpu_asm_opts_util.h\"\n#include \"xla/stream_executor/cuda/cuda_asm_compiler.h\"\n#endif \nnamespace xla {\nnamespace gpu {\nnamespace {\nVector3 PartitionShapeByMiddleDimensions(\n const Shape& shape, absl::Span dims_middle) {\n CHECK(LayoutUtil::AreDimensionsConsecutive(shape.layout(), dims_middle));\n Vector3 values = {1, 1, 1};\n enum Segment { kMajor = 0, kMiddle = 1, kMinor = 2 };\n Segment cur_segment = kMinor;\n for (int64_t cur_dim : LayoutUtil::MinorToMajor(shape)) {\n if (cur_segment != kMajor) {\n bool cur_dim_in_middle = absl::c_linear_search(dims_middle, cur_dim);\n if (cur_segment == kMinor) {\n if (cur_dim_in_middle) {\n cur_segment = kMiddle;\n }\n } else if (cur_segment == kMiddle) {\n if (!cur_dim_in_middle) {\n cur_segment = kMajor;\n }\n }\n }\n values[cur_segment] *= shape.dimensions(cur_dim);\n }\n return values;\n}\n} \nint64_t MinThreadsXRowReduction(const HloModuleConfig& hlo_module_config) {\n#ifdef GOOGLE_CUDA\n static absl::Mutex mutex(absl::kConstInit);\n static std::atomic use_reduced_thread_count_atomic = nullptr;\n bool* use_reduced_thread_count =\n 
use_reduced_thread_count_atomic.load(std::memory_order_acquire);\n if (use_reduced_thread_count == nullptr) {\n absl::MutexLock lock(&mutex);\n use_reduced_thread_count =\n use_reduced_thread_count_atomic.load(std::memory_order_relaxed);\n if (use_reduced_thread_count == nullptr) {\n auto ptxas_config =\n PtxOptsFromDebugOptions(hlo_module_config.debug_options());\n auto ptxas_version_tuple =\n se::GetAsmCompilerVersion(ptxas_config.preferred_cuda_dir);\n use_reduced_thread_count = new bool(false);\n if (!ptxas_version_tuple.ok() ||\n ptxas_version_tuple.value() <\n stream_executor::SemanticVersion{12, 2, 0}) {\n *use_reduced_thread_count = true;\n }\n use_reduced_thread_count_atomic.store(use_reduced_thread_count,\n std::memory_order_release);\n }\n }\n if (*use_reduced_thread_count) {\n return 512;\n }\n#endif \n return 1024;\n}\nVector3 GetReductionTiling(const ReductionDimensions& reduction_dimensions) {\n if (reduction_dimensions.is_row_reduction) {\n int64_t tile_z = std::min(reduction_dimensions.dimensions[0],\n BatchedReductionRaceFreeBound());\n return {tile_z, 1, 16};\n }\n return {1, 128, 1};\n}\nint64_t ReductionDimensionRaceFreeBound(\n const HloModuleConfig& hlo_module_config,\n const ReductionDimensions& reduction_dimensions) {\n Vector3 reduction_tiling = GetReductionTiling(reduction_dimensions);\n if (reduction_dimensions.is_row_reduction) {\n return MinThreadsXRowReduction(hlo_module_config) * reduction_tiling[2];\n }\n return WarpSize() * reduction_tiling[1];\n}\nbool IsUnnestedReductionFasterThanElemental(\n const ReductionDimensions& reduction_dimensions) {\n if (reduction_dimensions.is_row_reduction) {\n return (reduction_dimensions.dimensions[2] >= WarpSize()) ||\n ((WarpSize() % reduction_dimensions.dimensions[2]) == 0);\n }\n int64_t major_size = reduction_dimensions.dimensions[1];\n int64_t minor_size = reduction_dimensions.dimensions[2];\n bool prefer_elemental_emitter =\n (major_size < WarpSize()) ||\n (major_size < 2 * WarpSize() && 
minor_size < WarpSize()) ||\n (major_size < 4 * WarpSize() && minor_size < 8) ||\n (major_size < 8 * WarpSize() && minor_size < 3);\n return !prefer_elemental_emitter;\n}\nbool IsReductionFromOrToContiguousDimensions(const HloInstruction& reduce) {\n if (reduce.opcode() != HloOpcode::kReduce) {\n return false;\n }\n const Shape& operand_shape = reduce.operand(0)->shape();\n absl::Span dims_to_reduce = reduce.dimensions();\n DimensionVector dims_to_keep;\n for (int64_t dim = 0; dim < operand_shape.dimensions().size(); ++dim) {\n if (!absl::c_linear_search(dims_to_reduce, dim)) {\n dims_to_keep.push_back(dim);\n }\n }\n return (LayoutUtil::AreDimensionsConsecutive(operand_shape.layout(),\n dims_to_keep) ||\n LayoutUtil::AreDimensionsConsecutive(operand_shape.layout(),\n dims_to_reduce)) &&\n IsUnnestedReductionFasterThanElemental(\n GetReductionKindAndContiguousComponents(reduce));\n}\nbool ReductionIsRaceFree(const HloModuleConfig& hlo_module_config,\n const ReductionDimensions& reduction_dimensions) {\n if (reduction_dimensions.is_row_reduction) {\n return reduction_dimensions.dimensions[2] <=\n ReductionDimensionRaceFreeBound(hlo_module_config,\n reduction_dimensions) &&\n reduction_dimensions.dimensions[0] <=\n BatchedReductionRaceFreeBound();\n }\n return reduction_dimensions.dimensions[1] <=\n ReductionDimensionRaceFreeBound(hlo_module_config,\n reduction_dimensions);\n}\nstd::ostream& operator<<(std::ostream& os,\n const ReductionDimensions& reduction_dimensions) {\n bool is_row_reduction = reduction_dimensions.is_row_reduction;\n os << (is_row_reduction ? \"row \" : \"column \") << \"reduction [\"\n << absl::StrJoin(reduction_dimensions.dimensions, \",\") << \"] -> [\"\n << reduction_dimensions.dimensions[0] << \", \"\n << reduction_dimensions\n .dimensions[is_row_reduction\n ? 
ReductionDimensions::kRowKeptDimension\n : ReductionDimensions::kColMinorKeptDimension]\n << \"]\";\n return os;\n}\nReductionDimensions GetReductionKindAndContiguousComponents(\n const HloInstruction& reduce) {\n Shape input_shape = reduce.operand(0)->shape();\n absl::Span dims_to_reduce = reduce.dimensions();\n DimensionVector dims_to_keep;\n for (int64_t dim = 0; dim < input_shape.rank(); ++dim) {\n if (!absl::c_linear_search(dims_to_reduce, dim)) {\n dims_to_keep.push_back(dim);\n }\n }\n if (dims_to_keep.empty()) {\n return {true,\n {1, 1, ShapeUtil::ElementsIn(input_shape)}};\n }\n if (LayoutUtil::AreDimensionsConsecutive(input_shape.layout(),\n dims_to_keep)) {\n Vector3 shape_partition =\n PartitionShapeByMiddleDimensions(input_shape, dims_to_keep);\n if (shape_partition[1] == 1) {\n return {true,\n {1, 1, shape_partition[0] * shape_partition[2]}};\n }\n if (shape_partition[2] == 1) {\n return {false,\n {1, shape_partition[0], shape_partition[1]}};\n }\n return {true, shape_partition};\n }\n Vector3 shape_partition =\n PartitionShapeByMiddleDimensions(input_shape, dims_to_reduce);\n if (shape_partition[2] == 1) {\n return {true,\n {1, shape_partition[0], shape_partition[1]}};\n }\n return {false, shape_partition};\n}\nbool IsRealReductionHero(const HloInstruction& root,\n const HloInstruction& hero) {\n if (!IsReductionFromOrToContiguousDimensions(hero)) {\n return false;\n }\n return &root == &hero ||\n ReductionIsRaceFree(hero.GetModule()->config(),\n GetReductionKindAndContiguousComponents(hero));\n}\nbool AreReductionsMultiOutputFusionCompatible(\n const HloInstruction* reduce_hero, const HloInstruction* first_reduce) {\n return GetReductionKindAndContiguousComponents(*reduce_hero) ==\n GetReductionKindAndContiguousComponents(*first_reduce);\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/gpu/reduction_utils.h\"\n#include \n#include \n#include \"absl/strings/str_cat.h\"\n#include 
\"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/tests/hlo_test_base.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nusing ::testing::ElementsAre;\nusing ReductionUtilsTest = HloTestBase;\nconst char kModulePrefix[] = R\"(\n HloModule test_module\n scalar_add {\n lhs = f32[] parameter(0)\n rhs = f32[] parameter(1)\n ROOT add = f32[] add(lhs, rhs)\n })\";\nTEST_F(ReductionUtilsTest, ReductionsAreMultioutputFusionCompatible) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_sibling1 {\n p_0 = f32[32,64]{1,0} parameter(0)\n constant = f32[] constant(0)\n ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add\n }\n fused_sibling2 {\n p_0 = f32[32,64]{1,0} parameter(0)\n neg = f32[32,64]{1,0} negate(p_0)\n constant = f32[] constant(0)\n ROOT reduce = f32[32]{0} reduce(neg, constant), dimensions={1}, to_apply=scalar_add\n }\n ENTRY entry {\n p_0 = f32[32,64]{1,0} parameter(0)\n fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1\n fusion2 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling2\n ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2)\n })\"))\n .value();\n const HloInstruction* root = module->entry_computation()->root_instruction();\n const HloInstruction* fusion1 = root->operand(0);\n const HloInstruction* fusion2 = root->operand(1);\n EXPECT_TRUE(AreReductionsMultiOutputFusionCompatible(\n fusion1->fused_expression_root(), fusion2->fused_expression_root()));\n}\nTEST_F(ReductionUtilsTest,\n ReductionsWithSameCanonicalizedDimsAreMultioutputFusionCompatible) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_sibling1 {\n p_0 = f32[32,64]{1,0} parameter(0)\n constant = f32[] constant(0)\n ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add\n }\n fused_sibling2 {\n p_0 = f32[32,64]{1,0} parameter(0)\n bitcast = f32[32,8,8]{2,1,0} bitcast(p_0)\n constant = 
f32[] constant(0)\n ROOT reduce = f32[32]{0} reduce(bitcast, constant), dimensions={1,2}, to_apply=scalar_add\n }\n ENTRY entry {\n p_0 = f32[32,64]{1,0} parameter(0)\n fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1\n fusion2 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling2\n ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2)\n })\"))\n .value();\n const HloInstruction* root = module->entry_computation()->root_instruction();\n const HloInstruction* fusion1 = root->operand(0);\n const HloInstruction* fusion2 = root->operand(1);\n EXPECT_TRUE(AreReductionsMultiOutputFusionCompatible(\n fusion1->fused_expression_root(), fusion2->fused_expression_root()));\n}\nTEST_F(ReductionUtilsTest,\n ReductionsAreNotMultioutputFusionCompatible_DifferentOperandShapes) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_sibling1 {\n p_0 = f32[32,64]{1,0} parameter(0)\n constant = f32[] constant(0)\n ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add\n }\n fused_sibling2 {\n p_0 = f32[64,32]{1,0} parameter(0)\n neg = f32[64,32]{1,0} negate(p_0)\n constant = f32[] constant(0)\n ROOT reduce = f32[32]{0} reduce(neg, constant), dimensions={0}, to_apply=scalar_add\n }\n ENTRY entry {\n p_0 = f32[32,64]{1,0} parameter(0)\n p_1 = f32[64,32]{1,0} parameter(1)\n fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1\n fusion2 = f32[32]{0} fusion(p_1), kind=kInput, calls=fused_sibling2\n ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2)\n })\"))\n .value();\n const HloInstruction* root = module->entry_computation()->root_instruction();\n const HloInstruction* fusion1 = root->operand(0);\n const HloInstruction* fusion2 = root->operand(1);\n EXPECT_FALSE(AreReductionsMultiOutputFusionCompatible(\n fusion1->fused_expression_root(), fusion2->fused_expression_root()));\n}\nTEST_F(ReductionUtilsTest,\n 
ReductionsAreNotMultioutputFusionCompatible_DifferentOutputShapes) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_sibling1 {\n p_0 = f32[32,64]{1,0} parameter(0)\n constant = f32[] constant(0)\n ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add\n }\n fused_sibling2 {\n p_0 = f32[64,32]{1,0} parameter(0)\n neg = f32[64,32]{1,0} negate(p_0)\n constant = f32[] constant(0)\n ROOT reduce = f32[64]{0} reduce(neg, constant), dimensions={1}, to_apply=scalar_add\n }\n ENTRY entry {\n p_0 = f32[32,64]{1,0} parameter(0)\n p_1 = f32[64,32]{1,0} parameter(1)\n fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1\n fusion2 = f32[64]{0} fusion(p_1), kind=kInput, calls=fused_sibling2\n ROOT root = (f32[32]{0}, f32[64]{0}) tuple(fusion1, fusion2)\n })\"))\n .value();\n const HloInstruction* root = module->entry_computation()->root_instruction();\n const HloInstruction* fusion1 = root->operand(0);\n const HloInstruction* fusion2 = root->operand(1);\n EXPECT_FALSE(AreReductionsMultiOutputFusionCompatible(\n fusion1->fused_expression_root(), fusion2->fused_expression_root()));\n}\nTEST_F(ReductionUtilsTest,\n ReductionsAreNotMultioutputFusionCompatible_DifferentReduceDimensions) {\n auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R\"(\n fused_sibling1 {\n p_0 = f32[32,32]{1,0} parameter(0)\n constant = f32[] constant(0)\n ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={0}, to_apply=scalar_add\n }\n fused_sibling2 {\n p_0 = f32[32,32]{1,0} parameter(0)\n neg = f32[32,32]{1,0} negate(p_0)\n constant = f32[] constant(0)\n ROOT reduce = f32[32]{0} reduce(neg, constant), dimensions={1}, to_apply=scalar_add\n }\n ENTRY entry {\n p_0 = f32[32,32]{1,0} parameter(0)\n fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1\n fusion2 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling2\n ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2)\n 
})\"))\n .value();\n const HloInstruction* root = module->entry_computation()->root_instruction();\n const HloInstruction* fusion1 = root->operand(0);\n const HloInstruction* fusion2 = root->operand(1);\n EXPECT_FALSE(AreReductionsMultiOutputFusionCompatible(\n fusion1->fused_expression_root(), fusion2->fused_expression_root()));\n}\nTEST(ReductionDimensionsTest, GetOutputShape) {\n ReductionDimensions row_reduction{true, {1, 2, 3}};\n ReductionDimensions col_reduction{false, {1, 2, 3}};\n EXPECT_THAT(row_reduction.GetOutputShape(), ElementsAre(2));\n EXPECT_THAT(col_reduction.GetOutputShape(), ElementsAre(1, 3));\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/reduction_utils.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/reduction_utils_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1193,"cells":{"ID":{"kind":"string","value":"757b4fb9-3ffb-4887-8a6f-740c46156148"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"runtime_intrinsics"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/runtime_intrinsics.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/runtime_intrinsics_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/runtime_intrinsics.h\"\n#include \n#include \n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/status.h\"\n#include \"absl/strings/ascii.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/service/collective_ops_utils.h\"\n#include \"xla/service/custom_call_status.h\"\n#include 
\"xla/service/custom_call_target_registry.h\"\n#include \"xla/service/platform_util.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/stream_executor/device_memory.h\"\n#include \"xla/stream_executor/platform.h\"\n#include \"xla/stream_executor/platform_manager.h\"\n#include \"xla/stream_executor/stream.h\"\n#include \"xla/stream_executor/stream_finder.h\"\n#include \"xla/util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nstd::string GetGpuPlatformName() {\n return absl::AsciiStrToUpper(\n PlatformUtil::CanonicalPlatformName(\"gpu\").value());\n}\nabsl::Status AssertOnGpu(void* stream_handle, void* buffer,\n absl::string_view error_msg) {\n TF_ASSIGN_OR_RETURN(\n se::Platform * platform,\n se::PlatformManager::PlatformWithName(GetGpuPlatformName()));\n TF_ASSIGN_OR_RETURN(se::Stream * stream,\n stream_executor::FindStream(platform, stream_handle));\n if (!stream) {\n return Internal(\"Stream not found for: %p\", stream_handle);\n }\n int8_t expected = false;\n int64_t byte_size = sizeof(int8_t);\n CHECK_EQ(byte_size, ShapeUtil::ByteSizeOfPrimitiveType(PrimitiveType::PRED));\n TF_RETURN_IF_ERROR(stream->Memcpy(\n &expected, se::DeviceMemoryBase{buffer, static_cast(byte_size)},\n byte_size));\n TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());\n if (!static_cast(expected)) {\n return Internal(\"%s\", error_msg);\n }\n return absl::OkStatus();\n}\nvoid AssertionCustomCall(void* stream_handle, void** buffers,\n const char* opaque, int opaque_len,\n XlaCustomCallStatus* status) {\n absl::Status s =\n AssertOnGpu(stream_handle, buffers[0],\n absl::string_view{opaque, static_cast(opaque_len)});\n if (!s.ok()) {\n auto msg = s.message();\n XlaCustomCallStatusSetFailure(status, msg.data(), msg.size());\n }\n}\nvoid NopReturnTokenCustomCall(void* stream_handle, void** buffers,\n const char* opaque, int opaque_len,\n XlaCustomCallStatus* status) {\n VLOG(1) << 
\"NopReturnTokenCustomCall called.\";\n}\n} \nXLA_REGISTER_CUSTOM_CALL_TARGET_WITH_SYM(\n std::string(kXlaGpuAssertCustomCallTag), AssertionCustomCall,\n GetGpuPlatformName());\nXLA_REGISTER_CUSTOM_CALL_TARGET_WITH_SYM(\n std::string(kNopReturnTokenCustomCallTarget), NopReturnTokenCustomCall,\n GetGpuPlatformName());\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \n#include \n#include \n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nusing RuntimeIntrinsicsTest = HloTestBase;\nTEST_F(RuntimeIntrinsicsTest, NopReturnTokenWorks) {\n constexpr absl::string_view kHloText = R\"(\nHloModule m\nENTRY e {\n constant = u32[2]{0} constant({0, 1})\n ROOT nop_return_token = token[] custom-call(constant), custom_call_target=\"NopReturnToken\", custom_call_has_side_effect=true\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n GetOptimizedModule(kHloText));\n EXPECT_EQ(module->entry_computation()->instruction_count(), 2);\n EXPECT_TRUE(Run(std::move(module), false));\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime_intrinsics.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime_intrinsics_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1194,"cells":{"ID":{"kind":"string","value":"7606cb75-ccae-4f79-b2f0-741f5d1073cb"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"ir_emitter"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/cpu/ir_emitter.cc"},"File Path for 
Unit Test":{"kind":"string","value":"third_party/xla/xla/service/cpu/ir_emitter_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/cpu/ir_emitter.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/cleanup/cleanup.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/container/inlined_vector.h\"\n#include \"absl/meta/type_traits.h\"\n#include \"absl/status/status.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_format.h\"\n#include \"absl/strings/str_join.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/span.h\"\n#include \"llvm/IR/Attributes.h\"\n#include \"llvm/IR/BasicBlock.h\"\n#include \"llvm/IR/Constants.h\"\n#include \"llvm/IR/FMF.h\"\n#include \"llvm/IR/GlobalVariable.h\"\n#include \"llvm/IR/IRBuilder.h\"\n#include \"llvm/IR/Instructions.h\"\n#include \"llvm/IR/Intrinsics.h\"\n#include \"llvm/IR/IntrinsicsX86.h\"\n#include \"llvm/IR/LLVMContext.h\"\n#include \"llvm/IR/Value.h\"\n#include \"xla/hlo/ir/collective_device_list.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/layout_util.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/map_util.h\"\n#include \"xla/primitive_util.h\"\n#include \"xla/service/buffer_assignment.h\"\n#include \"xla/service/collective_ops_utils.h\"\n#include \"xla/service/cpu/backend_config.pb.h\"\n#include \"xla/service/cpu/cpu_options.h\"\n#include \"xla/service/cpu/cpu_runtime.h\"\n#include \"xla/service/cpu/dot_op_emitter.h\"\n#include \"xla/service/cpu/elemental_math_emitter.h\"\n#include \"xla/service/cpu/ir_emission_utils.h\"\n#include \"xla/service/cpu/ir_function.h\"\n#include 
\"xla/service/cpu/onednn_config.pb.h\"\n#include \"xla/service/cpu/parallel_loop_emitter.h\"\n#include \"xla/service/elemental_ir_emitter.h\"\n#include \"xla/service/hlo_module_config.h\"\n#include \"xla/service/llvm_ir/buffer_assignment_util.h\"\n#include \"xla/service/llvm_ir/dynamic_update_slice_util.h\"\n#include \"xla/service/llvm_ir/ir_array.h\"\n#include \"xla/service/llvm_ir/llvm_loop.h\"\n#include \"xla/service/llvm_ir/llvm_type_conversion_util.h\"\n#include \"xla/service/llvm_ir/llvm_util.h\"\n#include \"xla/service/llvm_ir/loop_emitter.h\"\n#include \"xla/service/llvm_ir/tuple_ops.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/status_macros.h\"\n#include \"xla/tsl/lib/math/math_util.h\"\n#include \"xla/util.h\"\n#include \"xla/window_util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/logging.h\"\n#include \"tsl/platform/statusor.h\"\n#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)\n#include \"xla/service/cpu/onednn_memory_util.h\"\n#endif\nnamespace xla {\nnamespace {\nusing llvm_ir::IrName;\nusing llvm_ir::SetToFirstInsertPoint;\n} \nnamespace cpu {\nclass IrEmitter::CpuElementalIrEmitter : public ElementalIrEmitter {\n public:\n CpuElementalIrEmitter(const HloModuleConfig& module_config,\n IrEmitter* ir_emitter, llvm::Module* module)\n : ElementalIrEmitter(\n module, ir_emitter->b(),\n Options{true}),\n hlo_module_config_(module_config),\n ir_emitter_(ir_emitter) {}\n protected:\n absl::StatusOr EmitAtan2(PrimitiveType prim_type,\n llvm::Value* lhs, llvm::Value* rhs,\n absl::string_view) override {\n return xla::cpu::EmitAtan2(module(), *b(), prim_type, lhs, rhs);\n }\n absl::StatusOr EmitTanh(PrimitiveType prim_type,\n llvm::Value* value) override {\n return xla::cpu::EmitTanh(module(), *b(), prim_type, value);\n }\n absl::StatusOr EmitErf(PrimitiveType prim_type,\n llvm::Value* value) override {\n return xla::cpu::EmitErf(module(), *b(), prim_type, value);\n }\n absl::StatusOr> 
EmitThreadLocalCall(\n const HloComputation& callee, absl::Span parameters,\n absl::string_view name, bool is_reducer) override {\n return ir_emitter_->EmitThreadLocalCall(callee, parameters, name,\n is_reducer);\n }\n bool fast_min_max() override {\n return hlo_module_config_.debug_options().xla_cpu_enable_fast_min_max();\n }\n const HloModuleConfig& hlo_module_config_;\n IrEmitter* ir_emitter_;\n};\nIrEmitter::IrEmitter(mlir::MLIRContext* mlir_context,\n const HloModule& hlo_module,\n const BufferAssignment& assignment,\n llvm::Module* llvm_module,\n absl::flat_hash_map\n instruction_to_profile_idx,\n absl::flat_hash_map\n computation_to_profile_idx,\n absl::flat_hash_map\n computation_transitively_contains_custom_call,\n const TargetMachineFeatures* target_machine_features,\n bool emit_code_for_msan)\n : assignment_(assignment),\n module_(llvm_module),\n arch_type_(llvm::Triple(llvm_module->getTargetTriple()).getArch()),\n main_builder_(llvm_module->getContext()),\n current_builder_(&main_builder_),\n mlir_context_(mlir_context),\n instruction_to_profile_idx_(std::move(instruction_to_profile_idx)),\n computation_to_profile_idx_(std::move(computation_to_profile_idx)),\n computation_transitively_contains_custom_call_(\n std::move(computation_transitively_contains_custom_call)),\n alias_analysis_(hlo_module, assignment, &llvm_module->getContext()),\n hlo_module_config_(hlo_module.config()),\n is_top_level_computation_(false),\n target_machine_features_(*target_machine_features),\n emit_code_for_msan_(emit_code_for_msan) {\n b()->setFastMathFlags(llvm_ir::GetCpuFastMathFlags(hlo_module_config_));\n absl::Status s = GatherComputationsByAllocationType(\n &hlo_module, &thread_local_computations_, &global_computations_);\n absl::c_sort(thread_local_computations_);\n absl::c_sort(global_computations_);\n TF_CHECK_OK(s) << \"Should have failed buffer assignment.\";\n}\nIrEmitter::~IrEmitter() {\n if (!compute_function_.empty()) {\n LOG(WARNING) << \"Compute function stack 
is not empty: \"\n << compute_function_.size();\n }\n};\nvoid IrEmitter::EmitThreadLocalFunctionEpilogue(HloComputation* computation) {\n llvm::Argument* out_parameter = compute_function()->result_arg();\n llvm_ir::IrArray root_value = GetIrArrayFor(computation->root_instruction());\n const Shape& return_shape = computation->root_instruction()->shape();\n if (ShapeUtil::IsScalar(return_shape)) {\n llvm::Value* ret_value =\n Load(root_value.GetBasePointeeType(), root_value.GetBasePointer(),\n \"load_ret_value\");\n Store(ret_value, out_parameter);\n } else {\n CHECK(return_shape.IsTuple());\n llvm::Type* tuple_type = llvm_ir::ShapeToIrType(return_shape, module_);\n for (int i = 0; i < return_shape.tuple_shapes_size(); i++) {\n const Shape& element_shape = return_shape.tuple_shapes(i);\n llvm::Value* destination = llvm_ir::EmitGetTupleElement(\n element_shape,\n i,\n MinimumAlignmentForShape(element_shape), out_parameter,\n tuple_type, b());\n llvm::Value* source = llvm_ir::EmitGetTupleElement(\n element_shape,\n i,\n MinimumAlignmentForShape(element_shape),\n root_value.GetBasePointer(), root_value.GetBasePointeeType(), b());\n Store(Load(IrShapeType(element_shape), source), destination);\n }\n }\n}\nabsl::StatusOr IrEmitter::EmitComputation(\n HloComputation* computation, absl::string_view function_name_prefix,\n bool is_top_level_computation,\n absl::Span instruction_order,\n bool allow_reassociation,\n absl::Span function_attributes) {\n std::string function_name = name_uniquer_.GetUniqueName(function_name_prefix);\n VLOG(2) << \"Emitting IR for CPU function [\" << function_name_prefix << \"]\";\n is_top_level_computation_ = is_top_level_computation;\n allow_reassociation_ = allow_reassociation;\n num_dynamic_loop_bounds_ = 0;\n auto backend_config_or =\n computation->root_instruction()->backend_config();\n if (backend_config_or.ok() &&\n !backend_config_or->outer_dimension_partitions().empty()) {\n num_dynamic_loop_bounds_ =\n 
backend_config_or->outer_dimension_partitions().size();\n }\n if (computation->root_instruction()->opcode() != HloOpcode::kOutfeed) {\n TF_ASSIGN_OR_RETURN(\n computation_root_allocation_,\n assignment_.GetUniqueTopLevelSlice(computation->root_instruction()));\n }\n bool has_thread_local_param = false;\n for (const HloInstruction* param : computation->parameter_instructions()) {\n TF_ASSIGN_OR_RETURN(BufferAllocation::Slice param_slice,\n assignment_.GetUniqueTopLevelSlice(param));\n has_thread_local_param |= param_slice.allocation()->is_thread_local();\n computation_parameter_allocations_[param_slice.allocation()->index()] =\n param->parameter_number();\n }\n InitializeIrFunction(function_name);\n bool use_rdtscp = arch_type_ == llvm::Triple::ArchType::x86 ||\n arch_type_ == llvm::Triple::ArchType::x86_64;\n profiling_state_ = ProfilingState(use_rdtscp);\n tracing_state_.set_enabled(\n computation->parent()->config().cpu_traceme_enabled());\n llvm::IRBuilderBase::FastMathFlagGuard guard(*b());\n llvm::FastMathFlags flags = b()->getFastMathFlags();\n flags.setAllowReassoc(flags.allowReassoc() || allow_reassociation);\n b()->setFastMathFlags(flags);\n TF_RETURN_IF_ERROR(computation->AcceptOrdered(this, instruction_order));\n llvm::Function* ir_function = compute_function()->function();\n for (llvm::Attribute::AttrKind attr : function_attributes) {\n ir_function->addFnAttr(attr);\n }\n InsertOrDie(&emitted_functions_,\n ComputationToEmit{computation, allow_reassociation}, ir_function);\n const BufferAllocation* root_allocation =\n computation_root_allocation_.allocation();\n if (root_allocation &&\n (root_allocation->is_thread_local() ||\n (root_allocation->is_constant() && has_thread_local_param))) {\n EmitThreadLocalFunctionEpilogue(computation);\n }\n PopComputeFunction();\n computation_root_allocation_ = BufferAllocation::Slice();\n computation_parameter_allocations_.clear();\n return ir_function;\n}\nvoid IrEmitter::InitializeIrFunction(const std::string& 
function_name) {\n llvm::Function::LinkageTypes linkage =\n is_top_level_computation_ ? llvm::GlobalValue::ExternalLinkage\n : llvm::GlobalValue::InternalLinkage;\n compute_function_.emplace(function_name, linkage, hlo_module_config_, module_,\n b(), num_dynamic_loop_bounds_);\n}\nabsl::Status IrEmitter::HandleBitcast(HloInstruction* bitcast) {\n VLOG(2) << \"HandleBitcast: \" << bitcast->ToString();\n emitted_value_[bitcast] = GetEmittedValueFor(bitcast->operand(0));\n return absl::OkStatus();\n}\nllvm::Constant* IrEmitter::EmitGlobalForLiteral(const Literal& literal) {\n llvm::Constant* initializer =\n llvm_ir::ConvertLiteralToIrConstant(literal, module_);\n llvm::GlobalVariable* result_global = new llvm::GlobalVariable(\n *module_,\n initializer->getType(),\n true,\n llvm::GlobalValue::PrivateLinkage,\n initializer,\n \"\");\n result_global->setAlignment(\n llvm::Align(MinimumAlignmentForShape(literal.shape())));\n result_global->setUnnamedAddr(llvm::GlobalVariable::UnnamedAddr::Global);\n return result_global;\n}\nabsl::Status IrEmitter::EmitConstantGlobals() {\n for (const BufferAllocation& allocation : assignment_.Allocations()) {\n if (!allocation.is_constant()) {\n continue;\n }\n const Literal& literal = llvm_ir::LiteralForConstantAllocation(allocation);\n llvm::Constant* global_for_const;\n auto it = emitted_literals_.find(LayoutSensitiveLiteralWrapper{literal});\n if (it != emitted_literals_.end()) {\n global_for_const = it->second;\n } else {\n global_for_const = EmitGlobalForLiteral(literal);\n InsertOrDie(&emitted_literals_, LayoutSensitiveLiteralWrapper{literal},\n global_for_const);\n }\n InsertOrDie(&constant_buffer_to_global_, allocation.index(),\n global_for_const);\n }\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleConstant(HloInstruction* constant) {\n VLOG(2) << \"HandleConstant: \" << constant->ToString();\n return EmitTargetAddressForOp(constant);\n}\nabsl::Status IrEmitter::HandleCopy(HloInstruction* copy) {\n if 
(copy->shape().IsTuple() ||\n (copy->shape().IsArray() &&\n LayoutUtil::Equal(copy->operand(0)->shape().layout(),\n copy->shape().layout()))) {\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(copy));\n return EmitMemcpy(*(copy->operand(0)), *copy);\n } else if (copy->shape().IsArray()) {\n return DefaultAction(copy);\n }\n return Unimplemented(\"unsupported operand type %s for copy instruction\",\n PrimitiveType_Name(copy->shape().element_type()));\n}\nint MinimumAlignmentForPrimitiveType(PrimitiveType primitive_type) {\n int64_t byte_size = ShapeUtil::ByteSizeOfPrimitiveType(primitive_type);\n DCHECK_GE(byte_size, 0);\n DCHECK_LE(byte_size, 16);\n return std::min(int64_t{8}, byte_size);\n}\nint IrEmitter::MinimumAlignmentForPrimitiveType(PrimitiveType primitive_type) {\n return ::xla::cpu::MinimumAlignmentForPrimitiveType(primitive_type);\n}\nint64_t IrEmitter::ByteSizeOf(const Shape& shape) const {\n return llvm_ir::ByteSizeOf(shape, module_->getDataLayout());\n}\nint IrEmitter::MinimumAlignmentForShape(const Shape& shape) {\n if (ShapeUtil::IsScalar(shape)) {\n return MinimumAlignmentForPrimitiveType(shape.element_type());\n }\n int64_t buffer_size = ByteSizeOf(shape);\n DCHECK_GE(buffer_size, 0);\n DCHECK_LE(buffer_size, SIZE_MAX);\n return target_machine_features_.minimum_alignment_for_allocation(buffer_size);\n}\nvoid IrEmitter::AttachAlignmentMetadataForLoad(llvm::LoadInst* load,\n const Shape& shape) {\n int alignment = MinimumAlignmentForShape(shape);\n if (alignment > 1) {\n llvm_ir::SetAlignmentMetadataForLoad(load, alignment);\n }\n}\nvoid IrEmitter::AttachAlignmentMetadataForLoad(llvm::LoadInst* load,\n int64_t buffer_size) {\n int alignment =\n target_machine_features_.minimum_alignment_for_allocation(buffer_size);\n if (alignment > 1) {\n llvm_ir::SetAlignmentMetadataForLoad(load, alignment);\n }\n}\nvoid IrEmitter::AttachDereferenceableMetadataForLoad(llvm::LoadInst* load,\n const Shape& shape) {\n AttachDereferenceableMetadataForLoad(load, 
ByteSizeOf(shape));\n}\nvoid IrEmitter::AttachDereferenceableMetadataForLoad(llvm::LoadInst* load,\n int64_t buffer_size) {\n if (buffer_size > 0) {\n llvm_ir::SetDereferenceableMetadataForLoad(load, buffer_size);\n }\n}\nvoid IrEmitter::AttachInvariantLoadMetadataForLoad(llvm::LoadInst* load) const {\n AttachInvariantLoadMetadataForLoad(load, hlo_module_config_);\n}\n void IrEmitter::AttachInvariantLoadMetadataForLoad(\n llvm::LoadInst* load, const HloModuleConfig& config) {\n if (config.debug_options().xla_llvm_enable_invariant_load_metadata()) {\n load->setMetadata(llvm::LLVMContext::MD_invariant_load,\n llvm::MDNode::get(load->getContext(), {}));\n }\n}\nabsl::Status IrEmitter::HandleGetTupleElement(\n HloInstruction* get_tuple_element) {\n const HloInstruction* operand = get_tuple_element->operand(0);\n const Shape& shape = get_tuple_element->shape();\n emitted_value_[get_tuple_element] = llvm_ir::EmitGetTupleElement(\n shape, get_tuple_element->tuple_index(), MinimumAlignmentForShape(shape),\n GetEmittedValueFor(operand), IrShapeType(operand->shape()), b());\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleSelect(HloInstruction* select) {\n auto pred = select->operand(0);\n TF_RET_CHECK(pred->shape().element_type() == PRED);\n return DefaultAction(select);\n}\nabsl::Status IrEmitter::HandleInfeed(HloInstruction* instruction) {\n HloInfeedInstruction* infeed = Cast(instruction);\n VLOG(2) << \"HandleInfeed: \" << infeed->ToString();\n const Shape& data_shape = infeed->infeed_shape();\n DCHECK(ShapeUtil::Equal(data_shape,\n ShapeUtil::GetTupleElementShape(infeed->shape(), 0)));\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(infeed));\n TF_ASSIGN_OR_RETURN(BufferAllocation::Slice data_slice,\n assignment_.GetUniqueSlice(infeed, {0}));\n llvm::Value* data_address = EmitBufferPointer(data_slice, data_shape);\n llvm::Type* data_type = IrShapeType(data_shape);\n TF_ASSIGN_OR_RETURN(BufferAllocation::Slice token_slice,\n assignment_.GetUniqueSlice(infeed, 
{1}));\n llvm::Value* token_address = EmitBufferPointer(\n token_slice, ShapeUtil::GetTupleElementShape(infeed->shape(), 1));\n llvm_ir::EmitTuple(GetIrArrayFor(infeed), {data_address, token_address}, b());\n if (data_shape.IsTuple()) {\n TF_RET_CHECK(!ShapeUtil::IsNestedTuple(data_shape));\n std::vector tuple_element_addresses;\n for (int i = 0; i < data_shape.tuple_shapes_size(); ++i) {\n TF_ASSIGN_OR_RETURN(BufferAllocation::Slice buffer,\n assignment_.GetUniqueSlice(infeed, {0, i}));\n const Shape& tuple_element_shape =\n ShapeUtil::GetTupleElementShape(data_shape, i);\n llvm::Value* tuple_element_address =\n EmitBufferPointer(buffer, tuple_element_shape);\n TF_RETURN_IF_ERROR(EmitXfeedTransfer(\n XfeedKind::kInfeed, tuple_element_shape, tuple_element_address));\n tuple_element_addresses.push_back(tuple_element_address);\n }\n llvm_ir::EmitTuple(llvm_ir::IrArray(data_address, data_type, data_shape),\n tuple_element_addresses, b());\n } else {\n TF_RETURN_IF_ERROR(\n EmitXfeedTransfer(XfeedKind::kInfeed, data_shape, data_address));\n }\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::EmitXfeedTransfer(XfeedKind kind, const Shape& shape,\n llvm::Value* program_buffer_address) {\n int64_t length = ByteSizeOf(shape);\n if (length < 0 || length > std::numeric_limits::max()) {\n return InvalidArgument(\n \"xfeed (infeed or outfeed) buffer length %d is outside the valid \"\n \"size range\",\n length);\n }\n int32_t length_32 = static_cast(length);\n int32_t shape_length;\n TF_ASSIGN_OR_RETURN(\n llvm::Value * shape_ptr,\n llvm_ir::EncodeSelfDescribingShapeConstant(shape, &shape_length, b()));\n const char* acquire_func_name =\n kind == XfeedKind::kInfeed\n ? 
runtime::kAcquireInfeedBufferForDequeueSymbolName\n : runtime::kAcquireOutfeedBufferForPopulationSymbolName;\n llvm::Value* acquired_pointer = EmitCallToFunc(\n acquire_func_name,\n {GetExecutableRunOptionsArgument(), b()->getInt32(length_32), shape_ptr,\n b()->getInt32(shape_length)},\n b()->getPtrTy());\n if (kind == XfeedKind::kInfeed) {\n MemCpy(program_buffer_address, llvm::Align(1),\n acquired_pointer,\n llvm::Align(1), length_32);\n } else {\n MemCpy(acquired_pointer, llvm::Align(1),\n program_buffer_address,\n llvm::Align(1), length_32);\n if (emit_code_for_msan_) {\n const llvm::DataLayout& dl = module_->getDataLayout();\n llvm::Type* intptr_type = b()->getIntPtrTy(dl);\n EmitCallToFunc(\n \"__msan_unpoison\",\n {acquired_pointer, llvm::ConstantInt::get(intptr_type, length)},\n b()->getVoidTy());\n }\n }\n const char* release_func_name =\n kind == XfeedKind::kInfeed\n ? runtime::kReleaseInfeedBufferAfterDequeueSymbolName\n : runtime::kReleaseOutfeedBufferAfterPopulationSymbolName;\n EmitCallToFunc(release_func_name,\n {GetExecutableRunOptionsArgument(), b()->getInt32(length_32),\n acquired_pointer, shape_ptr, b()->getInt32(shape_length)},\n b()->getVoidTy());\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleOutfeed(HloInstruction* outfeed) {\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(outfeed));\n HloInstruction* operand = outfeed->operands()[0];\n const Shape& operand_shape = operand->shape();\n llvm::Value* value = GetEmittedValueFor(operand);\n if (!operand_shape.IsTuple()) {\n return EmitXfeedTransfer(XfeedKind::kOutfeed, operand_shape, value);\n }\n TF_RET_CHECK(!ShapeUtil::IsNestedTuple(operand_shape));\n for (int i = 0; i < operand_shape.tuple_shapes_size(); ++i) {\n const Shape& tuple_element_shape =\n ShapeUtil::GetTupleElementShape(operand_shape, i);\n llvm::Value* tuple_element = llvm_ir::EmitGetTupleElement(\n tuple_element_shape, i, MinimumAlignmentForShape(tuple_element_shape),\n value, IrShapeType(operand_shape), b());\n 
TF_RETURN_IF_ERROR(EmitXfeedTransfer(XfeedKind::kOutfeed,\n tuple_element_shape, tuple_element));\n }\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleSort(HloInstruction* hlo) {\n const HloSortInstruction* sort = Cast(hlo);\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(sort));\n Shape keys_shape = sort->keys()->shape();\n PrimitiveType keys_type = keys_shape.element_type();\n if (!primitive_util::IsArrayType(keys_type)) {\n return Unimplemented(\"Element type %s not supported in the Sort op on CPU.\",\n PrimitiveType_Name(keys_type));\n }\n std::vector destination_addresses(sort->operand_count());\n for (int64_t i = 0; i < sort->operand_count(); ++i) {\n ShapeIndex shape_index =\n sort->values_count() > 0 ? ShapeIndex({i}) : ShapeIndex({});\n const HloInstruction* operand = sort->operand(i);\n TF_RET_CHECK(\n LayoutUtil::LayoutsInShapesEqual(keys_shape, operand->shape()));\n TF_RET_CHECK(LayoutUtil::LayoutsInShapesEqual(\n keys_shape, ShapeUtil::GetSubshape(sort->shape(), shape_index)));\n auto destination_buffer = GetAllocationSlice(*sort, shape_index);\n destination_addresses[i] =\n EmitBufferPointer(destination_buffer, operand->shape());\n auto source_address = GetAllocationSlice(*operand);\n if (destination_buffer != source_address) {\n int64_t primitive_type_size =\n ShapeUtil::ByteSizeOfPrimitiveType(operand->shape().element_type());\n auto source_buffer = GetEmittedValueFor(operand);\n int64_t size = ByteSizeOf(operand->shape());\n MemCpy(destination_addresses[i],\n llvm::Align(primitive_type_size), source_buffer,\n llvm::Align(primitive_type_size), size);\n }\n }\n Shape normalized_keys_shape =\n ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(keys_shape);\n auto logical_to_physical =\n LayoutUtil::MakeLogicalToPhysical(keys_shape.layout());\n TF_RET_CHECK(sort->sort_dimension() < logical_to_physical.size());\n int64_t physical_dimension_to_sort =\n logical_to_physical[sort->sort_dimension()];\n int64_t sort_dimension_elements =\n 
normalized_keys_shape.dimensions(physical_dimension_to_sort);\n int64_t higher_dimensions = 1;\n for (int64_t i = 0; i < physical_dimension_to_sort; ++i) {\n higher_dimensions *= normalized_keys_shape.dimensions(i);\n }\n int64_t lower_dimensions = 1;\n for (int64_t i = normalized_keys_shape.rank() - 1;\n i > physical_dimension_to_sort; --i) {\n lower_dimensions *= normalized_keys_shape.dimensions(i);\n }\n CHECK(absl::c_binary_search(thread_local_computations_, sort->to_apply()));\n llvm::Value* values = llvm_ir::EmitAllocaAtFunctionEntryWithCount(\n b()->getPtrTy(), b()->getInt32(sort->operand_count()), \"cc_values_alloca\",\n b());\n llvm::Value* sizes = llvm_ir::EmitAllocaAtFunctionEntryWithCount(\n b()->getInt32Ty(), b()->getInt32(sort->operand_count()),\n \"cc_sizes_alloca\", b());\n for (int64_t i = 0; i < sort->operand_count(); ++i) {\n llvm::Value* slot_in_values_alloca =\n ConstInBoundsGEP1_32(b()->getPtrTy(), values, i);\n Store(destination_addresses[i], slot_in_values_alloca);\n llvm::Value* slot_in_sizes_alloca =\n ConstInBoundsGEP1_32(b()->getInt32Ty(), sizes, i);\n llvm::Value* size = b()->getInt32(ShapeUtil::ByteSizeOfPrimitiveType(\n sort->operand(i)->shape().element_type()));\n Store(size, slot_in_sizes_alloca);\n }\n auto less_than_function =\n FindOrDie(emitted_functions_,\n ComputationToEmit{sort->to_apply(), allow_reassociation_});\n EmitCallToFunc(\n runtime::kKeyValueSortSymbolName,\n {b()->getInt64(higher_dimensions), b()->getInt64(sort_dimension_elements),\n b()->getInt64(lower_dimensions), values,\n b()->getInt32(sort->operand_count()), sizes,\n b()->getInt1(sort->is_stable()), GetExecutableRunOptionsArgument(),\n GetProfileCountersArgument(), less_than_function},\n b()->getVoidTy());\n if (sort->values_count() > 0) {\n llvm_ir::EmitTuple(GetIrArrayFor(sort), destination_addresses, b());\n }\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleTuple(HloInstruction* tuple) {\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(tuple));\n 
llvm::SmallVector base_ptrs;\n for (auto operand : tuple->operands()) {\n base_ptrs.push_back(GetEmittedValueFor(operand));\n }\n llvm_ir::EmitTuple(GetIrArrayFor(tuple), base_ptrs, b());\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleReduceWindow(HloInstruction* reduce_window) {\n bool saved_allow_reassociation = allow_reassociation_;\n allow_reassociation_ = true;\n absl::Status status = DefaultAction(reduce_window);\n allow_reassociation_ = saved_allow_reassociation;\n return status;\n}\nabsl::Status IrEmitter::HandleSelectAndScatter(\n HloInstruction* select_and_scatter) {\n CHECK_EQ(select_and_scatter->operand_count(), 3);\n const auto operand = select_and_scatter->operand(0);\n const auto source = select_and_scatter->operand(1);\n return HandleSelectAndScatter(select_and_scatter, GetIrArrayFor(operand),\n GetIrArrayFor(source),\n GetIrArrayFor(select_and_scatter));\n}\nabsl::Status IrEmitter::HandleSelectAndScatter(\n HloInstruction* select_and_scatter, const llvm_ir::IrArray& operand_array,\n const llvm_ir::IrArray& source_array,\n const llvm_ir::IrArray& output_array) {\n CHECK_EQ(select_and_scatter->operand_count(), 3);\n const auto operand = select_and_scatter->operand(0);\n const auto source = select_and_scatter->operand(1);\n const auto init_value = select_and_scatter->operand(2);\n const Window& window = select_and_scatter->window();\n PrimitiveType operand_element_type = operand->shape().element_type();\n const int64_t rank = operand->shape().rank();\n CHECK_EQ(rank, source->shape().rank());\n CHECK_EQ(rank, window.dimensions_size());\n if (window_util::HasDilation(window)) {\n return Unimplemented(\n \"Dilation for SelectAndScatter is not implemented on CPU. 
\");\n }\n TF_RETURN_IF_ERROR(EmitTargetElementLoop(\n select_and_scatter, IrName(select_and_scatter, \"init\"),\n [this, init_value](const llvm_ir::IrArray::Index& target_index) {\n llvm::Value* init_value_addr = GetEmittedValueFor(init_value);\n return Load(IrShapeType(init_value->shape()), init_value_addr);\n },\n std::optional(output_array)));\n llvm_ir::ForLoopNest source_loops(IrName(select_and_scatter), b());\n const llvm_ir::IrArray::Index source_index =\n source_loops.AddLoopsForShape(source->shape(), \"source\");\n SetToFirstInsertPoint(source_loops.GetInnerLoopBodyBasicBlock(), b());\n llvm::AllocaInst* selected_value_address = llvm_ir::EmitAllocaAtFunctionEntry(\n llvm_ir::PrimitiveTypeToIrType(operand_element_type, module_),\n \"selected_value_address\", b(),\n MinimumAlignmentForPrimitiveType(operand_element_type));\n llvm::AllocaInst* selected_index_address =\n llvm_ir::EmitAllocaAtFunctionEntryWithCount(\n b()->getInt64Ty(), b()->getInt32(rank), \"selected_index_address\",\n b());\n llvm::AllocaInst* initialized_flag_address =\n llvm_ir::EmitAllocaAtFunctionEntry(b()->getInt1Ty(),\n \"initialized_flag_address\", b());\n Store(b()->getInt1(false), initialized_flag_address);\n llvm_ir::ForLoopNest window_loops(IrName(select_and_scatter, \"window\"), b());\n llvm::SmallVector window_size;\n for (const auto& dim : window.dimensions()) {\n window_size.push_back(dim.size());\n }\n const llvm_ir::IrArray::Index window_index = window_loops.AddLoopsForShape(\n ShapeUtil::MakeShape(operand_element_type, window_size), \"window\");\n SetToFirstInsertPoint(window_loops.GetInnerLoopBodyBasicBlock(), b());\n llvm::SmallVector operand_multi_index(source_index.size());\n llvm::Value* in_bounds_condition = b()->getTrue();\n for (int64_t i = 0; i < rank; ++i) {\n llvm::Value* strided_index =\n NSWMul(source_index[i], b()->getInt64(window.dimensions(i).stride()));\n operand_multi_index[i] =\n NSWSub(NSWAdd(strided_index, window_index[i]),\n 
b()->getInt64(window.dimensions(i).padding_low()));\n llvm::Value* index_condition =\n ICmpULT(operand_multi_index[i],\n b()->getInt64(ShapeUtil::GetDimension(operand->shape(), i)));\n in_bounds_condition = And(in_bounds_condition, index_condition);\n }\n CHECK(in_bounds_condition != nullptr);\n llvm_ir::LlvmIfData if_in_bounds =\n llvm_ir::EmitIfThenElse(in_bounds_condition, \"in-bounds\", b());\n SetToFirstInsertPoint(if_in_bounds.true_block, b());\n llvm_ir::LlvmIfData if_initialized =\n llvm_ir::EmitIfThenElse(Load(initialized_flag_address->getAllocatedType(),\n initialized_flag_address),\n \"initialized\", b());\n SetToFirstInsertPoint(if_initialized.false_block, b());\n const auto save_operand_index =\n [&](const llvm_ir::IrArray::Index& operand_index) {\n for (int64_t i = 0; i < rank; ++i) {\n llvm::Value* selected_index_address_slot =\n InBoundsGEP(selected_index_address->getAllocatedType(),\n selected_index_address, {b()->getInt32(i)});\n Store(operand_index[i], selected_index_address_slot);\n }\n };\n llvm_ir::IrArray::Index operand_index(\n operand_multi_index, operand_array.GetShape(), b()->getInt64Ty());\n llvm::Value* operand_data =\n operand_array.EmitReadArrayElement(operand_index, b());\n Store(operand_data, selected_value_address);\n save_operand_index(operand_index);\n Store(b()->getInt1(true), initialized_flag_address);\n SetToFirstInsertPoint(if_initialized.true_block, b());\n llvm::Value* operand_address =\n operand_array.EmitArrayElementAddress(operand_index, b());\n llvm::Value* operand_element =\n Load(operand_array.GetElementLlvmType(), operand_address);\n llvm::Value* result = EmitScalarReturningThreadLocalCall(\n *select_and_scatter->select(),\n {Load(selected_value_address->getAllocatedType(), selected_value_address),\n operand_element},\n \"select_function\");\n llvm::Value* cond = ICmpNE(\n result,\n llvm::ConstantInt::get(llvm_ir::PrimitiveTypeToIrType(PRED, module_), 0),\n \"boolean_predicate\");\n llvm_ir::LlvmIfData if_select_lhs 
=\n llvm_ir::EmitIfThenElse(cond, \"if-select-lhs\", b());\n SetToFirstInsertPoint(if_select_lhs.false_block, b());\n Store(Load(operand_array.GetElementLlvmType(), operand_address),\n selected_value_address);\n save_operand_index(operand_index);\n SetToFirstInsertPoint(window_loops.GetOuterLoopExitBasicBlock(), b());\n llvm::SmallVector selected_multi_index;\n for (int64_t i = 0; i < rank; ++i) {\n const std::vector gep_index = {b()->getInt32(i)};\n llvm::Value* selected_index_address_slot =\n InBoundsGEP(selected_index_address->getAllocatedType(),\n selected_index_address, gep_index);\n llvm::Type* type = llvm::GetElementPtrInst::getIndexedType(\n selected_index_address->getAllocatedType(), gep_index);\n selected_multi_index.push_back(Load(type, selected_index_address_slot));\n }\n llvm::Value* source_value =\n source_array.EmitReadArrayElement(source_index, b());\n llvm_ir::IrArray::Index selected_index(\n selected_multi_index, output_array.GetShape(), source_index.GetType());\n llvm::Value* output_value =\n output_array.EmitReadArrayElement(selected_index, b());\n llvm::Value* scatter_value = EmitScalarReturningThreadLocalCall(\n *select_and_scatter->scatter(), {output_value, source_value},\n \"scatter_function\");\n output_array.EmitWriteArrayElement(selected_index, scatter_value, b());\n SetToFirstInsertPoint(source_loops.GetOuterLoopExitBasicBlock(), b());\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleDot(HloInstruction* dot) {\n auto lhs = dot->operand(0);\n auto rhs = dot->operand(1);\n TF_RETURN_IF_ERROR(ElementTypesSameAndSupported(\n *dot, {lhs, rhs},\n {PRED, S8, U8, S16, U16, S32, U32, S64, U64, F16, F32, F64, C64, C128}));\n const DotDimensionNumbers& dnums = dot->dot_dimension_numbers();\n if (dnums.lhs_contracting_dimensions_size() != 1) {\n return Unimplemented(\n \"Dot with multiple contracting dimensions not implemented.\");\n }\n llvm_ir::IrArray lhs_array(GetIrArrayFor(lhs));\n llvm_ir::IrArray rhs_array(GetIrArrayFor(rhs));\n 
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(dot));\n llvm_ir::IrArray target_array = GetIrArrayFor(dot);\n VLOG(2) << \"HandleDot: \";\n VLOG(2) << \" lhs operand: \"\n << llvm_ir::DumpToString(lhs_array.GetBasePointer());\n VLOG(2) << \" rhs operand: \"\n << llvm_ir::DumpToString(rhs_array.GetBasePointer());\n VLOG(2) << \" target: \"\n << llvm_ir::DumpToString(target_array.GetBasePointer());\n return EmitDotOperation(*dot, target_array, lhs_array, rhs_array,\n nullptr,\n GetExecutableRunOptionsArgument(), b(),\n hlo_module_config_, target_machine_features_);\n}\nabsl::Status IrEmitter::HandleConvolution(HloInstruction* convolution) {\n auto lhs = convolution->operand(0);\n auto rhs = convolution->operand(1);\n TF_RETURN_IF_ERROR(ElementTypesSameAndSupported(\n *convolution, {lhs, rhs},\n {PRED, S8, U8, S16, U16, S32, U32, S64, U64, F16, F32, F64, C64, C128}));\n if (PotentiallyImplementedAsEigenConvolution(*convolution,\n target_machine_features_)) {\n const Shape& lhs_shape = lhs->shape();\n const Shape& rhs_shape = rhs->shape();\n const Shape& convolution_shape = convolution->shape();\n if (LayoutUtil::IsMonotonicWithDim0Major(lhs_shape.layout()) &&\n LayoutUtil::IsMonotonicWithDim0Major(rhs_shape.layout()) &&\n LayoutUtil::IsMonotonicWithDim0Major(convolution_shape.layout())) {\n bool one_dim_convolution = lhs_shape.dimensions_size() == 3;\n llvm::Value* lhs_address = GetEmittedValueFor(lhs);\n llvm::Value* rhs_address = GetEmittedValueFor(rhs);\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(convolution));\n const ConvolutionDimensionNumbers& dnums =\n convolution->convolution_dimension_numbers();\n absl::InlinedVector input_dims;\n absl::InlinedVector kernel_dims;\n absl::InlinedVector output_dims;\n if (one_dim_convolution) {\n input_dims.push_back(1);\n kernel_dims.push_back(1);\n output_dims.push_back(1);\n }\n const Shape& input_shape = convolution->operand(0)->shape();\n int64_t input_batch =\n input_shape.dimensions(dnums.input_batch_dimension());\n for (int 
d : dnums.input_spatial_dimensions()) {\n input_dims.push_back(input_shape.dimensions(d));\n }\n int64_t input_channels =\n input_shape.dimensions(dnums.input_feature_dimension());\n const Shape& kernel_shape = convolution->operand(1)->shape();\n for (int d : dnums.kernel_spatial_dimensions()) {\n kernel_dims.push_back(kernel_shape.dimensions(d));\n }\n int64_t kernel_channels =\n kernel_shape.dimensions(dnums.kernel_input_feature_dimension());\n int64_t kernel_filters =\n kernel_shape.dimensions(dnums.kernel_output_feature_dimension());\n const Shape& convolution_shape = convolution->shape();\n for (int d : dnums.output_spatial_dimensions()) {\n output_dims.push_back(convolution_shape.dimensions(d));\n }\n const Window& window = convolution->window();\n absl::InlinedVector strides;\n absl::InlinedVector, 2> padding;\n absl::InlinedVector base_dilation;\n absl::InlinedVector window_dilation;\n if (one_dim_convolution) {\n strides.push_back(1);\n padding.push_back({0, 0});\n base_dilation.push_back(1);\n window_dilation.push_back(1);\n }\n for (const auto& d : window.dimensions()) {\n strides.push_back(d.stride());\n padding.push_back({d.padding_low(), d.padding_high()});\n base_dilation.push_back(d.base_dilation());\n window_dilation.push_back(d.window_dilation());\n }\n PrimitiveType primitive_type = lhs->shape().element_type();\n bool multi_threaded =\n hlo_module_config_.debug_options().xla_cpu_multi_thread_eigen();\n bool use_mkl_dnn =\n hlo_module_config_.debug_options().xla_cpu_use_mkl_dnn() &&\n convolution->feature_group_count() == 1;\n bool use_acl = hlo_module_config_.debug_options().xla_cpu_use_acl();\n auto valid_num_dims = [](absl::Span xs) {\n return xs.size() >= 2 && xs.size() <= 3;\n };\n TF_RET_CHECK(valid_num_dims(input_dims)) << input_dims.size();\n TF_RET_CHECK(valid_num_dims(kernel_dims));\n TF_RET_CHECK(valid_num_dims(output_dims));\n TF_RET_CHECK(valid_num_dims(strides));\n TF_RET_CHECK(padding.size() >= 2 && padding.size() <= 3);\n 
TF_RET_CHECK(valid_num_dims(base_dilation));\n TF_RET_CHECK(valid_num_dims(window_dilation));\n const char* fn_name;\n if (input_dims.size() == 2) {\n fn_name =\n primitive_type == F16\n ? (multi_threaded\n ? runtime::kEigenConv2DF16SymbolName\n : runtime::kEigenSingleThreadedConv2DF16SymbolName)\n : (multi_threaded\n ? (use_mkl_dnn\n ? runtime::kMKLConv2DF32SymbolName\n : (use_acl ? runtime::kACLConv2DF32SymbolName\n : runtime::kEigenConv2DF32SymbolName))\n : runtime::kEigenSingleThreadedConv2DF32SymbolName);\n } else if (input_dims.size() == 3) {\n fn_name =\n primitive_type == F16\n ? (multi_threaded\n ? runtime::kEigenConv3DF16SymbolName\n : runtime::kEigenSingleThreadedConv3DF16SymbolName)\n : (multi_threaded\n ? runtime::kEigenConv3DF32SymbolName\n : runtime::kEigenSingleThreadedConv3DF32SymbolName);\n } else {\n LOG(FATAL) << \"Invalid number of dimensions \" << input_dims.size();\n }\n if (!multi_threaded && use_mkl_dnn) {\n LOG(WARNING) << \"Using Eigen instead of MKL-DNN for single-threaded \"\n \"convolution.\";\n }\n std::vector args = {\n GetExecutableRunOptionsArgument(),\n GetEmittedValueFor(convolution),\n lhs_address,\n rhs_address,\n b()->getInt64(input_batch),\n };\n for (int64_t d : input_dims) {\n args.push_back(b()->getInt64(d));\n }\n args.push_back(b()->getInt64(input_channels));\n for (int64_t d : kernel_dims) {\n args.push_back(b()->getInt64(d));\n }\n args.push_back(b()->getInt64(kernel_channels));\n args.push_back(b()->getInt64(kernel_filters));\n for (int64_t d : output_dims) {\n args.push_back(b()->getInt64(d));\n }\n for (int64_t d : strides) {\n args.push_back(b()->getInt64(d));\n }\n for (const auto& p : padding) {\n args.push_back(b()->getInt64(p.first));\n args.push_back(b()->getInt64(p.second));\n }\n for (int64_t d : base_dilation) {\n args.push_back(b()->getInt64(d));\n }\n for (int64_t d : window_dilation) {\n args.push_back(b()->getInt64(d));\n }\n args.push_back(b()->getInt64(convolution->feature_group_count()));\n VLOG(1) 
<< \"Ir emitter emitted Convolution to runtime:\" << fn_name;\n EmitCallToFunc(fn_name, args, b()->getVoidTy(),\n true,\n true);\n return absl::OkStatus();\n }\n }\n return DefaultAction(convolution);\n}\nabsl::Status IrEmitter::HandleFft(HloInstruction* fft) {\n auto operand = fft->operand(0);\n TF_RETURN_IF_ERROR(ElementTypesSameAndSupported(\n *fft, {operand},\n {F32, F64, C64, C128}));\n TF_RET_CHECK(LayoutUtil::IsMonotonicWithDim0Major(operand->shape().layout()));\n TF_RET_CHECK(LayoutUtil::IsMonotonicWithDim0Major(fft->shape().layout()));\n VLOG(3) << \"operand=\" << ShapeUtil::HumanStringWithLayout(operand->shape());\n VLOG(3) << \"fft=\" << ShapeUtil::HumanStringWithLayout(fft->shape());\n llvm::Value* operand_address = GetEmittedValueFor(operand);\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(fft));\n const std::vector& fft_length = fft->fft_length();\n const int fft_rank = fft_length.size();\n absl::InlinedVector operand_shape_flat(fft_rank + 1);\n int64_t input_batch = 1;\n int64_t input_batch_length = fft->shape().dimensions_size() - fft_rank;\n for (int i = 0; i < input_batch_length; i++) {\n input_batch *= operand->shape().dimensions(i);\n }\n operand_shape_flat[0] = input_batch;\n for (int i = 0; i < fft_rank; ++i) {\n operand_shape_flat[i + 1] =\n operand->shape().dimensions(i + input_batch_length);\n }\n bool multi_threaded_eigen =\n hlo_module_config_.debug_options().xla_cpu_multi_thread_eigen();\n const char* fn_name = multi_threaded_eigen\n ? 
runtime::kDuccFftSymbolName\n : runtime::kDuccSingleThreadedFftSymbolName;\n auto* fft_lengths =\n EmitGlobalForLiteral(LiteralUtil::CreateR1(fft_length));\n auto* input_shape =\n EmitGlobalForLiteral(LiteralUtil::CreateR1(operand_shape_flat));\n EmitCallToFunc(fn_name,\n {GetExecutableRunOptionsArgument(), GetEmittedValueFor(fft),\n operand_address, b()->getInt32(fft->fft_type()),\n b()->getInt32(operand->shape().element_type() == F64 ||\n operand->shape().element_type() == C128),\n b()->getInt32(fft_rank), input_shape, fft_lengths},\n b()->getVoidTy(), true,\n false,\n true);\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleAllReduceSingleReplica(HloInstruction* crs) {\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(crs));\n if (crs->operand_count() == 1) {\n return EmitMemcpy(*crs->operand(0), *crs);\n }\n std::vector operand_ptrs;\n for (int64_t i = 0; i < crs->operand_count(); ++i) {\n llvm::Value* in_ptr = GetEmittedValueFor(crs->operand(i));\n TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_slice,\n assignment_.GetUniqueSlice(crs, {i}));\n const Shape& operand_shape = crs->operand(i)->shape();\n CHECK(operand_shape.IsArray())\n << \"Operands to all-reduce must be arrays: \" << crs->ToString();\n operand_ptrs.push_back(EmitBufferPointer(out_slice, operand_shape));\n MemCpy(operand_ptrs.back(), llvm::Align(1), in_ptr,\n llvm::Align(1), ShapeUtil::ByteSizeOf(operand_shape));\n }\n llvm_ir::EmitTuple(GetIrArrayFor(crs), operand_ptrs, b());\n return absl::OkStatus();\n}\nstatic bool DataTypeIsSupportedByReduceScatter(PrimitiveType datatype) {\n switch (datatype) {\n case PRED:\n case S8:\n case U8:\n case S16:\n case U16:\n case S32:\n case U32:\n case S64:\n case U64:\n case F16:\n case F32:\n case F64:\n case C64:\n case C128:\n return true;\n default:\n return false;\n }\n}\nabsl::Status IrEmitter::HandleAllReduceMultipleReplica(HloInstruction* crs) {\n CHECK_GE(crs->operand_count(), 1);\n PrimitiveType datatype = 
crs->operand(0)->shape().element_type();\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(crs));\n if (!DataTypeIsSupportedByReduceScatter(datatype)) {\n return Unimplemented(\"AllReduce for datatype '%s' is not supported\",\n primitive_util::LowercasePrimitiveTypeName(datatype));\n }\n if (!MatchReductionComputation(crs->to_apply()).has_value()) {\n return Unimplemented(\"AllReduce for computation '%s' is not supported\",\n crs->to_apply()->ToString());\n }\n std::string replica_groups = ReplicaGroupsToString(crs->replica_groups());\n int32_t replica_groups_size = replica_groups.size();\n llvm::Value* replica_groups_v = b()->CreateGlobalStringPtr(replica_groups);\n bool is_tuple = crs->operand_count() > 1;\n std::vector input_buffer_ptrs;\n std::vector output_buffer_ptrs;\n if (is_tuple) {\n CHECK(crs->shape().IsTuple());\n for (int64_t i = 0; i < crs->operand_count(); i++) {\n const HloInstruction* op = crs->operand(i);\n TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_slice,\n assignment_.GetUniqueSlice(crs, {i}));\n const Shape& operand_shape = crs->operand(i)->shape();\n CHECK(operand_shape.IsArray())\n << \"Operands to all-reduce must be arrays: \" << crs->ToString();\n output_buffer_ptrs.push_back(EmitBufferPointer(out_slice, operand_shape));\n input_buffer_ptrs.push_back(GetEmittedValueFor(op));\n }\n } else {\n Shape shape = crs->operand(0)->shape();\n TF_ASSIGN_OR_RETURN(BufferAllocation::Slice input_slice,\n assignment_.GetUniqueSlice(crs->operand(0), {}));\n TF_ASSIGN_OR_RETURN(BufferAllocation::Slice output_slice,\n assignment_.GetUniqueSlice(crs, {}));\n input_buffer_ptrs.push_back(EmitBufferPointer(input_slice, shape));\n output_buffer_ptrs.push_back(EmitBufferPointer(output_slice, shape));\n }\n llvm::Value* input_buffers =\n EncodeArrayFunctionArguments(input_buffer_ptrs, \"input_buffers\", b());\n llvm::Value* output_buffers =\n EncodeArrayFunctionArguments(output_buffer_ptrs, \"output_buffers\", b());\n int32_t shape_length;\n 
TF_ASSIGN_OR_RETURN(llvm::Value * shape_ptr,\n llvm_ir::EncodeSelfDescribingShapeConstant(\n crs->shape(), &shape_length, b()));\n bool use_global_device_ids =\n Cast(crs)->use_global_device_ids();\n EmitCallToFunc(\n runtime::kAllReduceSymbolName,\n {GetExecutableRunOptionsArgument(),\n replica_groups_v,\n b()->getInt32(replica_groups_size),\n b()->getInt32(static_cast(crs->channel_id().has_value())),\n b()->getInt32(static_cast(use_global_device_ids)),\n b()->getInt64(crs->channel_id().has_value()\n ? *crs->channel_id()\n : crs->GetModule()->unique_id()),\n b()->getInt32(\n static_cast(*MatchReductionComputation(crs->to_apply()))),\n shape_ptr,\n b()->getInt32(shape_length),\n b()->getInt32(crs->operand_count()),\n input_buffers,\n output_buffers},\n b()->getVoidTy());\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleAllReduce(HloInstruction* crs) {\n if (hlo_module_config_.replica_count() == 1 &&\n hlo_module_config_.num_partitions() == 1) {\n return HandleAllReduceSingleReplica(crs);\n }\n return HandleAllReduceMultipleReplica(crs);\n}\nabsl::Status IrEmitter::HandleReduceScatter(HloInstruction* rs) {\n CHECK_EQ(rs->operand_count(), 1);\n PrimitiveType datatype = rs->operand(0)->shape().element_type();\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(rs));\n if (!DataTypeIsSupportedByReduceScatter(datatype)) {\n return Unimplemented(\"ReduceScatter for datatype '%s' is not supported\",\n primitive_util::LowercasePrimitiveTypeName(datatype));\n }\n if (!MatchReductionComputation(rs->to_apply()).has_value()) {\n return Unimplemented(\"ReduceScatter for computation '%s' is not supported\",\n rs->to_apply()->ToString());\n }\n std::string replica_groups = ReplicaGroupsToString(rs->replica_groups());\n int32_t replica_groups_size = replica_groups.size();\n llvm::Value* replica_groups_v = b()->CreateGlobalStringPtr(replica_groups);\n Shape shape = rs->operand(0)->shape();\n TF_ASSIGN_OR_RETURN(BufferAllocation::Slice input_slice,\n 
assignment_.GetUniqueSlice(rs->operand(0), {}));\n TF_ASSIGN_OR_RETURN(BufferAllocation::Slice output_slice,\n assignment_.GetUniqueSlice(rs, {}));\n llvm::Value* input_buffer = EmitBufferPointer(input_slice, shape);\n llvm::Value* output_buffer = EmitBufferPointer(output_slice, shape);\n bool use_global_device_ids =\n Cast(rs)->use_global_device_ids();\n EmitCallToFunc(\n runtime::kReduceScatterSymbolName,\n {GetExecutableRunOptionsArgument(),\n replica_groups_v,\n b()->getInt32(replica_groups_size),\n b()->getInt32(static_cast(rs->channel_id().has_value())),\n b()->getInt32(static_cast(use_global_device_ids)),\n b()->getInt64(rs->channel_id().has_value()\n ? *rs->channel_id()\n : rs->GetModule()->unique_id()),\n b()->getInt32(\n static_cast(*MatchReductionComputation(rs->to_apply()))),\n b()->getInt32(static_cast(datatype)),\n b()->getInt64(ShapeUtil::ElementsIn(rs->shape())),\n input_buffer,\n output_buffer},\n b()->getVoidTy());\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleAllToAll(HloInstruction* instruction) {\n auto* instr = Cast(instruction);\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(instruction));\n CHECK(!instr->split_dimension() && instr->shape().IsTuple())\n << \"Only tuple AllToAll is supported\";\n std::string replica_groups =\n ReplicaGroupsToString(instruction->replica_groups());\n int32_t replica_groups_size = replica_groups.size();\n llvm::Value* replica_groups_v = b()->CreateGlobalStringPtr(replica_groups);\n int64_t buffer_size = -1;\n std::vector input_buffer_ptrs;\n std::vector output_buffer_ptrs;\n for (int64_t i = 0; i < instruction->operand_count(); i++) {\n const HloInstruction* op = instruction->operand(i);\n TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_slice,\n assignment_.GetUniqueSlice(instruction, {i}));\n const Shape& operand_shape = instruction->operand(i)->shape();\n CHECK(operand_shape.IsArray())\n << \"Operands to all-to-all must be arrays: \" << instruction->ToString();\n 
output_buffer_ptrs.push_back(EmitBufferPointer(out_slice, operand_shape));\n input_buffer_ptrs.push_back(GetEmittedValueFor(op));\n CHECK(buffer_size == -1 || buffer_size == out_slice.size());\n buffer_size = out_slice.size();\n }\n llvm::Value* input_buffers =\n EncodeArrayFunctionArguments(input_buffer_ptrs, \"input_buffers\", b());\n llvm::Value* output_buffers =\n EncodeArrayFunctionArguments(output_buffer_ptrs, \"output_buffers\", b());\n EmitCallToFunc(\n runtime::kAllToAllSymbolName,\n {\n GetExecutableRunOptionsArgument(),\n b()->getInt32(\n static_cast(instruction->channel_id().has_value())),\n b()->getInt64(instruction->channel_id().has_value()\n ? *instruction->channel_id()\n : instruction->GetModule()->unique_id()),\n replica_groups_v,\n b()->getInt32(replica_groups_size),\n b()->getInt32(instruction->operand_count()),\n b()->getInt64(buffer_size),\n input_buffers,\n output_buffers,\n },\n b()->getVoidTy());\n llvm_ir::EmitTuple(GetIrArrayFor(instruction), output_buffer_ptrs, b());\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleAllGather(HloInstruction* instruction) {\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(instruction));\n std::string replica_groups =\n ReplicaGroupsToString(instruction->replica_groups());\n int32_t replica_groups_size = replica_groups.size();\n llvm::Value* replica_groups_v = b()->CreateGlobalStringPtr(replica_groups);\n std::vector input_buffer_ptrs;\n std::vector output_buffer_ptrs;\n const HloInstruction* op = instruction->operand(0);\n TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice in_slice,\n assignment_.GetUniqueSlice(op, {}));\n TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_slice,\n assignment_.GetUniqueSlice(instruction, {}));\n const Shape& operand_shape = op->shape();\n CHECK(op->shape().IsArray())\n << \"Operand to all-gather must be arrays: \" << instruction->ToString();\n llvm::Value* output_buffer = EmitBufferPointer(out_slice, operand_shape);\n llvm::Value* input_buffer = 
GetEmittedValueFor(op);\n int64_t buffer_size = in_slice.size();\n bool use_global_device_ids =\n Cast(instruction)->use_global_device_ids();\n EmitCallToFunc(\n runtime::kAllGatherSymbolName,\n {\n GetExecutableRunOptionsArgument(),\n b()->getInt32(\n static_cast(instruction->channel_id().has_value())),\n b()->getInt32(static_cast(use_global_device_ids)),\n b()->getInt64(instruction->channel_id().has_value()\n ? *instruction->channel_id()\n : instruction->GetModule()->unique_id()),\n replica_groups_v,\n b()->getInt32(replica_groups_size),\n b()->getInt64(buffer_size),\n input_buffer,\n output_buffer,\n },\n b()->getVoidTy());\n llvm_ir::EmitTuple(GetIrArrayFor(instruction), output_buffer_ptrs, b());\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleCollectivePermute(HloInstruction* crs) {\n auto* instr = Cast(crs);\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(instr));\n std::string source_target_pairs = absl::StrJoin(\n instr->source_target_pairs(), \",\", absl::PairFormatter(\"=\"));\n llvm::Value* source_target_pairs_v =\n b()->CreateGlobalStringPtr(source_target_pairs);\n Shape shape = crs->operand(0)->shape();\n TF_ASSIGN_OR_RETURN(BufferAllocation::Slice input_slice,\n assignment_.GetUniqueSlice(crs->operand(0), {}));\n llvm::Value* input_buffer = EmitBufferPointer(input_slice, shape);\n TF_ASSIGN_OR_RETURN(BufferAllocation::Slice output_slice,\n assignment_.GetUniqueSlice(crs, {}));\n llvm::Value* output_buffer = EmitBufferPointer(output_slice, shape);\n EmitCallToFunc(\n runtime::kCollectivePermuteSymbolName,\n {GetExecutableRunOptionsArgument(),\n b()->getInt32(static_cast(crs->channel_id().has_value())),\n b()->getInt64(crs->channel_id().has_value()\n ? 
*crs->channel_id()\n : crs->GetModule()->unique_id()),\n b()->getInt32(ShapeUtil::ByteSizeOf(shape)),\n input_buffer,\n output_buffer,\n source_target_pairs_v,\n b()->getInt32(source_target_pairs.size())},\n b()->getVoidTy());\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandlePartitionId(HloInstruction* hlo) {\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(hlo));\n TF_ASSIGN_OR_RETURN(BufferAllocation::Slice output_slice,\n assignment_.GetUniqueSlice(hlo, {}));\n llvm::Value* output_buffer = EmitBufferPointer(output_slice, hlo->shape());\n EmitCallToFunc(runtime::kPartitionIdSymbolName,\n {GetExecutableRunOptionsArgument(),\n output_buffer},\n b()->getVoidTy());\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleReplicaId(HloInstruction* hlo) {\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(hlo));\n TF_ASSIGN_OR_RETURN(BufferAllocation::Slice output_slice,\n assignment_.GetUniqueSlice(hlo, {}));\n llvm::Value* output_buffer = EmitBufferPointer(output_slice, hlo->shape());\n EmitCallToFunc(runtime::kReplicaIdSymbolName,\n {GetExecutableRunOptionsArgument(),\n output_buffer},\n b()->getVoidTy());\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleParameter(HloInstruction* parameter) {\n VLOG(2) << \"HandleParameter: \" << parameter->ToString();\n return EmitTargetAddressForOp(parameter);\n}\nstatic bool ReductionPreservesLayout(const HloInstruction& reduce) {\n DCHECK_EQ(reduce.opcode(), HloOpcode::kReduce);\n absl::flat_hash_map unreduced_dim_map;\n absl::flat_hash_set reduced_dims(reduce.dimensions().begin(),\n reduce.dimensions().end());\n const Shape& operand_shape = reduce.operand(0)->shape();\n const Shape& result_shape = reduce.shape();\n int64_t delta = 0;\n for (int64_t i = 0; i < operand_shape.dimensions_size(); i++) {\n if (reduced_dims.contains(i)) {\n delta++;\n } else {\n InsertOrDie(&unreduced_dim_map, i, i - delta);\n }\n }\n int64_t result_dim_idx = 0;\n for (int64_t operand_dim_idx = 0;\n operand_dim_idx < 
operand_shape.dimensions_size(); operand_dim_idx++) {\n int64_t operand_dim =\n operand_shape.layout().minor_to_major(operand_dim_idx);\n if (!reduced_dims.contains(operand_dim)) {\n if (FindOrDie(unreduced_dim_map, operand_dim) !=\n result_shape.layout().minor_to_major(result_dim_idx++)) {\n return false;\n }\n }\n }\n CHECK_EQ(result_dim_idx, result_shape.dimensions_size());\n return true;\n}\nIrEmitter::ReductionGenerator IrEmitter::MatchReductionGenerator(\n HloComputation* function, std::string* failure_reason) const {\n CHECK_EQ(function->num_parameters(), 2);\n auto root_instruction = function->root_instruction();\n CHECK(ShapeUtil::IsScalar(root_instruction->shape()));\n if (root_instruction->operand_count() != 2) {\n *failure_reason = \"root instruction is not a binary operation\";\n return nullptr;\n }\n const Shape& root_shape = root_instruction->shape();\n if (ShapeUtil::ElementIsComplex(root_shape)) {\n *failure_reason = \"complex values not supported\";\n return nullptr;\n }\n bool root_is_floating_point = ShapeUtil::ElementIsFloating(root_shape);\n bool root_is_integral = ShapeUtil::ElementIsIntegral(root_shape);\n bool root_is_signed = ShapeUtil::ElementIsSigned(root_shape);\n auto lhs = root_instruction->operand(0);\n auto rhs = root_instruction->operand(1);\n auto param_0 = function->parameter_instruction(0);\n auto param_1 = function->parameter_instruction(1);\n if (!(lhs == param_0 && rhs == param_1) &&\n !(rhs == param_0 && lhs == param_1)) {\n *failure_reason =\n \"root instruction is not a binary operation on the incoming arguments\";\n return nullptr;\n }\n CHECK(ShapeUtil::IsScalar(lhs->shape()) && ShapeUtil::IsScalar(rhs->shape()));\n switch (root_instruction->opcode()) {\n default:\n *failure_reason = \"did not recognize root instruction opcode\";\n return nullptr;\n case HloOpcode::kAdd:\n return [root_is_integral](llvm::IRBuilder<>* b, llvm::Value* lhs,\n llvm::Value* rhs) {\n return root_is_integral ? 
b->CreateAdd(lhs, rhs)\n : b->CreateFAdd(lhs, rhs);\n };\n case HloOpcode::kMultiply:\n return [root_is_integral](llvm::IRBuilder<>* b, llvm::Value* lhs,\n llvm::Value* rhs) {\n return root_is_integral ? b->CreateMul(lhs, rhs)\n : b->CreateFMul(lhs, rhs);\n };\n case HloOpcode::kAnd:\n return [](llvm::IRBuilder<>* b, llvm::Value* lhs, llvm::Value* rhs) {\n return b->CreateAnd(lhs, rhs);\n };\n case HloOpcode::kOr:\n return [](llvm::IRBuilder<>* b, llvm::Value* lhs, llvm::Value* rhs) {\n return b->CreateOr(lhs, rhs);\n };\n case HloOpcode::kXor:\n return [](llvm::IRBuilder<>* b, llvm::Value* lhs, llvm::Value* rhs) {\n return b->CreateXor(lhs, rhs);\n };\n case HloOpcode::kMaximum:\n return [root_is_floating_point, root_is_signed, this](\n llvm::IRBuilder<>* b, llvm::Value* lhs,\n llvm::Value* rhs) -> llvm::Value* {\n if (root_is_floating_point) {\n return llvm_ir::EmitFloatMax(\n lhs, rhs, b,\n hlo_module_config_.debug_options().xla_cpu_enable_fast_min_max());\n }\n return b->CreateSelect(\n b->CreateICmp(root_is_signed ? llvm::ICmpInst::ICMP_SGE\n : llvm::ICmpInst::ICMP_UGE,\n lhs, rhs),\n lhs, rhs);\n };\n case HloOpcode::kMinimum:\n return [root_is_floating_point, root_is_signed, this](\n llvm::IRBuilder<>* b, llvm::Value* lhs,\n llvm::Value* rhs) -> llvm::Value* {\n if (root_is_floating_point) {\n return llvm_ir::EmitFloatMin(\n lhs, rhs, b,\n hlo_module_config_.debug_options().xla_cpu_enable_fast_min_max());\n }\n return b->CreateSelect(\n b->CreateICmp(root_is_signed ? 
llvm::ICmpInst::ICMP_SLE\n : llvm::ICmpInst::ICMP_ULE,\n lhs, rhs),\n lhs, rhs);\n };\n }\n}\nIrEmitter::ShardedVectorType IrEmitter::CreateShardedVectorType(\n PrimitiveType element_type, unsigned element_count) {\n int vector_register_size_in_elements =\n target_machine_features_.vector_register_byte_size(\n *compute_function()->function()) /\n ShapeUtil::ByteSizeOfPrimitiveType(element_type);\n ShardedVectorType sharded_vector_type;\n llvm::Type* element_ir_type =\n llvm_ir::PrimitiveTypeToIrType(element_type, module_);\n for (int i = 0, e = 1 + Log2Ceiling(element_count); i < e; i++) {\n const unsigned current_size_fragment = 1u << i;\n if (!(element_count & current_size_fragment)) {\n continue;\n }\n if (current_size_fragment == 1) {\n sharded_vector_type.push_back(element_ir_type);\n continue;\n }\n if (current_size_fragment >= vector_register_size_in_elements) {\n auto vector_type = llvm::VectorType::get(\n element_ir_type, vector_register_size_in_elements, false);\n sharded_vector_type.insert(\n sharded_vector_type.end(),\n current_size_fragment / vector_register_size_in_elements,\n vector_type);\n CHECK_EQ(current_size_fragment % vector_register_size_in_elements, 0);\n continue;\n }\n sharded_vector_type.push_back(\n llvm::VectorType::get(element_ir_type, current_size_fragment, false));\n }\n return sharded_vector_type;\n}\nabsl::StatusOr\nIrEmitter::EmitInnerLoopForVectorizedReduction(\n const ReductionGenerator& reduction_generator,\n const llvm_ir::IrArray::Index& output_index,\n const ShardedVectorType& accumulator_type, HloInstruction* init_value,\n HloInstruction* arg, absl::Span dimensions,\n llvm::Align element_alignment) {\n ShardedVector accumulator;\n accumulator.reserve(accumulator_type.size());\n for (auto accumulator_shard_type : accumulator_type) {\n accumulator.push_back(llvm_ir::EmitAllocaAtFunctionEntry(\n accumulator_shard_type, \"accumulator\", b(), 0));\n }\n llvm::Value* init_value_ssa =\n Load(IrShapeType(init_value->shape()), 
GetEmittedValueFor(init_value));\n for (llvm::Value* accumulator_shard : accumulator) {\n llvm::Value* initial_value;\n auto shard_type =\n llvm::cast(accumulator_shard)->getAllocatedType();\n if (auto vector_type = llvm::dyn_cast(shard_type)) {\n initial_value =\n VectorSplat(vector_type->getElementCount(), init_value_ssa);\n } else {\n initial_value = init_value_ssa;\n }\n AlignedStore(initial_value, accumulator_shard, element_alignment);\n }\n llvm_ir::ForLoopNest reduction_loop_nest(IrName(arg, \"vectorized_inner\"),\n b());\n std::vector input_multi_index =\n reduction_loop_nest.AddLoopsForShapeOnDimensions(arg->shape(), dimensions,\n \"reduction_dim\");\n SetToFirstInsertPoint(reduction_loop_nest.GetInnerLoopBodyBasicBlock(), b());\n llvm_ir::IrArray arg_array(GetIrArrayFor(arg));\n llvm_ir::IrArray::Index::const_iterator it = output_index.begin();\n for (auto& i : input_multi_index) {\n if (i == nullptr) {\n i = *it++;\n }\n }\n CHECK(output_index.end() == it);\n llvm_ir::IrArray::Index input_index(input_multi_index, arg->shape(),\n b()->getInt64Ty());\n llvm::Value* input_address =\n arg_array.EmitArrayElementAddress(input_index, b());\n for (int i = 0; i < accumulator.size(); i++) {\n auto alloca = llvm::cast(accumulator[i]);\n auto current_accumulator_value = AlignedLoad(\n alloca->getAllocatedType(), accumulator[i], element_alignment);\n auto addend = AlignedLoad(alloca->getAllocatedType(), input_address,\n element_alignment);\n arg_array.AnnotateLoadStoreInstructionWithMetadata(addend);\n auto reduced_result =\n reduction_generator(b(), current_accumulator_value, addend);\n AlignedStore(reduced_result, accumulator[i], element_alignment);\n if (i != (accumulator.size() - 1)) {\n input_address =\n ConstInBoundsGEP1_32(reduced_result->getType(), input_address, 1);\n }\n }\n SetToFirstInsertPoint(reduction_loop_nest.GetOuterLoopExitBasicBlock(), b());\n ShardedVector result_ssa;\n result_ssa.reserve(accumulator.size());\n for (auto accumulator_shard : 
accumulator) {\n auto alloca = llvm::cast(accumulator_shard);\n result_ssa.push_back(AlignedLoad(alloca->getAllocatedType(),\n accumulator_shard, element_alignment));\n }\n return result_ssa;\n}\nvoid IrEmitter::EmitShardedVectorStore(\n llvm::Value* store_address, const std::vector& value_to_store,\n llvm::Align alignment, const llvm_ir::IrArray& containing_array) {\n for (int i = 0; i < value_to_store.size(); i++) {\n auto store_instruction =\n AlignedStore(value_to_store[i], store_address, alignment);\n containing_array.AnnotateLoadStoreInstructionWithMetadata(\n store_instruction);\n if (i != (value_to_store.size() - 1)) {\n store_address =\n ConstInBoundsGEP1_32(value_to_store[i]->getType(), store_address, 1);\n }\n }\n}\nabsl::StatusOr IrEmitter::EmitVectorizedReduce(\n HloInstruction* reduce, HloInstruction* arg, HloInstruction* init_value,\n absl::Span dimensions, HloComputation* function,\n std::string* failure_reason) {\n if (!reduce->shape().IsArray()) {\n *failure_reason = \"vectorization of variadic reduce not implemented\";\n return false;\n }\n if (!ReductionPreservesLayout(*reduce)) {\n return false;\n }\n ReductionGenerator reduction_generator =\n MatchReductionGenerator(function, failure_reason);\n if (!reduction_generator) {\n return false;\n }\n int vector_register_size_in_elements =\n target_machine_features_.vector_register_byte_size(\n *compute_function()->function()) /\n ShapeUtil::ByteSizeOfPrimitiveType(reduce->shape().element_type());\n if (vector_register_size_in_elements == 0) {\n return false;\n }\n int vectorization_factor_in_bytes =\n target_machine_features_.vectorization_factor_in_bytes();\n const int vectorization_factor =\n vectorization_factor_in_bytes /\n ShapeUtil::ByteSizeOfPrimitiveType(reduce->shape().element_type());\n bool is_reduction_over_minor_dimension = absl::c_linear_search(\n dimensions, LayoutUtil::Minor(arg->shape().layout(), 0));\n llvm::Align element_alignment(tsl::MathUtil::GCD(\n 
ShapeUtil::ByteSizeOfPrimitiveType(reduce->shape().element_type()),\n MinimumAlignmentForPrimitiveType(reduce->shape().element_type())));\n if (is_reduction_over_minor_dimension) {\n *failure_reason = \"reduction over minor dimension not implemented\";\n return false;\n }\n CHECK(!reduce->shape().IsTuple());\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(reduce));\n llvm_ir::ForLoopNest loop_nest(IrName(reduce), b());\n std::vector array_multi_index(\n reduce->shape().dimensions_size());\n for (int i = LayoutUtil::MinorToMajor(reduce->shape()).size() - 1; i > 0;\n --i) {\n int64_t dimension = LayoutUtil::Minor(reduce->shape().layout(), i);\n int64_t start_index = 0;\n int64_t end_index = reduce->shape().dimensions(dimension);\n std::unique_ptr loop = loop_nest.AddLoop(\n start_index, end_index, absl::StrFormat(\"dim.%d\", dimension));\n array_multi_index[dimension] = loop->GetIndVarValue();\n }\n int64_t innermost_dimension = LayoutUtil::Minor(reduce->shape().layout(), 0);\n int64_t innermost_dimension_size =\n reduce->shape().dimensions(innermost_dimension);\n if (llvm::BasicBlock* innermost_body_bb =\n loop_nest.GetInnerLoopBodyBasicBlock()) {\n SetToFirstInsertPoint(innermost_body_bb, b());\n }\n auto outermost_loop_exit_block = loop_nest.GetOuterLoopExitBasicBlock();\n if (innermost_dimension_size >= vectorization_factor) {\n int64_t start_index = 0;\n int64_t end_index = (innermost_dimension_size / vectorization_factor) *\n vectorization_factor;\n std::unique_ptr loop =\n loop_nest.AddLoop(start_index, end_index, vectorization_factor,\n absl::StrFormat(\"dim.%d\", innermost_dimension));\n array_multi_index[innermost_dimension] = loop->GetIndVarValue();\n SetToFirstInsertPoint(loop->GetBodyBasicBlock(), b());\n ShardedVectorType vector_type = CreateShardedVectorType(\n reduce->shape().element_type(), vectorization_factor);\n llvm_ir::IrArray::Index array_index(array_multi_index, reduce->shape(),\n b()->getInt64Ty());\n TF_ASSIGN_OR_RETURN(std::vector 
accumulator,\n EmitInnerLoopForVectorizedReduction(\n reduction_generator, array_index, vector_type,\n init_value, arg, dimensions, element_alignment));\n llvm_ir::IrArray target_array = GetIrArrayFor(reduce);\n llvm::Value* output_address =\n target_array.EmitArrayElementAddress(array_index, b());\n EmitShardedVectorStore(output_address, accumulator, element_alignment,\n target_array);\n if (auto exit_terminator = loop->GetExitBasicBlock()->getTerminator()) {\n CHECK_GT(LayoutUtil::MinorToMajor(reduce->shape()).size(), 1);\n b()->SetInsertPoint(exit_terminator);\n } else {\n CHECK_EQ(LayoutUtil::MinorToMajor(reduce->shape()).size(), 1);\n b()->SetInsertPoint(loop->GetExitBasicBlock());\n }\n }\n if (innermost_dimension_size % vectorization_factor) {\n array_multi_index[innermost_dimension] =\n b()->getInt64(innermost_dimension_size -\n (innermost_dimension_size % vectorization_factor));\n ShardedVectorType vector_type = CreateShardedVectorType(\n reduce->shape().element_type(),\n innermost_dimension_size % vectorization_factor);\n llvm_ir::IrArray::Index array_index(array_multi_index, reduce->shape(),\n b()->getInt64Ty());\n llvm::IRBuilderBase::FastMathFlagGuard guard(*b());\n llvm::FastMathFlags flags = b()->getFastMathFlags();\n flags.setAllowReassoc(true);\n b()->setFastMathFlags(flags);\n TF_ASSIGN_OR_RETURN(std::vector accumulator,\n EmitInnerLoopForVectorizedReduction(\n reduction_generator, array_index, vector_type,\n init_value, arg, dimensions, element_alignment));\n llvm_ir::IrArray target_array = GetIrArrayFor(reduce);\n llvm::Value* output_address =\n target_array.EmitArrayElementAddress(array_index, b());\n EmitShardedVectorStore(output_address, accumulator, element_alignment,\n target_array);\n }\n if (outermost_loop_exit_block) {\n b()->SetInsertPoint(outermost_loop_exit_block);\n }\n return true;\n}\nabsl::Status IrEmitter::HandleReduce(HloInstruction* reduce) {\n auto arg = reduce->mutable_operand(0);\n auto init_value = 
reduce->mutable_operand(1);\n absl::Span dimensions(reduce->dimensions());\n HloComputation* function = reduce->to_apply();\n bool saved_allow_reassociation = allow_reassociation_;\n allow_reassociation_ = true;\n auto cleanup = absl::MakeCleanup([saved_allow_reassociation, this]() {\n allow_reassociation_ = saved_allow_reassociation;\n });\n if (!options::VectorizedReduceDisabled(hlo_module_config_)) {\n std::string vectorization_failure_reason;\n TF_ASSIGN_OR_RETURN(\n bool vectorization_successful,\n EmitVectorizedReduce(reduce, arg, init_value, dimensions, function,\n &vectorization_failure_reason));\n if (vectorization_successful) {\n VLOG(1) << \"Successfully vectorized reduction \" << reduce->ToString()\n << \"\\n\";\n return absl::OkStatus();\n } else {\n VLOG(1) << \"Could not vectorize reduction \" << reduce->ToString() << \": \"\n << vectorization_failure_reason;\n }\n }\n return DefaultAction(reduce);\n}\nabsl::Status IrEmitter::HandleSend(HloInstruction* send) {\n return Unimplemented(\"Send is not implemented on CPU.\");\n}\nabsl::Status IrEmitter::HandleSendDone(HloInstruction* send_done) {\n return Unimplemented(\"Send-done is not implemented on CPU.\");\n}\nabsl::Status IrEmitter::HandleScatter(HloInstruction*) {\n return Unimplemented(\"Scatter is not implemented on CPUs.\");\n}\nabsl::Status IrEmitter::HandleSlice(HloInstruction* slice) {\n VLOG(2) << \"HandleSlice: \" << slice->ToString();\n auto operand = slice->operand(0);\n if (ShouldEmitParallelLoopFor(*slice)) {\n return DefaultAction(slice);\n }\n if (!LayoutUtil::Equal(operand->shape().layout(), slice->shape().layout())) {\n return DefaultAction(slice);\n }\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(slice));\n if (ShapeUtil::IsZeroElementArray(slice->shape())) {\n return absl::OkStatus();\n }\n const Layout& layout = operand->shape().layout();\n const int64_t num_dims = operand->shape().dimensions_size();\n absl::flat_hash_set inner_dims;\n for (int64_t dim : 
LayoutUtil::MinorToMajor(layout)) {\n if (operand->shape().dimensions(dim) != slice->shape().dimensions(dim)) {\n break;\n }\n inner_dims.insert(dim);\n }\n const bool is_trivial_copy = (inner_dims.size() == num_dims);\n if (is_trivial_copy) {\n if (ShapeUtil::IsEffectiveScalar(slice->shape())) {\n return DefaultAction(slice);\n } else {\n return EmitMemcpy(*slice, *operand);\n }\n }\n const Shape logical_element_shape = ShapeUtil::FilterDimensions(\n [&inner_dims](int64_t dim) { return inner_dims.contains(dim); },\n operand->shape());\n const int64_t primitive_elements_per_logical_element =\n ShapeUtil::ElementsIn(logical_element_shape);\n const int64_t memcpy_dim = LayoutUtil::Minor(layout, inner_dims.size());\n const bool memcpy_is_contiguous = slice->slice_strides(memcpy_dim) == 1;\n const int64_t memcpy_logical_elements =\n memcpy_is_contiguous\n ? slice->slice_limits(memcpy_dim) - slice->slice_starts(memcpy_dim)\n : 1;\n llvm::SmallVector outer_dims;\n for (int64_t i = 0; i < num_dims - inner_dims.size() - 1; ++i) {\n outer_dims.push_back(LayoutUtil::Major(layout, i));\n }\n if (!memcpy_is_contiguous) {\n outer_dims.push_back(memcpy_dim);\n }\n llvm_ir::IrArray target_array = GetIrArrayFor(slice);\n const int64_t num_outer_loops = outer_dims.size();\n llvm_ir::ForLoopNest loops(IrName(slice), b());\n std::vector target_multi_index =\n loops.AddLoopsForShapeOnDimensions(slice->shape(), outer_dims, \"slice\");\n std::replace(target_multi_index.begin(), target_multi_index.end(),\n static_cast(nullptr),\n static_cast(b()->getInt64(0)));\n llvm_ir::IrArray::Index target_index(target_multi_index, slice->shape(),\n b()->getInt64Ty());\n if (num_outer_loops > 0) {\n SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), b());\n }\n llvm_ir::IrArray source_array = GetIrArrayFor(operand);\n const llvm_ir::IrArray::Index source_index = target_index.SourceIndexOfSlice(\n operand->shape(), slice->slice_starts(),\n slice->slice_strides(), b());\n llvm::Value* 
memcpy_dest =\n target_array.EmitArrayElementAddress(target_index, b(), \"slice.dest\");\n llvm::Value* memcpy_source =\n source_array.EmitArrayElementAddress(source_index, b(), \"slice.source\");\n const int64_t memcpy_elements =\n primitive_elements_per_logical_element * memcpy_logical_elements;\n EmitTransferElements(memcpy_dest, memcpy_source, memcpy_elements,\n slice->shape().element_type(), target_array,\n source_array);\n if (VLOG_IS_ON(2)) {\n const int64_t memcpy_bytes =\n ShapeUtil::ByteSizeOf(logical_element_shape) * memcpy_elements;\n VLOG(2) << \" emitted copy of \" << memcpy_bytes << \" bytes inside \"\n << num_outer_loops << \" loops\";\n }\n if (num_outer_loops > 0) {\n SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), b());\n }\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleDynamicSlice(HloInstruction* dynamic_slice) {\n if (ShapeUtil::IsScalar(dynamic_slice->shape())) {\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(dynamic_slice));\n return EmitMemcpy(*dynamic_slice->operand(0), *dynamic_slice);\n }\n return DefaultAction(dynamic_slice);\n}\nabsl::Status IrEmitter::HandleDynamicUpdateSlice(\n HloInstruction* dynamic_update_slice) {\n auto update = dynamic_update_slice->operand(1);\n if (ShapeUtil::IsScalar(dynamic_update_slice->shape())) {\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(dynamic_update_slice));\n return EmitMemcpy(*update, *dynamic_update_slice);\n } else if (llvm_ir::CanUpdateDynamicSliceInPlace(dynamic_update_slice,\n assignment_)) {\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(dynamic_update_slice));\n auto operands = GetIrArraysForOperandsOf(dynamic_update_slice);\n return llvm_ir::EmitDynamicUpdateSliceInPlace(\n operands, GetIrArrayFor(dynamic_update_slice),\n IrName(dynamic_update_slice, \"in_place\"), b());\n }\n return DefaultAction(dynamic_update_slice);\n}\nabsl::Status IrEmitter::HandleRecv(HloInstruction* recv) {\n return Unimplemented(\"Recv is not implemented on CPU.\");\n}\nabsl::Status 
IrEmitter::HandleRecvDone(HloInstruction* recv_done) {\n return Unimplemented(\"Recv-done is not implemented on CPU.\");\n}\nabsl::Status IrEmitter::HandlePad(HloInstruction* pad) {\n CHECK_EQ(pad->operand_count(), 2);\n const auto operand = pad->operand(0);\n const auto padding_value = pad->operand(1);\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(pad));\n return HandlePad(pad, GetIrArrayFor(operand), GetIrArrayFor(padding_value),\n GetIrArrayFor(pad));\n}\nabsl::Status IrEmitter::HandlePad(HloInstruction* pad,\n const llvm_ir::IrArray& operand_array,\n const llvm_ir::IrArray& padding_value_array,\n const llvm_ir::IrArray& output_array) {\n CHECK_EQ(pad->operand_count(), 2);\n for (auto& padding_dimension : pad->padding_config().dimensions()) {\n if (padding_dimension.edge_padding_low() < 0 ||\n padding_dimension.edge_padding_high() < 0) {\n return InternalStrCat(\n \"Encountered negative padding in IrEmitter on CPU. \"\n \"This should have been eliminated at the HLO level. \",\n pad->ToString());\n }\n }\n const HloInstruction* padding_value = pad->operand(1);\n const auto index_type = b()->getInt64Ty();\n const auto index = llvm_ir::IrArray::Index(index_type);\n llvm::Value* padding_value_addr = padding_value_array.EmitArrayElementAddress(\n index, b(), \"padding_value_addr\", true, nullptr);\n const llvm_ir::ElementGenerator element_generator =\n [this, padding_value,\n padding_value_addr](const llvm_ir::IrArray::Index& target_index) {\n return b()->CreateLoad(IrShapeType(padding_value->shape()),\n padding_value_addr);\n };\n TF_RETURN_IF_ERROR(EmitTargetElementLoop(\n pad, \"initialize\", element_generator,\n std::optional(output_array)));\n llvm_ir::ForLoopNest loops(IrName(pad, \"assign\"), b());\n const HloInstruction* operand = pad->operand(0);\n const llvm_ir::IrArray::Index operand_index =\n loops.AddLoopsForShape(operand->shape(), \"operand\");\n SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), b());\n llvm::Value* operand_data =\n 
operand_array.EmitReadArrayElement(operand_index, b());\n const PaddingConfig& padding_config = pad->padding_config();\n std::vector output_multi_index;\n for (size_t i = 0; i < operand_index.size(); ++i) {\n llvm::Value* offset =\n Mul(operand_index[i],\n b()->getInt64(padding_config.dimensions(i).interior_padding() + 1));\n llvm::Value* index = Add(\n offset, b()->getInt64(padding_config.dimensions(i).edge_padding_low()));\n output_multi_index.push_back(index);\n }\n llvm_ir::IrArray::Index output_index(\n output_multi_index, output_array.GetShape(), operand_index.GetType());\n output_array.EmitWriteArrayElement(output_index, operand_data, b());\n SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), b());\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleFusion(HloInstruction* fusion) {\n auto* root = fusion->fused_expression_root();\n if (llvm_ir::CanEmitFusedDynamicUpdateSliceInPlace(fusion, assignment_)) {\n VLOG(3) << \"HandleFusion FusedDynamicUpdateSliceInPlace\";\n CpuElementalIrEmitter elemental_emitter(hlo_module_config_, this, module_);\n FusedIrEmitter fused_emitter(elemental_emitter);\n BindFusionArguments(fusion, &fused_emitter);\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(fusion));\n return llvm_ir::EmitFusedDynamicUpdateSliceInPlace(\n fusion, GetIrArrayFor(fusion), &fused_emitter, b());\n } else if (fusion->IsLoopFusion()) {\n VLOG(3) << \"HandleFusion kLoop\";\n CpuElementalIrEmitter elemental_emitter(hlo_module_config_, this, module_);\n FusedIrEmitter fused_emitter(elemental_emitter);\n BindFusionArguments(fusion, &fused_emitter);\n TF_ASSIGN_OR_RETURN(auto generator, fused_emitter.GetGenerator(\n *fusion->fused_expression_root()));\n return EmitTargetElementLoop(fusion, \"kLoop_fusion\", generator,\n std::nullopt);\n } else if (fusion->IsOutputFusion()) {\n VLOG(3) << \"HandleFusion kOutput\";\n int64_t dot_op_index =\n root->operand(0)->opcode() == HloOpcode::kDot ? 
0 : 1;\n const HloInstruction* dot = root->operand(dot_op_index);\n CHECK_EQ(dot->opcode(), HloOpcode::kDot)\n << dot->ToString() << \" \"\n << fusion->fused_instructions_computation()->ToString();\n int64_t dot_lhs_param_number = dot->operand(0)->parameter_number();\n int64_t dot_rhs_param_number = dot->operand(1)->parameter_number();\n int64_t addend_param_number =\n root->operand(1 - dot_op_index)->parameter_number();\n Shape target_shape = fusion->shape();\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(fusion));\n llvm_ir::IrArray target_array = GetIrArrayFor(fusion);\n llvm_ir::IrArray lhs_array(\n GetIrArrayFor(fusion->operand(dot_lhs_param_number)));\n llvm_ir::IrArray rhs_array(\n GetIrArrayFor(fusion->operand(dot_rhs_param_number)));\n llvm_ir::IrArray addend_array(\n GetIrArrayFor(fusion->operand(addend_param_number)));\n TF_RETURN_IF_ERROR(\n EmitDotOperation(*dot, target_array, lhs_array, rhs_array,\n &addend_array, GetExecutableRunOptionsArgument(), b(),\n hlo_module_config_, target_machine_features_));\n return absl::OkStatus();\n } else {\n return Unimplemented(\"Fusion kind not implemented on CPU\");\n }\n}\nabsl::Status IrEmitter::HandleCall(HloInstruction* call) {\n HloComputation* computation = call->to_apply();\n llvm::Function* call_ir_function = FindOrDie(\n emitted_functions_, ComputationToEmit{computation, allow_reassociation_});\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(call));\n auto backend_config_or =\n computation->root_instruction()->backend_config();\n if (backend_config_or.ok() &&\n !backend_config_or->outer_dimension_partitions().empty()) {\n std::vector call_args = GetArrayFunctionCallArguments(\n {}, b(), computation->name(),\n emitted_value_[call],\n GetExecutableRunOptionsArgument(),\n GetBufferTableArgument(),\n GetStatusArgument(),\n GetProfileCountersArgument());\n HloInstruction* root = computation->root_instruction();\n TF_RETURN_IF_ERROR(EmitCallToParallelForkJoin(\n call_args, root->shape(),\n 
backend_config_or->outer_dimension_partitions(), b(), call_ir_function,\n computation->name()));\n if (ComputationTransitivelyContainsCustomCall(computation)) {\n EmitEarlyReturnIfErrorStatus();\n }\n } else {\n EmitGlobalCall(*computation, computation->name());\n }\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::EmitSliceToDynamic(\n const HloInstruction* hlo, absl::Span source_arrays,\n const llvm_ir::IrArray& target_array) {\n std::vector dynamic_dims;\n int32_t raw_data_size =\n ShapeUtil::ByteSizeOf(ShapeUtil::MakeStaticShape(hlo->shape()));\n llvm::Value* dest_buffer = target_array.GetBasePointer();\n for (int64_t i = 1; i < hlo->operand_count(); ++i) {\n const int64_t dim_index = i - 1;\n llvm::Value* source_buffer = source_arrays[i].GetBasePointer();\n llvm::LoadInst* dyn_dim_size = Load(IrShapeType(hlo->operand(i)->shape()),\n source_buffer, \"dyn_dim_size\");\n llvm::Value* metadata = b()->CreateConstInBoundsGEP1_32(\n b()->getInt8Ty(), dest_buffer,\n raw_data_size + dim_index * sizeof(int32_t));\n b()->CreateStore(dyn_dim_size, metadata);\n dynamic_dims.push_back(b()->CreateIntCast(dyn_dim_size, b()->getInt64Ty(),\n true,\n \"i64_dyn_dim_size\"));\n }\n auto loop_body_emitter =\n [&](const llvm_ir::IrArray::Index& array_index) -> absl::Status {\n llvm::Value* source_element =\n source_arrays[0].EmitReadArrayElement(array_index, b());\n llvm::Value* linear_index = array_index.Linearize(dynamic_dims, b());\n llvm_ir::IrArray::Index dest_index(linear_index, target_array.GetShape(),\n b());\n target_array.EmitWriteArrayElement(dest_index, source_element, b());\n return absl::OkStatus();\n };\n return llvm_ir::LoopEmitter(loop_body_emitter, target_array.GetShape(),\n dynamic_dims, b())\n .EmitLoop(IrName(hlo));\n}\nabsl::Status IrEmitter::HandleSliceToDynamic(HloInstruction* hlo) {\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(hlo));\n llvm_ir::IrArray target_array = GetIrArrayFor(hlo);\n std::vector source_arrays;\n 
source_arrays.reserve(hlo->operand_count());\n for (auto operand : hlo->operands()) {\n source_arrays.push_back(GetIrArrayFor(operand));\n }\n return EmitSliceToDynamic(hlo, source_arrays, target_array);\n}\nabsl::Status IrEmitter::HandlePadToStatic(HloInstruction* hlo) {\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(hlo));\n TF_ASSIGN_OR_RETURN(BufferAllocation::Slice data_slice,\n assignment_.GetUniqueSlice(hlo, {0}));\n std::vector dynamic_dims;\n std::vector tuple_operand_ptrs;\n const Shape& data_shape = ShapeUtil::GetSubshape(hlo->shape(), {0});\n const Shape& input_shape = hlo->operand(0)->shape();\n llvm::Value* data_address = EmitBufferPointer(data_slice, data_shape);\n llvm::Type* data_type = IrShapeType(data_shape);\n llvm_ir::IrArray data_array(data_address, data_type, data_shape);\n llvm::Value* source_buffer = GetEmittedValueFor(hlo->operand(0));\n int64_t raw_data_size =\n ShapeUtil::ByteSizeOf(ShapeUtil::MakeStaticShape(input_shape));\n tuple_operand_ptrs.push_back(data_array.GetBasePointer());\n for (int i = 1; i < hlo->shape().tuple_shapes_size(); ++i) {\n const Shape& dim_shape = ShapeUtil::GetSubshape(hlo->shape(), {i});\n TF_RET_CHECK(Shape::Equal()(dim_shape, ShapeUtil::MakeScalarShape(S32)));\n TF_ASSIGN_OR_RETURN(BufferAllocation::Slice dim_size_slice,\n assignment_.GetUniqueSlice(hlo, {i}));\n llvm::Value* dest_dim_size_address =\n EmitBufferPointer(dim_size_slice, data_shape);\n const int64_t dim_index = i - 1;\n llvm::Value* metadata = b()->CreateConstInBoundsGEP1_32(\n b()->getInt8Ty(), source_buffer,\n raw_data_size + dim_index * sizeof(int32_t));\n llvm::Value* dyn_dim_size =\n b()->CreateLoad(b()->getInt32Ty(), metadata, \"dyn_dim_size\");\n b()->CreateStore(dyn_dim_size, dest_dim_size_address);\n dynamic_dims.push_back(b()->CreateIntCast(dyn_dim_size, b()->getInt64Ty(),\n true,\n \"i64_dyn_dim_size\"));\n tuple_operand_ptrs.push_back(dest_dim_size_address);\n }\n auto loop_body_emitter =\n [&](const llvm_ir::IrArray::Index& 
array_index) -> absl::Status {\n llvm::Value* linear_index = array_index.Linearize(dynamic_dims, b());\n llvm_ir::IrArray::Index source_index(linear_index, input_shape, b());\n llvm::Value* source_element =\n GetIrArrayFor(hlo->operand(0)).EmitReadArrayElement(source_index, b());\n data_array.EmitWriteArrayElement(array_index, source_element, b());\n return absl::OkStatus();\n };\n TF_RETURN_IF_ERROR(\n llvm_ir::LoopEmitter(loop_body_emitter, input_shape, dynamic_dims, b())\n .EmitLoop(IrName(hlo)));\n llvm_ir::EmitTuple(GetIrArrayFor(hlo), tuple_operand_ptrs, b());\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleTopK(HloInstruction* hlo) {\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(hlo));\n const HloInstruction* input = hlo->operand(0);\n const int64_t k = hlo->shape().tuple_shapes(0).dimensions().back();\n const bool has_batch = hlo->shape().tuple_shapes(0).dimensions_size() == 2;\n TF_RET_CHECK(input->shape().element_type() == F32) << hlo->ToString();\n TF_RET_CHECK(LayoutUtil::IsMonotonicWithDim0Major(\n hlo->shape().tuple_shapes(0).layout()))\n << hlo->ToString();\n TF_RET_CHECK(LayoutUtil::IsMonotonicWithDim0Major(\n hlo->shape().tuple_shapes(1).layout()))\n << hlo->ToString();\n TF_RET_CHECK(\n LayoutUtil::IsMonotonicWithDim0Major(hlo->operand(0)->shape().layout()))\n << hlo->ToString();\n TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice values_slice,\n assignment_.GetUniqueSlice(hlo->operand(0), {}));\n TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_values_slice,\n assignment_.GetUniqueSlice(hlo, {0}));\n TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_indices_slice,\n assignment_.GetUniqueSlice(hlo, {1}));\n llvm::Value* values_ptr =\n EmitBufferPointer(values_slice, hlo->operand(0)->shape());\n llvm::Value* out_values_ptr =\n EmitBufferPointer(out_values_slice, hlo->shape().tuple_shapes(0));\n llvm::Value* out_indices_ptr =\n EmitBufferPointer(out_indices_slice, hlo->shape().tuple_shapes(1));\n EmitCallToFunc(\n 
runtime::kTopKF32SymbolName,\n {b()->getInt64(has_batch ? input->shape().dimensions(0) : 1),\n b()->getInt64(input->shape().dimensions().back()), b()->getInt64(k),\n values_ptr, out_values_ptr, out_indices_ptr},\n b()->getVoidTy());\n llvm_ir::EmitTuple(GetIrArrayFor(hlo), {out_values_ptr, out_indices_ptr},\n b());\n return absl::OkStatus();\n}\n#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)\nstd::vector IrEmitter::EmitOneDnnOperandsAlloca(\n HloInstruction* custom_call, llvm::Value*& args_val, int& arg_indx) {\n std::vector operands_stack_alloca;\n const int num_operands = custom_call->operand_count();\n operands_stack_alloca.reserve(num_operands);\n for (int i = 0; i < num_operands; ++i) {\n llvm_ir::IrArray ir_array(GetIrArrayFor(custom_call->operand(i)));\n StackAlloca stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), ir_array);\n args_val = b()->CreateInsertValue(args_val, stack_alloca.value, arg_indx++);\n operands_stack_alloca.push_back(std::move(stack_alloca));\n }\n return operands_stack_alloca;\n}\nabsl::Status IrEmitter::HandleOneDnnMatMulCalls(\n HloInstruction* custom_call, std::string runtime_symbol_name) {\n const int nargs_offset = 3;\n const int num_operands = custom_call->operand_count();\n const int nargs = nargs_offset + num_operands;\n int arg_indx = 0;\n llvm::Type* i64_type = b()->getInt64Ty();\n llvm::Type* ptr_type = b()->getPtrTy();\n llvm::ArrayType* ptr_array_type = llvm::ArrayType::get(ptr_type, nargs);\n llvm::Value* args_val = llvm::UndefValue::get(ptr_array_type);\n llvm::Value* nargs_val = b()->getInt64(nargs);\n llvm::Value* nargs_ptr =\n llvm_ir::EmitAllocaAtFunctionEntry(i64_type, \"nargs\", b());\n b()->CreateLifetimeStart(nargs_ptr, b()->getInt64(-1));\n b()->CreateStore(nargs_val, nargs_ptr);\n args_val = b()->CreateInsertValue(args_val, nargs_ptr, arg_indx++);\n llvm::Value* run_opts_val = GetExecutableRunOptionsArgument();\n args_val = b()->CreateInsertValue(args_val, run_opts_val, arg_indx++);\n auto typed_custom_call = 
Cast(custom_call);\n auto backend_config = typed_custom_call->backend_config();\n OneDnnMatMulConfig matmul_config;\n matmul_config.CopyFrom(backend_config->onednn_matmul_config());\n std::string str_config;\n matmul_config.SerializeToString(&str_config);\n llvm::Value* matmul_config_val =\n b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(str_config));\n args_val = b()->CreateInsertValue(args_val, matmul_config_val, arg_indx++);\n auto operands_stack_alloca =\n EmitOneDnnOperandsAlloca(custom_call, args_val, arg_indx);\n TF_RET_CHECK(nargs == arg_indx)\n << \"Number of arguments don't equal the last argument index.\";\n llvm::Value* args_ptr =\n llvm_ir::EmitAllocaAtFunctionEntry(ptr_array_type, \"matmul.args\", b());\n b()->CreateLifetimeStart(args_ptr, b()->getInt64(-1));\n b()->CreateStore(args_val, args_ptr);\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(custom_call));\n StackAlloca result_stack_alloca;\n StackAlloca scratch_stack_alloca;\n std::vector fn_call_args;\n fn_call_args.reserve(3);\n const bool use_scratchpad = custom_call->shape().IsTuple();\n if (use_scratchpad) {\n llvm::Value* result_slice_ptr;\n llvm::Value* scratch_slice_ptr;\n llvm_ir::IrArray result_array;\n llvm_ir::IrArray scratch_array;\n TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice result_slice,\n assignment_.GetUniqueSlice(custom_call, {0}));\n const Shape& result_shape = custom_call->shape().tuple_shapes(0);\n result_slice_ptr = EmitBufferPointer(result_slice, result_shape);\n llvm::Type* ir_type = IrShapeType(result_shape);\n result_array = llvm_ir::IrArray(result_slice_ptr, ir_type, result_shape);\n result_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), result_array);\n fn_call_args.push_back(result_stack_alloca.value);\n TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice scratch_slice,\n assignment_.GetUniqueSlice(custom_call, {1}));\n const Shape& scratch_shape = custom_call->shape().tuple_shapes(1);\n scratch_slice_ptr = EmitBufferPointer(scratch_slice, scratch_shape);\n 
llvm::Type* scratch_type = IrShapeType(scratch_shape);\n scratch_array =\n llvm_ir::IrArray(scratch_slice_ptr, scratch_type, scratch_shape);\n scratch_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), scratch_array);\n fn_call_args.push_back(scratch_stack_alloca.value);\n llvm_ir::EmitTuple(GetIrArrayFor(custom_call),\n {result_slice_ptr, scratch_slice_ptr}, b());\n } else {\n llvm_ir::IrArray result_array;\n result_array = GetIrArrayFor(custom_call);\n result_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), result_array);\n fn_call_args.push_back(result_stack_alloca.value);\n fn_call_args.push_back(llvm::ConstantPointerNull::get(b()->getPtrTy()));\n }\n fn_call_args.push_back(args_ptr);\n EmitCallToFunc(std::move(runtime_symbol_name), fn_call_args,\n b()->getVoidTy());\n b()->CreateLifetimeEnd(nargs_ptr, b()->getInt64(-1));\n b()->CreateLifetimeEnd(args_ptr, b()->getInt64(-1));\n for (auto& alloca : operands_stack_alloca) {\n alloca.EmitLifetimeEnd();\n }\n result_stack_alloca.EmitLifetimeEnd();\n if (use_scratchpad) {\n scratch_stack_alloca.EmitLifetimeEnd();\n }\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleOneDnnConvolution(HloInstruction* custom_call) {\n const int nargs_offset = 3;\n const int num_operands = custom_call->operand_count();\n const int nargs = nargs_offset + num_operands;\n int arg_indx = 0;\n llvm::Type* i64_type = b()->getInt64Ty();\n llvm::Type* ptr_type = b()->getPtrTy();\n llvm::ArrayType* ptr_array_type = llvm::ArrayType::get(ptr_type, nargs);\n llvm::Value* args_val = llvm::UndefValue::get(ptr_array_type);\n llvm::Value* nargs_val = b()->getInt64(nargs);\n llvm::Value* nargs_ptr =\n llvm_ir::EmitAllocaAtFunctionEntry(i64_type, \"nargs\", b());\n b()->CreateLifetimeStart(nargs_ptr, b()->getInt64(-1));\n b()->CreateStore(nargs_val, nargs_ptr);\n args_val = b()->CreateInsertValue(args_val, nargs_ptr, arg_indx++);\n llvm::Value* run_opts_val = GetExecutableRunOptionsArgument();\n args_val = b()->CreateInsertValue(args_val, 
run_opts_val, arg_indx++);\n auto typed_custom_call = Cast(custom_call);\n auto backend_config = typed_custom_call->backend_config();\n OneDnnConvolutionConfig conv_config;\n conv_config.CopyFrom(backend_config->onednn_conv_config());\n std::string str_config;\n conv_config.SerializeToString(&str_config);\n llvm::Value* conv_config_val =\n b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(str_config));\n args_val = b()->CreateInsertValue(args_val, conv_config_val, arg_indx++);\n auto operands_stack_alloca =\n EmitOneDnnOperandsAlloca(custom_call, args_val, arg_indx);\n TF_RET_CHECK(nargs == arg_indx)\n << \"Number of arguments don't equal the last argument index.\";\n llvm::Value* args_ptr = llvm_ir::EmitAllocaAtFunctionEntry(\n ptr_array_type, \"convolution.args\", b());\n b()->CreateLifetimeStart(args_ptr, b()->getInt64(-1));\n b()->CreateStore(args_val, args_ptr);\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(custom_call));\n llvm_ir::IrArray result_array = GetIrArrayFor(custom_call);\n auto result_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), result_array);\n EmitCallToFunc(runtime::kOneDnnConvolutionSymbolName,\n {result_stack_alloca.value, args_ptr}, b()->getVoidTy());\n b()->CreateLifetimeEnd(nargs_ptr, b()->getInt64(-1));\n for (int i = 0; i < num_operands; ++i) {\n operands_stack_alloca[i].EmitLifetimeEnd();\n }\n b()->CreateLifetimeEnd(args_ptr, b()->getInt64(-1));\n result_stack_alloca.EmitLifetimeEnd();\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleOneDnnLayerNorm(HloInstruction* custom_call) {\n const int nargs_offset = 3;\n const int num_operands = custom_call->operand_count();\n const int nargs = nargs_offset + num_operands;\n int arg_indx = 0;\n llvm::Type* i64_type = b()->getInt64Ty();\n llvm::Type* ptr_type = b()->getPtrTy();\n llvm::ArrayType* ptr_array_type = llvm::ArrayType::get(ptr_type, nargs);\n llvm::Value* args_val = llvm::UndefValue::get(ptr_array_type);\n llvm::Value* nargs_val = b()->getInt64(nargs);\n llvm::Value* 
nargs_ptr =\n llvm_ir::EmitAllocaAtFunctionEntry(i64_type, \"nargs\", b());\n b()->CreateLifetimeStart(nargs_ptr, b()->getInt64(-1));\n b()->CreateStore(nargs_val, nargs_ptr);\n args_val = b()->CreateInsertValue(args_val, nargs_ptr, arg_indx++);\n llvm::Value* run_opts_val = GetExecutableRunOptionsArgument();\n args_val = b()->CreateInsertValue(args_val, run_opts_val, arg_indx++);\n auto typed_custom_call = Cast(custom_call);\n auto backend_config = typed_custom_call->backend_config();\n OneDnnNormConfig ln_config;\n ln_config.CopyFrom(backend_config->onednn_layer_norm_config());\n std::string str_config;\n ln_config.SerializeToString(&str_config);\n llvm::Value* ln_config_val =\n b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(str_config));\n args_val = b()->CreateInsertValue(args_val, ln_config_val, arg_indx++);\n auto operands_stack_alloca =\n EmitOneDnnOperandsAlloca(custom_call, args_val, arg_indx);\n TF_RET_CHECK(nargs == arg_indx)\n << \"Number of arguments don't equal the last argument index.\";\n llvm::Value* args_ptr =\n llvm_ir::EmitAllocaAtFunctionEntry(ptr_array_type, \"layernorm.args\", b());\n b()->CreateLifetimeStart(args_ptr, b()->getInt64(-1));\n b()->CreateStore(args_val, args_ptr);\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(custom_call));\n llvm_ir::IrArray result_array = GetIrArrayFor(custom_call);\n auto result_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), result_array);\n EmitCallToFunc(runtime::kOneDnnLayerNormSymbolName,\n {result_stack_alloca.value, args_ptr}, b()->getVoidTy());\n b()->CreateLifetimeEnd(nargs_ptr, b()->getInt64(-1));\n for (int i = 0; i < num_operands; ++i) {\n operands_stack_alloca[i].EmitLifetimeEnd();\n }\n b()->CreateLifetimeEnd(args_ptr, b()->getInt64(-1));\n result_stack_alloca.EmitLifetimeEnd();\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleOneDnnSoftmax(HloInstruction* custom_call) {\n auto typed_custom_call = Cast(custom_call);\n auto backend_config = typed_custom_call->backend_config();\n 
OneDnnSoftmaxConfig softmax_config;\n softmax_config.CopyFrom(backend_config->onednn_softmax_config());\n std::string str_config;\n softmax_config.SerializeToString(&str_config);\n llvm::Value* softmax_config_val =\n b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(str_config));\n auto input = custom_call->operand(0);\n llvm_ir::IrArray input_array(GetIrArrayFor(input));\n auto input_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), input_array);\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(custom_call));\n llvm_ir::IrArray result_array = GetIrArrayFor(custom_call);\n auto result_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), result_array);\n EmitCallToFunc(runtime::kOneDnnSoftmaxSymbolName,\n {GetExecutableRunOptionsArgument(), input_stack_alloca.value,\n result_stack_alloca.value, softmax_config_val},\n b()->getVoidTy());\n input_stack_alloca.EmitLifetimeEnd();\n result_stack_alloca.EmitLifetimeEnd();\n return absl::OkStatus();\n}\n#endif \nabsl::Status IrEmitter::HandleCustomCall(HloInstruction* custom_call) {\n if (custom_call->custom_call_target() == \"PadToStatic\") {\n return HandlePadToStatic(custom_call);\n }\n if (custom_call->custom_call_target() == \"SliceToDynamic\") {\n return HandleSliceToDynamic(custom_call);\n }\n if (custom_call->custom_call_target() == \"TopK\") {\n return HandleTopK(custom_call);\n }\n#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)\n if (custom_call->custom_call_target() == \"__onednn$matmul\") {\n return HandleOneDnnMatMulCalls(custom_call,\n runtime::kOneDnnMatMulSymbolName);\n }\n if (custom_call->custom_call_target() == \"__onednn$softmax\") {\n return HandleOneDnnSoftmax(custom_call);\n }\n if (custom_call->custom_call_target() == \"__onednn$layernorm\") {\n return HandleOneDnnLayerNorm(custom_call);\n }\n if (custom_call->custom_call_target() == \"__onednn$convolution\") {\n return HandleOneDnnConvolution(custom_call);\n }\n if (custom_call->custom_call_target() == \"__onednn$matmul_reorder\") {\n return 
HandleOneDnnMatMulCalls(custom_call,\n runtime::kOneDnnMatMulReorderSymbolName);\n }\n#endif \n absl::Span operands(custom_call->operands());\n auto typed_custom_call = Cast(custom_call);\n auto is_typed_ffi = typed_custom_call->api_version() ==\n CustomCallApiVersion::API_VERSION_TYPED_FFI;\n std::vector operand_values;\n operand_values.reserve(operands.size());\n for (int64_t i = 0; i < operands.size(); ++i) {\n HloInstruction* operand = operands[i];\n if (is_typed_ffi) {\n TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(\n operand->shape(), [&](const Shape& shape, const ShapeIndex& index) {\n if (!shape.IsArray()) {\n return absl::OkStatus();\n }\n TF_ASSIGN_OR_RETURN(BufferAllocation::Slice slice,\n assignment_.GetUniqueSlice(operand, index));\n operand_values.push_back(EmitBufferPointer(slice, shape));\n return absl::OkStatus();\n }));\n } else {\n operand_values.push_back(GetEmittedValueFor(operand));\n }\n }\n llvm::AllocaInst* operands_alloca =\n llvm_ir::EmitAllocaAtFunctionEntryWithCount(\n b()->getPtrTy(), b()->getInt32(operand_values.size()),\n \"cc_operands_alloca\", b());\n if (emit_code_for_msan_) {\n const llvm::DataLayout& dl = module_->getDataLayout();\n llvm::Type* intptr_type = b()->getIntPtrTy(dl);\n EmitCallToFunc(\"__msan_unpoison\",\n {operands_alloca,\n llvm::ConstantInt::get(\n intptr_type, *operands_alloca->getAllocationSize(dl))},\n b()->getVoidTy());\n }\n for (int64_t i = 0; i < operand_values.size(); ++i) {\n llvm::Value* slot_in_operands_alloca =\n InBoundsGEP(operands_alloca->getAllocatedType(), operands_alloca,\n {b()->getInt64(i)});\n Store(operand_values[i], slot_in_operands_alloca);\n }\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(custom_call));\n std::vector tuple_ptrs;\n if (custom_call->shape().IsTuple()) {\n for (int i = 0; i < ShapeUtil::TupleElementCount(custom_call->shape());\n ++i) {\n const Shape& elem_shape =\n ShapeUtil::GetTupleElementShape(custom_call->shape(), i);\n if (!is_typed_ffi) {\n 
TF_RET_CHECK(!elem_shape.IsTuple()) << \"Nested tuples not implemented\";\n }\n TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice slice,\n assignment_.GetUniqueSlice(custom_call, {i}));\n tuple_ptrs.push_back(EmitBufferPointer(slice, elem_shape));\n }\n llvm_ir::EmitTuple(GetIrArrayFor(custom_call), tuple_ptrs, b());\n }\n auto* output_address = GetEmittedValueFor(custom_call);\n switch (typed_custom_call->api_version()) {\n case CustomCallApiVersion::API_VERSION_ORIGINAL:\n EmitCallToFunc(custom_call->custom_call_target(),\n {output_address, operands_alloca}, b()->getVoidTy());\n break;\n case CustomCallApiVersion::API_VERSION_STATUS_RETURNING:\n EmitCallToFunc(custom_call->custom_call_target(),\n {output_address, operands_alloca, GetStatusArgument()},\n b()->getVoidTy());\n EmitEarlyReturnIfErrorStatus();\n break;\n case CustomCallApiVersion::API_VERSION_STATUS_RETURNING_UNIFIED: {\n absl::string_view opaque = typed_custom_call->opaque();\n EmitCallToFunc(custom_call->custom_call_target(),\n {output_address, operands_alloca,\n b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(opaque)),\n b()->getInt64(opaque.size()), GetStatusArgument()},\n b()->getVoidTy());\n EmitEarlyReturnIfErrorStatus();\n break;\n }\n case CustomCallApiVersion::API_VERSION_TYPED_FFI: {\n std::vector buffer_ptrs;\n if (custom_call->shape().IsTuple()) {\n buffer_ptrs.reserve(ShapeUtil::TupleElementCount(custom_call->shape()));\n }\n TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(\n custom_call->shape(),\n [&](const Shape& shape, const ShapeIndex& index) {\n if (!shape.IsArray()) {\n return absl::OkStatus();\n }\n TF_ASSIGN_OR_RETURN(BufferAllocation::Slice slice,\n assignment_.GetUniqueSlice(custom_call, index));\n buffer_ptrs.push_back(EmitBufferPointer(slice, shape));\n return absl::OkStatus();\n }));\n llvm::AllocaInst* results_alloca =\n llvm_ir::EmitAllocaAtFunctionEntryWithCount(\n b()->getPtrTy(), b()->getInt32(buffer_ptrs.size()),\n \"ffi_results_alloca\", b());\n if 
(emit_code_for_msan_) {\n const llvm::DataLayout& dl = module_->getDataLayout();\n llvm::Type* intptr_type = b()->getIntPtrTy(dl);\n EmitCallToFunc(\n \"__msan_unpoison\",\n {results_alloca,\n llvm::ConstantInt::get(intptr_type,\n *results_alloca->getAllocationSize(dl))},\n b()->getVoidTy());\n }\n for (int i = 0; i < buffer_ptrs.size(); ++i) {\n llvm::Value* tuple_slot_in_results_alloca =\n InBoundsGEP(results_alloca->getAllocatedType(), results_alloca,\n {b()->getInt64(i)});\n Store(buffer_ptrs[i], tuple_slot_in_results_alloca);\n }\n EmitCallToFfi(typed_custom_call, results_alloca, operands_alloca);\n EmitEarlyReturnIfErrorStatus();\n break;\n }\n default:\n return Internal(\n \"Unknown custom-call API version enum value: %d (%s)\",\n typed_custom_call->api_version(),\n CustomCallApiVersion_Name(typed_custom_call->api_version()));\n }\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleWhile(HloInstruction* xla_while) {\n HloComputation* condition = xla_while->while_condition();\n TF_RET_CHECK(ShapeUtil::IsScalar(condition->root_instruction()->shape()) &&\n condition->root_instruction()->shape().element_type() == PRED)\n << \"While condition computation must return bool; got: \"\n << ShapeUtil::HumanString(condition->root_instruction()->shape());\n TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(\n xla_while->shape(),\n [this, &xla_while](const Shape& ,\n const ShapeIndex& index) -> absl::Status {\n auto check = [this](const HloInstruction* a, const HloInstruction* b,\n const ShapeIndex& index) -> absl::Status {\n const BufferAllocation::Slice slice_a =\n assignment_.GetUniqueSlice(a, index).value();\n const BufferAllocation::Slice slice_b =\n assignment_.GetUniqueSlice(b, index).value();\n if (slice_a != slice_b) {\n return Internal(\n \"instruction %s %s does not share slice with \"\n \"instruction %s %s\",\n a->ToString(), slice_a.ToString(), b->ToString(),\n slice_b.ToString());\n }\n return absl::OkStatus();\n };\n 
TF_RETURN_IF_ERROR(check(xla_while, xla_while->operand(0), index));\n TF_RETURN_IF_ERROR(check(\n xla_while, xla_while->while_condition()->parameter_instruction(0),\n index));\n TF_RETURN_IF_ERROR(\n check(xla_while, xla_while->while_body()->parameter_instruction(0),\n index));\n TF_RETURN_IF_ERROR(check(\n xla_while, xla_while->while_body()->root_instruction(), index));\n return absl::OkStatus();\n }));\n const HloInstruction* init = xla_while->operand(0);\n emitted_value_[xla_while] = GetEmittedValueFor(init);\n llvm::BasicBlock* header_bb = llvm::BasicBlock::Create(\n module_->getContext(), IrName(xla_while, \"header\"),\n compute_function()->function());\n Br(header_bb);\n b()->SetInsertPoint(header_bb);\n EmitGlobalCall(*xla_while->while_condition(), IrName(xla_while, \"cond\"));\n llvm::Value* while_predicate = ICmpNE(\n Load(IrShapeType(\n xla_while->while_condition()->root_instruction()->shape()),\n GetBufferForGlobalCallReturnValue(*xla_while->while_condition())),\n llvm::ConstantInt::get(llvm_ir::PrimitiveTypeToIrType(PRED, module_), 0));\n llvm::BasicBlock* body_bb =\n llvm::BasicBlock::Create(module_->getContext(), IrName(xla_while, \"body\"),\n compute_function()->function());\n llvm::BasicBlock* exit_bb = llvm::BasicBlock::Create(\n module_->getContext(), IrName(xla_while, \"exit\"));\n CondBr(while_predicate, body_bb, exit_bb);\n b()->SetInsertPoint(body_bb);\n EmitGlobalCall(*xla_while->while_body(), IrName(xla_while, \"body\"));\n Br(header_bb);\n llvm::Function* llvm_fn = compute_function()->function();\n llvm_fn->insert(llvm_fn->end(), exit_bb);\n b()->SetInsertPoint(exit_bb);\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::EmitFastConcatenate(\n const HloInstruction* instr,\n absl::Span source_arrays,\n const llvm_ir::IrArray& target_array) {\n return ::xla::cpu::EmitFastConcatenate(instr, source_arrays, target_array,\n module_, *b());\n}\nabsl::Status EmitFastConcatenate(\n const HloInstruction* instr,\n absl::Span source_arrays,\n const 
llvm_ir::IrArray& target_array, llvm::Module* module,\n llvm::IRBuilder<>& b) {\n auto* concatenate = Cast(instr);\n const Shape& output_shape = concatenate->shape();\n int64_t concat_dim = concatenate->concatenate_dimension();\n const Layout& output_layout = output_shape.layout();\n auto output_min2maj = LayoutUtil::MinorToMajor(output_layout);\n auto concat_dim_layout_itr = absl::c_find(output_min2maj, concat_dim);\n std::vector inner_dims(output_min2maj.begin(),\n concat_dim_layout_itr);\n std::vector outer_dims(std::next(concat_dim_layout_itr),\n output_min2maj.end());\n llvm_ir::ForLoopNest loops(IrName(concatenate), &b);\n std::vector target_multi_index =\n loops.AddLoopsForShapeOnDimensions(output_shape, outer_dims, \"concat\");\n absl::c_replace(target_multi_index, static_cast(nullptr),\n static_cast(b.getInt64(0)));\n llvm_ir::IrArray::Index target_index(target_multi_index, output_shape,\n b.getInt64Ty());\n if (!outer_dims.empty()) {\n SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &b);\n }\n PrimitiveType primitive_type = output_shape.element_type();\n unsigned primitive_type_size =\n ShapeUtil::ByteSizeOfPrimitiveType(primitive_type);\n llvm::Value* target_region_begin =\n target_array.EmitArrayElementAddress(target_index, &b, \"target_region\");\n int64_t byte_offset_into_target_region = 0;\n int64_t inner_dims_product = absl::c_accumulate(\n inner_dims, int64_t{1}, [&](int64_t product, int64_t inner_dim) {\n return product * output_shape.dimensions(inner_dim);\n });\n for (int64_t i = 0; i < source_arrays.size(); ++i) {\n const Shape& input_shape = concatenate->operand(i)->shape();\n const llvm_ir::IrArray& source_array = source_arrays[i];\n llvm_ir::IrArray::Index source_index(target_multi_index, input_shape,\n b.getInt64Ty());\n llvm::Value* copy_source_address =\n source_array.EmitArrayElementAddress(source_index, &b, \"src_addr\");\n llvm::Value* copy_target_address =\n b.CreateGEP(b.getInt8Ty(), target_region_begin,\n 
b.getInt64(byte_offset_into_target_region));\n ::xla::cpu::EmitTransferElements(\n copy_target_address, copy_source_address,\n inner_dims_product * input_shape.dimensions(concat_dim), primitive_type,\n target_array, source_array, module, b);\n byte_offset_into_target_region += inner_dims_product *\n input_shape.dimensions(concat_dim) *\n primitive_type_size;\n }\n if (!outer_dims.empty()) {\n SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &b);\n }\n return absl::OkStatus();\n}\nllvm::Value* IrEmitter::EmitPrintf(absl::string_view fmt,\n absl::Span arguments) {\n std::vector call_args;\n call_args.push_back(b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(fmt)));\n absl::c_copy(arguments, std::back_inserter(call_args));\n return b()->CreateCall(\n b()->GetInsertBlock()->getParent()->getParent()->getOrInsertFunction(\n \"printf\",\n llvm::FunctionType::get(b()->getInt32Ty(), {b()->getPtrTy()},\n true)),\n call_args);\n}\nllvm::Value* IrEmitter::EmitPrintfToStderr(\n absl::string_view fmt, absl::Span arguments) {\n std::vector call_args;\n call_args.push_back(b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(fmt)));\n absl::c_copy(arguments, std::back_inserter(call_args));\n return b()->CreateCall(\n b()->GetInsertBlock()->getParent()->getParent()->getOrInsertFunction(\n runtime::kPrintfToStderrSymbolName,\n llvm::FunctionType::get(b()->getInt32Ty(), {b()->getPtrTy()},\n true)),\n call_args);\n}\nllvm::Value* IrEmitter::EmitCallToFunc(\n std::string func_name, const std::vector& arguments,\n llvm::Type* return_type, bool does_not_throw, bool only_accesses_arg_memory,\n bool only_accesses_inaccessible_mem_or_arg_mem) {\n std::vector types;\n types.reserve(arguments.size());\n absl::c_transform(arguments, std::back_inserter(types),\n [&](llvm::Value* val) { return val->getType(); });\n llvm::FunctionType* func_type =\n llvm::FunctionType::get(return_type, types, false);\n auto func = llvm::dyn_cast(\n module_->getOrInsertFunction(func_name, 
func_type).getCallee());\n func->setCallingConv(llvm::CallingConv::C);\n if (does_not_throw) {\n func->setDoesNotThrow();\n }\n if (only_accesses_arg_memory) {\n func->setOnlyAccessesArgMemory();\n }\n if (only_accesses_inaccessible_mem_or_arg_mem) {\n func->setOnlyAccessesInaccessibleMemOrArgMem();\n }\n return b()->CreateCall(func, arguments);\n}\ntemplate \nstatic const Shape& GetShape(T&& arg) {\n if constexpr (std::is_convertible_v,\n Shape>) {\n return arg; \n } else {\n return arg->shape();\n }\n};\nstruct EncodedInfo {\n llvm::AllocaInst* alloca;\n int64_t size;\n};\ntemplate \nstatic EncodedInfo StoreEncodedTypes(std::string_view alloca_name,\n const Args& args, llvm::IRBuilder<>& ir) {\n int64_t total_elements = 0;\n for (int64_t i = 0; i < args.size(); ++i) {\n total_elements += ShapeUtil::GetLeafCount(GetShape(args[i]));\n }\n llvm::AllocaInst* types_alloca = llvm_ir::EmitAllocaAtFunctionEntryWithCount(\n ir.getInt32Ty(), ir.getInt64(total_elements), alloca_name, &ir);\n int64_t element_id = 0;\n auto store_type = [&](const Shape& shape, const ShapeIndex& index) {\n if (shape.IsTuple()) {\n return;\n }\n llvm::Value* slot_in_types_alloca = ir.CreateConstInBoundsGEP1_32(\n ir.getInt32Ty(), types_alloca, element_id++);\n ir.CreateStore(ir.getInt32(shape.element_type()), slot_in_types_alloca);\n };\n for (int64_t i = 0; i < args.size(); ++i) {\n ShapeUtil::ForEachSubshape(GetShape(args[i]), store_type);\n }\n CHECK_EQ(element_id, total_elements);\n return {types_alloca, total_elements};\n};\ntemplate \nstatic EncodedInfo StoreEncodedShapes(std::string_view alloca_name,\n const Args& args, llvm::IRBuilder<>& ir) {\n int64_t total_dims = 0;\n int64_t total_dim_counts = 0;\n for (int64_t i = 0; i < args.size(); ++i) {\n ShapeUtil::ForEachSubshape(\n GetShape(args[i]), [&](const Shape& shape, const ShapeIndex& index) {\n if (!shape.IsArray()) {\n return;\n }\n total_dims += shape.dimensions().size();\n ++total_dim_counts;\n });\n }\n int64_t 
shapes_encoding_size = total_dim_counts \n + total_dims; \n llvm::AllocaInst* shapes_alloca = llvm_ir::EmitAllocaAtFunctionEntryWithCount(\n ir.getInt64Ty(), ir.getInt64(shapes_encoding_size), alloca_name, &ir);\n int64_t slot_id = 0;\n auto store_shape = [&](const Shape& shape, const ShapeIndex& index) {\n if (!shape.IsArray()) {\n return;\n }\n llvm::Value* alloca_slot = ir.CreateConstInBoundsGEP1_64(\n ir.getInt64Ty(), shapes_alloca, slot_id++);\n ir.CreateStore(ir.getInt64(shape.dimensions().size()), alloca_slot);\n for (int64_t dim : shape.dimensions()) {\n alloca_slot = ir.CreateConstInBoundsGEP1_64(ir.getInt64Ty(),\n shapes_alloca, slot_id++);\n ir.CreateStore(ir.getInt64(dim), alloca_slot);\n }\n };\n for (int64_t i = 0; i < args.size(); ++i) {\n ShapeUtil::ForEachSubshape(GetShape(args[i]), store_shape);\n }\n CHECK_EQ(slot_id, shapes_encoding_size); \n return {shapes_alloca, shapes_encoding_size};\n};\nllvm::Value* IrEmitter::EmitCallToFfi(HloCustomCallInstruction* custom_call,\n llvm::AllocaInst* results_alloca,\n llvm::AllocaInst* operands_alloca) {\n const auto& operands = absl::MakeSpan(custom_call->operands());\n const auto& shape = custom_call->shape();\n const auto& result_shapes =\n shape.IsTuple() ? 
shape.tuple_shapes() : std::vector({shape});\n EncodedInfo operand_types_encoded =\n StoreEncodedTypes(\"operands_types\", operands, *b());\n EncodedInfo operand_shapes_encoded =\n StoreEncodedShapes(\"operands_shapes\", operands, *b());\n EncodedInfo result_types_encoded =\n StoreEncodedTypes(\"results_types\", result_shapes, *b());\n EncodedInfo result_shapes_encoded =\n StoreEncodedShapes(\"results_shapes\", result_shapes, *b());\n const absl::string_view target = custom_call->custom_call_target(); \n const absl::string_view opaque = custom_call->opaque();\n const auto target_ref = llvm_ir::AsStringRef(target);\n const auto opaque_ref = llvm_ir::AsStringRef(opaque);\n std::vector arguments = {\n GetExecutableRunOptionsArgument(),\n b()->CreateGlobalStringPtr(target_ref),\n b()->getInt64(target.size()),\n results_alloca,\n operands_alloca,\n b()->CreateGlobalStringPtr(opaque_ref),\n b()->getInt64(opaque.size()),\n GetStatusArgument(),\n operand_types_encoded.alloca,\n b()->getInt64(operand_types_encoded.size),\n operand_shapes_encoded.alloca,\n result_types_encoded.alloca,\n b()->getInt64(result_types_encoded.size),\n result_shapes_encoded.alloca,\n };\n return EmitCallToFunc(runtime::kHandleFfiCallSymbolName, arguments,\n b()->getVoidTy(),\n false,\n true);\n}\nvoid IrEmitter::EmitTransferElements(llvm::Value* target, llvm::Value* source,\n int64_t element_count,\n PrimitiveType primitive_type,\n const llvm_ir::IrArray& target_array,\n const llvm_ir::IrArray& source_array) {\n ::xla::cpu::EmitTransferElements(target, source, element_count,\n primitive_type, target_array, source_array,\n module_, *b());\n}\nvoid EmitTransferElements(llvm::Value* target, llvm::Value* source,\n int64_t element_count, PrimitiveType primitive_type,\n const llvm_ir::IrArray& target_array,\n const llvm_ir::IrArray& source_array,\n llvm::Module* module, llvm::IRBuilder<>& b) {\n unsigned primitive_type_size =\n ShapeUtil::ByteSizeOfPrimitiveType(primitive_type);\n llvm::Align 
element_alignment(tsl::MathUtil::GCD(\n primitive_type_size,\n ::xla::cpu::MinimumAlignmentForPrimitiveType(primitive_type)));\n llvm::Type* primitive_llvm_type =\n llvm_ir::PrimitiveTypeToIrType(primitive_type, module);\n if (element_count == 1) {\n auto* load_instruction =\n b.CreateAlignedLoad(primitive_llvm_type, source, element_alignment);\n source_array.AnnotateLoadStoreInstructionWithMetadata(load_instruction);\n auto* store_instruction =\n b.CreateAlignedStore(load_instruction, target, element_alignment);\n target_array.AnnotateLoadStoreInstructionWithMetadata(store_instruction);\n } else {\n auto* memcpy_instruction = b.CreateMemCpy(\n target, llvm::Align(element_alignment), source,\n llvm::Align(element_alignment),\n element_count * primitive_type_size);\n std::map merged_metadata =\n llvm_ir::MergeMetadata(&module->getContext(), source_array.metadata(),\n target_array.metadata());\n for (const auto& kind_md_pair : merged_metadata) {\n memcpy_instruction->setMetadata(kind_md_pair.first, kind_md_pair.second);\n }\n }\n}\nabsl::Status IrEmitter::CanDoFastConcatenate(\n const HloInstruction* instr) const {\n if (ShouldEmitParallelLoopFor(*instr)) {\n return absl::Status(\n absl::StatusCode::kFailedPrecondition,\n \"Cannot generate memcpy-based concat for the parallel CPU backend\");\n }\n const auto* concatenate = Cast(instr);\n const Shape& output_shape = concatenate->shape();\n for (auto* op : concatenate->operands()) {\n if (!LayoutUtil::Equal(op->shape().layout(), output_shape.layout())) {\n return absl::Status(absl::StatusCode::kFailedPrecondition,\n \"Operand has mismatching layouts\");\n }\n }\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleConcatenate(HloInstruction* concatenate) {\n absl::Status fast_impl_reason = CanDoFastConcatenate(concatenate);\n if (fast_impl_reason.ok()) {\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(concatenate));\n llvm_ir::IrArray target_array = GetIrArrayFor(concatenate);\n std::vector source_arrays;\n 
source_arrays.reserve(concatenate->operands().size());\n for (HloInstruction* operand : concatenate->operands()) {\n source_arrays.emplace_back(GetIrArrayFor(operand));\n }\n TF_RETURN_IF_ERROR(::xla::cpu::EmitFastConcatenate(\n concatenate, source_arrays, target_array, module_, *b()));\n VLOG(1) << \"Emitted fast concatenate for \" << concatenate->ToString();\n return absl::OkStatus();\n }\n VLOG(1) << \"Could not emit fast concatenate for \" << concatenate->ToString()\n << \": \" << fast_impl_reason.message();\n return DefaultAction(concatenate);\n}\nabsl::Status IrEmitter::HandleConditional(HloInstruction* conditional) {\n auto branch_index = conditional->operand(0);\n int num_branches = conditional->branch_count();\n TF_RET_CHECK(ShapeUtil::IsScalar(branch_index->shape()) &&\n (branch_index->shape().element_type() == PRED ||\n branch_index->shape().element_type() == S32))\n << \"Branch index on a conditional must be scalar bool or int32_t; got: \"\n << ShapeUtil::HumanString(branch_index->shape());\n for (int b = 0; b < num_branches; ++b) {\n HloComputation* br_computation = conditional->branch_computation(b);\n TF_RET_CHECK(ShapeUtil::Equal(conditional->shape(),\n br_computation->root_instruction()->shape()))\n << \"Shape of conditional should be same as the shape of the \" << b\n << \"th branch computation; got: \"\n << ShapeUtil::HumanString(conditional->shape()) << \" and \"\n << ShapeUtil::HumanString(br_computation->root_instruction()->shape());\n }\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(conditional));\n if (branch_index->shape().element_type() == PRED) {\n llvm::LoadInst* pred_value = Load(\n GetIrArrayFor(branch_index).GetBasePointeeType(),\n GetIrArrayFor(branch_index).GetBasePointer(), \"load_predicate_value\");\n llvm::Value* pred_cond =\n ICmpNE(pred_value,\n llvm::ConstantInt::get(\n llvm_ir::PrimitiveTypeToIrType(PRED, module_), 0),\n \"boolean_predicate\");\n llvm_ir::LlvmIfData if_data =\n llvm_ir::EmitIfThenElse(pred_cond, \"conditional\", 
b());\n SetToFirstInsertPoint(if_data.true_block, b());\n EmitGlobalCall(*conditional->branch_computation(0),\n IrName(conditional, \"_true\"));\n SetToFirstInsertPoint(if_data.false_block, b());\n EmitGlobalCall(*conditional->branch_computation(1),\n IrName(conditional, \"_false\"));\n SetToFirstInsertPoint(if_data.after_block, b());\n return absl::OkStatus();\n }\n llvm::LoadInst* branch_index_value = Load(\n GetIrArrayFor(branch_index).GetBasePointeeType(),\n GetIrArrayFor(branch_index).GetBasePointer(), \"load_branch_index_value\");\n auto case_block = b()->GetInsertBlock();\n llvm::BasicBlock* after_block;\n if (case_block->getTerminator() == nullptr) {\n after_block = llvm_ir::CreateBasicBlock(nullptr, \"case-after\", b());\n b()->SetInsertPoint(case_block);\n b()->CreateBr(after_block);\n } else {\n after_block =\n case_block->splitBasicBlock(b()->GetInsertPoint(), \"case-after\");\n }\n case_block->getTerminator()->eraseFromParent();\n auto default_block = llvm_ir::CreateBasicBlock(nullptr, \"case-default\", b());\n b()->SetInsertPoint(default_block);\n EmitGlobalCall(*conditional->branch_computation(num_branches - 1),\n IrName(conditional, \"_default\"));\n b()->CreateBr(after_block);\n b()->SetInsertPoint(case_block);\n llvm::SwitchInst* case_inst =\n b()->CreateSwitch(branch_index_value, default_block, num_branches - 1);\n for (int br = 0; br < num_branches - 1; ++br) { \n auto branch_block = llvm_ir::CreateBasicBlock(\n nullptr, absl::StrCat(\"case-branch\", br), b());\n b()->SetInsertPoint(branch_block);\n EmitGlobalCall(*conditional->branch_computation(br),\n IrName(conditional, absl::StrCat(\"_branch\", br)));\n b()->CreateBr(after_block);\n case_inst->addCase(b()->getInt32(br), branch_block);\n }\n SetToFirstInsertPoint(after_block, b());\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleAfterAll(HloInstruction* after_all) {\n TF_RET_CHECK(ByteSizeOf(after_all->shape()) == 0);\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(after_all));\n 
return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleBatchNormGrad(HloInstruction* batch_norm_grad) {\n return Unimplemented(\"BatchNormGrad should be rewritten for CPU.\");\n}\nabsl::Status IrEmitter::HandleBatchNormTraining(\n HloInstruction* batch_norm_training) {\n return Unimplemented(\"BatchNormTraining should be rewritten for CPU.\");\n}\nabsl::Status IrEmitter::HandleGetDimensionSize(HloInstruction* get_size) {\n return Unimplemented(\"GetDimensionSize should be rewritten for CPU.\");\n}\nabsl::Status IrEmitter::HandleSetDimensionSize(HloInstruction* set_size) {\n return Unimplemented(\"SetDimensionSize should be rewritten for CPU.\");\n}\nabsl::Status IrEmitter::HandleAddDependency(HloInstruction* add_dependency) {\n emitted_value_[add_dependency] =\n GetEmittedValueFor(add_dependency->operand(0));\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleRng(HloInstruction* rng) {\n return Unimplemented(\"Rng should be expanded for CPU.\");\n}\nabsl::Status IrEmitter::HandleRngBitGenerator(HloInstruction* rng) {\n return Unimplemented(\"RngBitGenerator should be expanded for CPU.\");\n}\nabsl::Status IrEmitter::HandleRngGetAndUpdateState(HloInstruction* rng_state) {\n VLOG(2) << \"RngGetAndUpdateState: \" << rng_state->ToString();\n llvm::Value* old_state = llvm_ir::RngGetAndUpdateState(\n Cast(rng_state)->delta(), module_,\n b());\n TF_RETURN_IF_ERROR(EmitTargetAddressForOp(rng_state));\n llvm::Value* address = GetEmittedValueFor(rng_state);\n llvm::StoreInst* store = Store(old_state, address);\n store->setAlignment(llvm::Align(IrEmitter::MinimumAlignmentForPrimitiveType(\n rng_state->shape().element_type())));\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::HandleStochasticConvert(HloInstruction* instruction) {\n return Unimplemented(\"StochasticConvert should be decomposed for CPU.\");\n}\nabsl::Status IrEmitter::FinishVisit(HloInstruction* root) {\n VLOG(2) << \"FinishVisit root: \" << root->ToString();\n if (root->opcode() == 
HloOpcode::kOutfeed) {\n VLOG(2) << \" outfeed with value: \"\n << llvm_ir::DumpToString(GetEmittedValueFor(root->operand(0)));\n } else {\n VLOG(2) << \" value: \" << llvm_ir::DumpToString(GetEmittedValueFor(root));\n }\n auto record_complete_computation = [&](llvm::Value* prof_counter) {\n if (prof_counter) {\n profiling_state_.RecordCompleteComputation(b(), prof_counter);\n }\n };\n record_complete_computation(GetProfileCounterFor(*root->parent()));\n return absl::OkStatus();\n}\ntemplate \nllvm::Value* IrEmitter::GetProfileCounterCommon(\n const T& hlo,\n const absl::flat_hash_map& profile_index_map) {\n auto it = profile_index_map.find(&hlo);\n if (it == profile_index_map.end()) {\n return nullptr;\n }\n int64_t prof_counter_idx = it->second;\n std::string counter_name = IrName(\"prof_counter\", hlo.name());\n return GEP(b()->getInt64Ty(), GetProfileCountersArgument(),\n b()->getInt64(prof_counter_idx), counter_name);\n}\nllvm::Value* IrEmitter::GetProfileCounterFor(\n const HloInstruction& instruction) {\n return GetProfileCounterCommon(instruction,\n instruction_to_profile_idx_);\n}\nllvm::Value* IrEmitter::GetProfileCounterFor(\n const HloComputation& computation) {\n return GetProfileCounterCommon(computation,\n computation_to_profile_idx_);\n}\nvoid IrEmitter::ProfilingState::UpdateProfileCounter(llvm::IRBuilder<>* b,\n llvm::Value* prof_counter,\n llvm::Value* cycle_end,\n llvm::Value* cycle_start) {\n auto* cycle_diff = b->CreateSub(cycle_end, cycle_start);\n llvm::LoadInst* old_cycle_count = b->CreateLoad(\n llvm::cast(prof_counter)->getSourceElementType(),\n prof_counter, \"old_cycle_count\");\n auto* new_cycle_count =\n b->CreateAdd(cycle_diff, old_cycle_count, \"new_cycle_count\");\n b->CreateStore(new_cycle_count, prof_counter);\n}\nllvm::Value* IrEmitter::ProfilingState::ReadCycleCounter(llvm::IRBuilder<>* b) {\n llvm::Module* module = b->GetInsertBlock()->getModule();\n if (!use_rdtscp_) {\n llvm::Function* func_llvm_readcyclecounter =\n 
llvm::Intrinsic::getDeclaration(module,\n llvm::Intrinsic::readcyclecounter);\n return b->CreateCall(func_llvm_readcyclecounter);\n }\n llvm::Function* func_llvm_x86_rdtscp =\n llvm::Intrinsic::getDeclaration(module, llvm::Intrinsic::x86_rdtscp);\n llvm::Value* rdtscp_call = b->CreateCall(func_llvm_x86_rdtscp);\n return b->CreateExtractValue(rdtscp_call, {0});\n}\nvoid IrEmitter::ProfilingState::RecordCycleStart(llvm::IRBuilder<>* b,\n HloInstruction* hlo) {\n auto* cycle_start = ReadCycleCounter(b);\n cycle_start->setName(IrName(hlo, \"cycle_start\"));\n cycle_starts_[hlo] = cycle_start;\n if (first_read_cycle_start_ == nullptr) {\n first_read_cycle_start_ = cycle_start;\n }\n}\nvoid IrEmitter::ProfilingState::RecordCycleDelta(llvm::IRBuilder<>* b,\n HloInstruction* hlo,\n llvm::Value* prof_counter) {\n auto* cycle_end = ReadCycleCounter(b);\n cycle_end->setName(IrName(hlo, \"cycle_end\"));\n auto* cycle_start = cycle_starts_[hlo];\n UpdateProfileCounter(b, prof_counter, cycle_end, cycle_start);\n last_read_cycle_end_ = cycle_end;\n}\nvoid IrEmitter::ProfilingState::RecordCompleteComputation(\n llvm::IRBuilder<>* b, llvm::Value* prof_counter) {\n if (last_read_cycle_end_ && first_read_cycle_start_) {\n UpdateProfileCounter(b, prof_counter, last_read_cycle_end_,\n first_read_cycle_start_);\n }\n}\nvoid IrEmitter::TracingState::EmitTracingStart(llvm::IRBuilder<>* b,\n HloInstruction* hlo,\n llvm::Value* run_options) {\n if (!enabled_) {\n return;\n }\n llvm::Type* void_ptr_type = b->getPtrTy();\n llvm::FunctionType* fn_type = llvm::FunctionType::get(\n b->getInt64Ty(),\n {void_ptr_type, void_ptr_type, void_ptr_type, b->getInt64Ty()},\n false);\n llvm::Function* function = b->GetInsertBlock()->getParent();\n llvm::Module* module = function->getParent();\n const char* fn_name = runtime::kTracingStartSymbolName;\n llvm::FunctionCallee trace_func =\n module->getOrInsertFunction(fn_name, fn_type);\n if (auto* fn = llvm::dyn_cast(trace_func.getCallee())) {\n 
fn->setCallingConv(llvm::CallingConv::C);\n fn->setDoesNotThrow();\n fn->setOnlyAccessesArgMemory();\n }\n auto* hlo_name = b->CreateGlobalStringPtr(hlo->name());\n auto* hlo_module = b->CreateGlobalStringPtr(hlo->GetModule()->name());\n auto* program_id = b->getInt64(hlo->GetModule()->unique_id());\n auto* activity_id = b->CreateCall(\n trace_func, {run_options, hlo_name, hlo_module, program_id});\n activity_id->setName(IrName(hlo, \"activity_id\"));\n activity_ids_[hlo] = activity_id;\n}\nvoid IrEmitter::TracingState::EmitTracingEnd(llvm::IRBuilder<>* b,\n HloInstruction* hlo,\n llvm::Value* run_options) {\n if (!enabled_) {\n return;\n }\n llvm::FunctionType* fn_type =\n llvm::FunctionType::get(b->getVoidTy(), {b->getPtrTy(), b->getInt64Ty()},\n false);\n llvm::Function* function = b->GetInsertBlock()->getParent();\n llvm::Module* module = function->getParent();\n const char* fn_name = runtime::kTracingEndSymbolName;\n llvm::FunctionCallee trace_func =\n module->getOrInsertFunction(fn_name, fn_type);\n if (auto* fn = llvm::dyn_cast(trace_func.getCallee())) {\n fn->setCallingConv(llvm::CallingConv::C);\n fn->setDoesNotThrow();\n fn->setOnlyAccessesArgMemory();\n }\n auto* activity_id = activity_ids_.at(hlo);\n b->CreateCall(trace_func, {run_options, activity_id});\n}\nnamespace {\nbool IsHloVeryCheap(const HloInstruction* hlo) {\n return hlo->opcode() == HloOpcode::kBitcast ||\n hlo->opcode() == HloOpcode::kTuple ||\n hlo->opcode() == HloOpcode::kGetTupleElement ||\n hlo->opcode() == HloOpcode::kParameter ||\n hlo->opcode() == HloOpcode::kConstant ||\n hlo->opcode() == HloOpcode::kReplicaId;\n}\n} \nabsl::Status IrEmitter::Preprocess(HloInstruction* hlo) {\n VLOG(3) << \"Visiting: \" << hlo->ToString();\n if (instruction_to_profile_idx_.count(hlo) ||\n (hlo_module_config_.cpu_traceme_enabled() && !IsHloVeryCheap(hlo) &&\n hlo->parent()->IsEntryComputation())) {\n tracing_state_.EmitTracingStart(b(), hlo,\n GetExecutableRunOptionsArgument());\n 
profiling_state_.RecordCycleStart(b(), hlo);\n }\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::Postprocess(HloInstruction* hlo) {\n if (auto* prof_counter = GetProfileCounterFor(*hlo)) {\n profiling_state_.RecordCycleDelta(b(), hlo, prof_counter);\n }\n if (instruction_to_profile_idx_.count(hlo) ||\n (hlo_module_config_.cpu_traceme_enabled() && !IsHloVeryCheap(hlo) &&\n hlo->parent()->IsEntryComputation())) {\n tracing_state_.EmitTracingEnd(b(), hlo, GetExecutableRunOptionsArgument());\n }\n return absl::OkStatus();\n}\nllvm_ir::IrArray IrEmitter::GetIrArrayFor(const HloInstruction* hlo) {\n llvm::Value* value_for_op = GetEmittedValueFor(hlo);\n llvm::Type* ir_type = IrShapeType(hlo->shape());\n llvm_ir::IrArray array(value_for_op, ir_type, hlo->shape());\n AddAliasingInformationToIrArray(*hlo, &array);\n return array;\n}\nstd::vector IrEmitter::GetIrArraysForOperandsOf(\n const HloInstruction* hlo) {\n std::vector arrays;\n std::transform(\n hlo->operands().begin(), hlo->operands().end(),\n std::back_inserter(arrays),\n [&](const HloInstruction* operand) { return GetIrArrayFor(operand); });\n return arrays;\n}\nllvm::Value* IrEmitter::GetEmittedValueFor(const HloInstruction* hlo) {\n auto it = emitted_value_.find(hlo);\n if (it == emitted_value_.end()) {\n LOG(FATAL) << \"could not find emitted value for: \" << hlo->ToString();\n }\n return it->second;\n}\nllvm::Type* IrEmitter::IrShapeType(const Shape& shape) {\n return llvm_ir::ShapeToIrType(shape, module_);\n}\nllvm::Value* IrEmitter::GetProfileCountersArgument() {\n return compute_function()->profile_counters_arg();\n}\nllvm::Value* IrEmitter::GetStatusArgument() {\n return compute_function()->status_arg();\n}\nllvm::Value* IrEmitter::GetBufferTableArgument() {\n return compute_function()->buffer_table_arg();\n}\nllvm::Value* IrEmitter::GetExecutableRunOptionsArgument() {\n return compute_function()->exec_run_options_arg();\n}\nllvm::BasicBlock* IrEmitter::GetReturnBlock() {\n return 
compute_function()->return_block();\n}\nvoid IrEmitter::EmitEarlyReturnIfErrorStatus() {\n llvm::Value* succeeded =\n EmitCallToFunc(runtime::kStatusIsSuccessSymbolName, {GetStatusArgument()},\n b()->getInt1Ty(), true,\n true);\n llvm_ir::EmitEarlyReturn(succeeded, b(), GetReturnBlock());\n}\nllvm::Value* IrEmitter::EmitThreadLocalBufferPointer(\n const BufferAllocation::Slice& slice, const Shape& target_shape) {\n const BufferAllocation& allocation = *slice.allocation();\n llvm::Value* tempbuf_address = [&]() -> llvm::Value* {\n auto param_it =\n computation_parameter_allocations_.find(slice.allocation()->index());\n if (param_it != computation_parameter_allocations_.end()) {\n int64_t param_number = param_it->second;\n llvm::Value* params = compute_function()->parameters_arg();\n llvm::Value* param_address_offset = llvm_ir::EmitBufferIndexingGEP(\n params, b()->getPtrTy(), param_number, b());\n llvm::LoadInst* param_address_untyped =\n Load(b()->getPtrTy(), param_address_offset);\n if (!target_shape.IsOpaque()) {\n AttachAlignmentMetadataForLoad(param_address_untyped, target_shape);\n AttachDereferenceableMetadataForLoad(param_address_untyped,\n target_shape);\n }\n return param_address_untyped;\n }\n const auto& assigned_buffers = allocation.assigned_buffers();\n CHECK_EQ(1, assigned_buffers.size());\n const Shape& shape = assigned_buffers.begin()->first->shape();\n std::pair key = {\n compute_function()->function(), slice};\n auto buf_it = thread_local_buffers_.find(key);\n if (buf_it == thread_local_buffers_.end()) {\n llvm::Value* buffer = llvm_ir::EmitAllocaAtFunctionEntry(\n IrShapeType(shape), absl::StrCat(\"thread_local\", slice.ToString()),\n b(), MinimumAlignmentForShape(target_shape));\n auto it_inserted_pair = thread_local_buffers_.insert({key, buffer});\n CHECK(it_inserted_pair.second);\n buf_it = it_inserted_pair.first;\n }\n return buf_it->second;\n }();\n return tempbuf_address;\n}\nllvm::Value* IrEmitter::EmitGlobalBufferPointer(\n const 
BufferAllocation::Slice& slice, const Shape& target_shape) {\n const BufferAllocation& allocation = *slice.allocation();\n llvm::Value* tempbuf_address_ptr = llvm_ir::EmitBufferIndexingGEP(\n GetBufferTableArgument(), b()->getPtrTy(), slice.index(), b());\n llvm::LoadInst* tempbuf_address_base =\n Load(b()->getPtrTy(), tempbuf_address_ptr);\n AttachInvariantLoadMetadataForLoad(tempbuf_address_base);\n AttachAlignmentMetadataForLoad(tempbuf_address_base, allocation.size());\n AttachDereferenceableMetadataForLoad(tempbuf_address_base, allocation.size());\n llvm::Value* tempbuf_address_untyped = tempbuf_address_base;\n if (slice.offset() > 0) {\n tempbuf_address_untyped = InBoundsGEP(\n b()->getInt8Ty(), tempbuf_address_base, b()->getInt64(slice.offset()));\n }\n return tempbuf_address_untyped;\n}\nllvm::Value* IrEmitter::EmitBufferPointer(const BufferAllocation::Slice& slice,\n const Shape& target_shape) {\n if (slice.allocation()->is_thread_local()) {\n return EmitThreadLocalBufferPointer(slice, target_shape);\n } else if (slice.allocation()->is_constant()) {\n return FindOrDie(constant_buffer_to_global_, slice.allocation()->index());\n } else {\n return EmitGlobalBufferPointer(slice, target_shape);\n }\n}\nabsl::Status IrEmitter::EmitTargetAddressForOp(const HloInstruction* op) {\n const Shape& target_shape = op->shape();\n TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice slice,\n assignment_.GetUniqueTopLevelSlice(op));\n llvm::Value* addr = EmitBufferPointer(slice, target_shape);\n addr->setName(IrName(op));\n emitted_value_[op] = addr;\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::EmitTargetElementLoop(\n const HloInstruction* target_op, absl::string_view desc,\n const llvm_ir::ElementGenerator& element_generator,\n std::optional result_array_opt) {\n VLOG(2) << \"EmitTargetElementLoop: \" << target_op->ToString();\n llvm_ir::IrArray target_array;\n if (result_array_opt.has_value()) {\n target_array = result_array_opt.value();\n } else {\n 
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(target_op));\n target_array = GetIrArrayFor(target_op);\n }\n const Shape& target_shape = target_op->shape();\n if (target_shape.IsTuple() &&\n (target_op->opcode() == HloOpcode::kFusion ||\n target_op->opcode() == HloOpcode::kReduce ||\n target_op->opcode() == HloOpcode::kReduceWindow)) {\n TF_RET_CHECK(num_dynamic_loop_bounds_ == 0);\n std::vector output_arrays;\n for (int64_t i = 0; i < ShapeUtil::TupleElementCount(target_shape); ++i) {\n TF_ASSIGN_OR_RETURN(BufferAllocation::Slice slice,\n assignment_.GetUniqueSlice(target_op, {i}));\n const Shape& element_shape = ShapeUtil::GetSubshape(target_shape, {i});\n llvm::Value* op_target_address = EmitBufferPointer(slice, element_shape);\n llvm::Type* op_target_type = IrShapeType(element_shape);\n output_arrays.push_back(\n llvm_ir::IrArray(op_target_address, op_target_type, element_shape));\n }\n TF_RETURN_IF_ERROR(\n llvm_ir::LoopEmitter(element_generator, output_arrays, b())\n .EmitLoop(IrName(target_op, desc)));\n std::vector tuple_operand_ptrs;\n tuple_operand_ptrs.reserve(output_arrays.size());\n for (int64_t i = 0; i < output_arrays.size(); ++i) {\n tuple_operand_ptrs.push_back(output_arrays[i].GetBasePointer());\n }\n llvm_ir::EmitTuple(target_array, tuple_operand_ptrs, b());\n } else {\n if (ShouldEmitParallelLoopFor(*target_op)) {\n std::vector> dynamic_loop_bounds =\n compute_function()->GetDynamicLoopBounds();\n TF_RETURN_IF_ERROR(ParallelLoopEmitter(element_generator, target_array,\n &dynamic_loop_bounds, b())\n .EmitLoop(IrName(target_op, desc)));\n } else {\n TF_RETURN_IF_ERROR(\n llvm_ir::LoopEmitter(element_generator, target_array, b())\n .EmitLoop(IrName(target_op, desc)));\n }\n }\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::EmitMemcpy(const HloInstruction& source,\n const HloInstruction& destination) {\n llvm::Value* source_value = GetEmittedValueFor(&source);\n llvm::Value* destination_value = GetEmittedValueFor(&destination);\n int64_t 
source_size = ByteSizeOf(source.shape());\n MemCpy(destination_value, llvm::Align(1), source_value,\n llvm::Align(1), source_size);\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::ElementTypesSameAndSupported(\n const HloInstruction& instruction,\n absl::Span operands,\n absl::Span supported_types) {\n for (auto operand : operands) {\n TF_RET_CHECK(\n ShapeUtil::SameElementType(operands[0]->shape(), operand->shape()));\n }\n TF_RET_CHECK(!operands.empty());\n PrimitiveType primitive_type = operands[0]->shape().element_type();\n if (!absl::c_linear_search(supported_types, primitive_type)) {\n return Unimplemented(\"unsupported operand type %s in op %s\",\n PrimitiveType_Name(primitive_type),\n HloOpcodeString(instruction.opcode()));\n }\n return absl::OkStatus();\n}\nabsl::Status IrEmitter::DefaultAction(HloInstruction* hlo) {\n ElementalIrEmitter::HloToElementGeneratorMap operand_to_generator;\n for (const HloInstruction* operand : hlo->operands()) {\n operand_to_generator[operand] = [=](const llvm_ir::IrArray::Index& index) {\n return GetIrArrayFor(operand).EmitReadArrayElement(index, b());\n };\n }\n CpuElementalIrEmitter elemental_emitter(hlo_module_config_, this, module_);\n return EmitTargetElementLoop(\n hlo, \"elemental_loop\",\n elemental_emitter.MakeElementGenerator(hlo, operand_to_generator),\n std::nullopt);\n}\nllvm::Value* IrEmitter::EmitScalarReturningThreadLocalCall(\n const HloComputation& callee, absl::Span parameters,\n absl::string_view name) {\n std::vector return_value =\n EmitThreadLocalCall(callee, parameters, name, false);\n CHECK_EQ(return_value.size(), 1);\n return return_value[0];\n}\nstd::vector IrEmitter::EmitThreadLocalCall(\n const HloComputation& callee, absl::Span parameters,\n absl::string_view name, bool is_reducer, bool in_compute_function) {\n CHECK(absl::c_binary_search(thread_local_computations_, &callee));\n const Shape& return_shape = callee.root_instruction()->shape();\n bool is_scalar_return = 
ShapeUtil::IsScalar(return_shape);\n bool is_tuple_of_scalars_return =\n return_shape.IsTuple() &&\n absl::c_all_of(return_shape.tuple_shapes(), [&](const Shape& shape) {\n return ShapeUtil::IsScalar(shape);\n });\n CHECK(is_scalar_return || is_tuple_of_scalars_return);\n std::vector parameter_addrs;\n for (llvm::Value* parameter : parameters) {\n CHECK(!parameter->getType()->isPointerTy());\n llvm::Value* parameter_addr = llvm_ir::EmitAllocaAtFunctionEntry(\n parameter->getType(), \"arg_addr\", b());\n Store(parameter, parameter_addr);\n parameter_addrs.push_back(parameter_addr);\n }\n llvm::Type* return_value_buffer_type =\n llvm_ir::ShapeToIrType(return_shape, module_);\n std::string retval_alloca_name = absl::StrCat(name, \"_return_value_addr\");\n int retval_alignment =\n is_scalar_return\n ? MinimumAlignmentForPrimitiveType(return_shape.element_type())\n : 0;\n llvm::AllocaInst* return_value_buffer = llvm_ir::EmitAllocaAtFunctionEntry(\n return_value_buffer_type, retval_alloca_name, b(), retval_alignment);\n std::vector allocas_for_returned_scalars;\n if (is_scalar_return) {\n allocas_for_returned_scalars.push_back(return_value_buffer);\n } else {\n constexpr int max_tuple_size = 1000;\n CHECK_LT(return_shape.tuple_shapes_size(), max_tuple_size)\n << \"Multivalue function can not return more than 1000 elements to avoid\"\n << \" stack smashing\";\n allocas_for_returned_scalars =\n llvm_ir::EmitTupleAllocasAtFunctionEntry(return_shape, b());\n llvm_ir::IrArray tuple_array(return_value_buffer, return_value_buffer_type,\n return_shape);\n EmitTuple(tuple_array, allocas_for_returned_scalars, b());\n }\n llvm::Value* null_ptr = llvm::Constant::getNullValue(b()->getPtrTy());\n Call(\n FindOrDie(emitted_functions_,\n ComputationToEmit{&callee, allow_reassociation_ || is_reducer}),\n GetArrayFunctionCallArguments(\n parameter_addrs, b(), name,\n return_value_buffer,\n in_compute_function ? 
GetExecutableRunOptionsArgument() : null_ptr,\n null_ptr,\n in_compute_function ? GetStatusArgument() : null_ptr,\n in_compute_function ? GetProfileCountersArgument() : null_ptr));\n if (ComputationTransitivelyContainsCustomCall(&callee)) {\n DCHECK(!in_compute_function) << \"Custom call inside nested computations \"\n \"are not supported by Thunks runtime\";\n EmitEarlyReturnIfErrorStatus();\n }\n std::vector returned_scalars;\n returned_scalars.reserve(allocas_for_returned_scalars.size());\n for (llvm::Value* addr : allocas_for_returned_scalars) {\n returned_scalars.push_back(\n Load(llvm::cast(addr)->getAllocatedType(), addr));\n }\n return returned_scalars;\n}\nvoid IrEmitter::EmitGlobalCall(const HloComputation& callee,\n absl::string_view name) {\n CHECK(absl::c_binary_search(global_computations_, &callee));\n Call(FindOrDie(emitted_functions_,\n ComputationToEmit{&callee, allow_reassociation_}),\n GetArrayFunctionCallArguments(\n {}, b(), name,\n llvm::Constant::getNullValue(b()->getPtrTy()),\n GetExecutableRunOptionsArgument(),\n GetBufferTableArgument(),\n GetStatusArgument(),\n GetProfileCountersArgument()));\n if (ComputationTransitivelyContainsCustomCall(&callee)) {\n EmitEarlyReturnIfErrorStatus();\n }\n}\nllvm::Value* IrEmitter::GetBufferForGlobalCallReturnValue(\n const HloComputation& callee) {\n const HloInstruction* root_inst = callee.root_instruction();\n if (root_inst->opcode() == HloOpcode::kOutfeed) {\n return llvm::Constant::getNullValue(b()->getPtrTy());\n }\n const BufferAllocation::Slice root_buffer =\n assignment_.GetUniqueTopLevelSlice(root_inst).value();\n return EmitBufferPointer(root_buffer, root_inst->shape());\n}\nvoid IrEmitter::BindFusionArguments(const HloInstruction* fusion,\n FusedIrEmitter* fused_emitter) {\n for (int i = 0; i < fusion->operand_count(); i++) {\n const HloInstruction* operand = fusion->operand(i);\n fused_emitter->BindGenerator(\n *fusion->fused_parameter(i),\n [this, operand](llvm_ir::IrArray::Index index) {\n 
return GetIrArrayFor(operand).EmitReadArrayElement(index, b());\n });\n }\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/cpu/ir_emitter.h\"\n#include \n#include \n#include \n#include \"llvm/IR/BasicBlock.h\"\n#include \"llvm/IR/Constants.h\"\n#include \"llvm/IR/DerivedTypes.h\"\n#include \"llvm/IR/Function.h\"\n#include \"llvm/IR/GlobalValue.h\"\n#include \"llvm/IR/IRBuilder.h\"\n#include \"llvm/IR/Instructions.h\"\n#include \"llvm/IR/LLVMContext.h\"\n#include \"llvm/Support/Casting.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/service/buffer_assignment.h\"\n#include \"xla/service/cpu/ir_function.h\"\n#include \"xla/service/cpu/target_machine_features_fake.h\"\n#include \"xla/service/hlo_module_config.h\"\n#include \"xla/service/hlo_ordering.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/service/logical_buffer.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"tsl/platform/statusor.h\"\n#include \"tsl/platform/test.h\"\nnamespace xla::cpu {\nnamespace {\nusing IrEmitterTest = HloTestBase;\nstatic std::pair CreateFunction(\n llvm::LLVMContext& context, llvm::Module* module, llvm::IRBuilder<>* b) {\n llvm::PointerType* ptrtype = llvm::PointerType::getUnqual(context);\n llvm::FunctionType* ftype = llvm::FunctionType::get(ptrtype, ptrtype, false);\n llvm::Function* function = llvm::dyn_cast(\n module->getOrInsertFunction(\"func2\", ftype).getCallee());\n llvm::BasicBlock* return_block =\n llvm::BasicBlock::Create(context, \"\", function);\n b->SetInsertPoint(return_block);\n [[maybe_unused]] llvm::ReturnInst* ret = b->CreateRet(\n llvm::ConstantPointerNull::get(llvm::PointerType::getUnqual(context)));\n return std::make_pair(function, return_block);\n}\nTEST_F(IrEmitterTest, ComputeFuncStack) {\n llvm::LLVMContext context;\n auto module = std::make_unique(\"test\", context);\n const char* hlo_text = R\"(\n HloModule m\n ENTRY main {\n ROOT %zero = f32[] constant(0)\n })\";\n 
TF_ASSERT_OK_AND_ASSIGN(auto hlo, ParseAndReturnUnverifiedModule(hlo_text));\n const HloInstruction* zero = FindInstruction(hlo.get(), \"zero\");\n ASSERT_NE(zero, nullptr);\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr buffer_assignment,\n BufferAssigner::Run(\n hlo.get(), std::make_unique(hlo.get()),\n backend().compiler()->BufferSizeBytesFunction(),\n [](LogicalBuffer::Color) { return 1; }));\n TargetMachineFeaturesWithFakeAlignmentLogic target_machine(\n [](int64_t size) { return 1; });\n IrEmitter ir_emitter(nullptr, *hlo, *buffer_assignment, module.get(), {}, {},\n {}, &target_machine, false);\n llvm::IRBuilder<>* b = ir_emitter.b();\n ASSERT_NE(b, nullptr);\n const std::pair fb =\n CreateFunction(context, module.get(), b);\n llvm::Function* function = fb.first;\n llvm::BasicBlock* return_block = fb.second;\n ASSERT_NE(function, nullptr);\n ASSERT_NE(return_block, nullptr);\n const auto funcname = \"func1\";\n const auto linkagetype = llvm::GlobalValue::LinkageTypes::ExternalLinkage;\n const HloModuleConfig module_config;\n ir_emitter.PushComputeFunction(funcname, linkagetype, module_config,\n module.get(), 0);\n ASSERT_EQ(ir_emitter.compute_function()->function()->getName().str(),\n funcname);\n ir_emitter.PushComputeFunction(b, module.get(), 0, function, nullptr,\n return_block);\n ASSERT_EQ(ir_emitter.compute_function()->function(), function);\n ir_emitter.PopComputeFunction();\n ASSERT_EQ(ir_emitter.compute_function()->function()->getName().str(),\n funcname);\n ir_emitter.PopComputeFunction();\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/ir_emitter.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/ir_emitter_test.cc"},"Commit 
Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1195,"cells":{"ID":{"kind":"string","value":"94482255-ec7d-455f-8827-0a005b4924b8"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"execution_stream_assignment"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/execution_stream_assignment.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/execution_stream_assignment_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/execution_stream_assignment.h\"\n#include \n#include \n#include \n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/log/check.h\"\n#include \"absl/status/status.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/call_graph.h\"\n#include \"xla/service/gpu/runtime/thunk.h\"\nnamespace xla::gpu {\nExecutionStreamAssignment::ExecutionStreamAssignment(\n const HloModule* module, ExecutionStreamAssignmentOptions options) {\n std::unique_ptr call_graph = CallGraph::Build(module);\n ExecutionStreamId next_stream_id = ExecutionStreamId(1);\n struct Pending {\n Pending(HloComputation* node, ExecutionStreamId stream_id)\n : node(node), stream_id(stream_id) {}\n HloComputation* node;\n ExecutionStreamId stream_id;\n };\n std::deque queue;\n queue.emplace_back(module->entry_computation(), ExecutionStreamId(0));\n auto enqueue_called_computations = [&](const CallSite& callsite,\n ExecutionStreamId stream) {\n if (GetInstructionCallContext(callsite.instruction()->opcode()) ==\n CallContext::kEmbedded) {\n return;\n }\n for (HloComputation* computation : 
callsite.called_computations()) {\n queue.emplace_back(computation, stream);\n }\n };\n auto assign_async_execution_streams =\n [&](HloInstruction* instruction, ExecutionStreamId source_stream_id) {\n AsyncExecutionStreamIds streams;\n streams.source_stream_id = source_stream_id;\n streams.destination_stream_id = next_stream_id;\n CHECK(async_instructions_.try_emplace(instruction, streams).second);\n next_stream_id++;\n if (next_stream_id.value() > options.number_of_execution_streams) {\n next_stream_id = ExecutionStreamId(1);\n }\n };\n while (!queue.empty()) {\n Pending pending = queue.front();\n queue.pop_front();\n for (HloInstruction* instruction : pending.node->instructions()) {\n if (instruction->IsAsynchronous()) continue;\n if (instruction->opcode() == HloOpcode::kCopyStart) {\n assign_async_execution_streams(instruction, pending.stream_id);\n } else {\n CHECK(sync_instructions_.try_emplace(instruction, pending.stream_id)\n .second);\n }\n }\n for (const CallSite& callsite :\n call_graph->GetNode(pending.node).callsites()) {\n if (callsite.instruction()->IsAsynchronous()) {\n CHECK_EQ(callsite.instruction()->opcode(), HloOpcode::kAsyncStart);\n enqueue_called_computations(callsite, next_stream_id);\n assign_async_execution_streams(callsite.instruction(),\n pending.stream_id);\n } else {\n enqueue_called_computations(callsite, pending.stream_id);\n }\n }\n for (HloInstruction* instruction : pending.node->instructions()) {\n if (!instruction->IsAsynchronous()) continue;\n if (instruction->opcode() == HloOpcode::kAsyncStart) {\n CHECK(async_instructions_.find(instruction) !=\n async_instructions_.end());\n } else {\n HloInstruction* async_start =\n Cast(instruction)->async_chain_start();\n AsyncExecutionStreamIds async_start_streams =\n async_instructions_.at(async_start);\n CHECK(async_instructions_.try_emplace(instruction, async_start_streams)\n .second);\n }\n }\n }\n}\nnamespace {\nabsl::Status StreamNotFoundError(const HloInstruction* instruction) {\n 
return absl::NotFoundError(absl::StrCat(\n \"No ExecutionStreamId found for \", instruction->ToString(),\n \"; this may happen if the Computation is not reachable from the module's \"\n \"entrypoint, or if it's only reachable through a embedded calls.\"));\n}\n} \nabsl::StatusOr\nExecutionStreamAssignment::GetSyncExecutionStreamId(\n const HloInstruction* instruction) const {\n CHECK(!instruction->IsAsynchronous());\n auto stream = sync_instructions_.find(instruction);\n if (stream == sync_instructions_.end()) {\n return StreamNotFoundError(instruction);\n }\n return stream->second;\n}\nabsl::StatusOr\nExecutionStreamAssignment::GetAsyncExecutionStreamIds(\n const HloInstruction* instruction) const {\n CHECK(instruction->IsAsynchronous() ||\n instruction->opcode() == HloOpcode::kCopyStart);\n auto streams = async_instructions_.find(instruction);\n if (streams == async_instructions_.end()) {\n return StreamNotFoundError(instruction);\n }\n return streams->second;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/gpu/execution_stream_assignment.h\"\n#include \n#include \n#include \n#include \n#include \"absl/status/status.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/service/gpu/runtime/thunk.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"tsl/platform/status_matchers.h\"\n#include \"tsl/platform/statusor.h\"\nusing ::tsl::testing::IsOkAndHolds;\nusing ::tsl::testing::StatusIs;\nusing AsyncExecutionStreamIds =\n ::xla::gpu::ExecutionStreamAssignment::AsyncExecutionStreamIds;\nnamespace xla::gpu {\nnamespace {\nclass ExecutionStreamAssignmentTest : public HloTestBase {\n protected:\n void ExpectExecutionStreamForSyncInstructions(\n const ExecutionStreamAssignment& assignment, HloComputation* 
computation,\n ExecutionStreamId stream) const {\n for (const HloInstruction* instruction : computation->instructions()) {\n if (instruction->IsAsynchronous()) continue;\n EXPECT_THAT(assignment.GetSyncExecutionStreamId(instruction),\n IsOkAndHolds(stream));\n }\n }\n};\nTEST_F(ExecutionStreamAssignmentTest, AsyncFusion) {\n const char* kModuleStr = R\"(\n HloModule m\n leaf1 {\n p0 = f32[2,2] parameter(0)\n ROOT add = f32[2,2] add(p0, p0)\n }\n leaf2 {\n p0 = f32[2,2] parameter(0)\n ROOT add = f32[2,2] add(p0, p0)\n }\n leaf3 {\n p0 = f32[2,2] parameter(0)\n ROOT add = f32[2,2] add(p0, p0)\n }\n ENTRY entry {\n p0 = f32[2,2] parameter(0)\n start1 = ((f32[2,2]), f32[2,2], s32[]) fusion-start(p0),\n kind=kLoop, calls=leaf1\n start2 = ((f32[2,2]), f32[2,2], s32[]) fusion-start(p0),\n kind=kLoop, calls=leaf2\n start3 = ((f32[2,2]), f32[2,2], s32[]) fusion-start(p0),\n kind=kLoop, calls=leaf3\n update1 = ((f32[2,2]), f32[2,2], s32[]) fusion-update(start1)\n update2 = ((f32[2,2]), f32[2,2], s32[]) fusion-update(start2)\n update3 = ((f32[2,2]), f32[2,2], s32[]) fusion-update(start3)\n done1 = f32[2,2] fusion-done(update1)\n done2 = f32[2,2] fusion-done(update2)\n done3 = f32[2,2] fusion-done(update3)\n ROOT done = f32[2,2] custom-call(done1, done2, done3),\n custom_call_target=\"target\"\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kModuleStr));\n ExecutionStreamAssignment assignment(\n module.get(),\n ExecutionStreamAssignmentOptions{2});\n ExpectExecutionStreamForSyncInstructions(\n assignment, FindComputation(module.get(), \"entry\"), ExecutionStreamId(0));\n for (std::string_view instruction : {\"start1\", \"update1\", \"done1\"}) {\n EXPECT_THAT(assignment.GetAsyncExecutionStreamIds(Cast(\n FindInstruction(module.get(), instruction))),\n IsOkAndHolds(AsyncExecutionStreamIds{\n ExecutionStreamId(0),\n ExecutionStreamId(1)}));\n }\n for (std::string_view instruction : {\"start2\", \"update2\", \"done2\"}) {\n 
EXPECT_THAT(assignment.GetAsyncExecutionStreamIds(Cast(\n FindInstruction(module.get(), instruction))),\n IsOkAndHolds(AsyncExecutionStreamIds{\n ExecutionStreamId(0),\n ExecutionStreamId(2)}));\n }\n for (std::string_view instruction : {\"start3\", \"update3\", \"done3\"}) {\n EXPECT_THAT(assignment.GetAsyncExecutionStreamIds(Cast(\n FindInstruction(module.get(), instruction))),\n IsOkAndHolds(AsyncExecutionStreamIds{\n ExecutionStreamId(0),\n ExecutionStreamId(1)}));\n }\n ExpectExecutionStreamForSyncInstructions(\n assignment,\n Cast(FindInstruction(module.get(), \"start1\"))\n ->async_wrapped_computation(),\n ExecutionStreamId(1));\n ExpectExecutionStreamForSyncInstructions(\n assignment,\n Cast(FindInstruction(module.get(), \"start2\"))\n ->async_wrapped_computation(),\n ExecutionStreamId(2));\n}\nTEST_F(ExecutionStreamAssignmentTest, CopyStartStreamIdTest) {\n const char* const hlo_copy_start_string = R\"(\n HloModule Module\n ENTRY CopyStartAndCopyDone {\n p0 = f32[2,3]{1,0:S(1)} parameter(0)\n copy-start = (f32[2,3]{1,0:S(2)}, f32[2,3]{1,0:S(1)}, u32[]) copy-start(p0)\n ROOT copy-done = f32[2,3]{1,0:S(2)} copy-done(copy-start)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_copy_start_string));\n ExecutionStreamAssignment assignment(module.get());\n for (std::string_view instruction : {\"copy-start\"}) {\n EXPECT_THAT(\n assignment.GetAsyncExecutionStreamIds(Cast(\n FindInstruction(module.get(), instruction))),\n IsOkAndHolds(AsyncExecutionStreamIds{\n ExecutionStreamId(0),\n ExecutionStreamId(1)}));\n }\n}\nTEST_F(ExecutionStreamAssignmentTest, FusionComputations) {\n const char* kModuleStr = R\"(\n HloModule m\n reduce {\n p0 = f32[] parameter(0)\n p1 = f32[] parameter(1)\n ROOT add = f32[] add(p0, p1)\n }\n fusion {\n p0 = f32[4] parameter(0)\n c0 = f32[] constant(0)\n ROOT reduce = f32[] reduce(p0, c0), dimensions={0}, to_apply=reduce\n }\n ENTRY entry {\n p0 = f32[4] parameter(0)\n ROOT done = f32[] 
fusion(p0), kind=kLoop, calls=fusion\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kModuleStr));\n ExecutionStreamAssignment assignment(module.get());\n ExpectExecutionStreamForSyncInstructions(\n assignment, FindComputation(module.get(), \"entry\"), ExecutionStreamId(0));\n for (std::string_view computation : {\"reduce\", \"fusion\"}) {\n for (const HloInstruction* instruction :\n FindComputation(module.get(), computation)->instructions()) {\n EXPECT_THAT(assignment.GetSyncExecutionStreamId(instruction),\n StatusIs(absl::StatusCode::kNotFound));\n }\n }\n}\nTEST_F(ExecutionStreamAssignmentTest, UnreachableComputation) {\n const char* kModuleStr = R\"(\n HloModule m\n unreachable {\n p0 = f32[2,2] parameter(0)\n ROOT add = f32[2,2] add(p0, p0)\n }\n ENTRY entry {\n p0 = f32[2,2] parameter(0)\n ROOT add = f32[2,2] add(p0, p0)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kModuleStr));\n ExecutionStreamAssignment assignment(module.get());\n ExpectExecutionStreamForSyncInstructions(\n assignment, FindComputation(module.get(), \"entry\"), ExecutionStreamId(0));\n for (const HloInstruction* instruction :\n FindComputation(module.get(), \"unreachable\")->instructions()) {\n EXPECT_THAT(assignment.GetSyncExecutionStreamId(instruction),\n StatusIs(absl::StatusCode::kNotFound));\n }\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/execution_stream_assignment.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/execution_stream_assignment_test.cc"},"Commit 
Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1196,"cells":{"ID":{"kind":"string","value":"2e67453e-51a6-4245-90a4-4e34db3316cd"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"hlo_algorithm_denylist"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/hlo_algorithm_denylist.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/hlo_algorithm_denylist_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/hlo_algorithm_denylist.h\"\n#include \n#include \n#include \n#include \n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/log/check.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"xla/debug_options_flags.h\"\n#include \"xla/hlo/ir/backend_config.h\"\n#include \"xla/service/gpu/autotuning/gpu_autotuning.pb.h\"\n#include \"xla/service/gpu/backend_configs.pb.h\"\n#include \"xla/stream_executor/dnn.h\"\n#include \"tsl/platform/env.h\"\n#include \"tsl/platform/protobuf.h\"\n#include \"tsl/platform/status.h\"\nnamespace xla {\nnamespace gpu {\nconstexpr char kDefaultDenylist[] = R\"pb(\n entries {\n hlo: \"(f32[512,512,7,7]{3,2,1,0}, u8[0]{0}) custom-call(f32[512,512,7,7]{3,2,1,0}, f32[512,512,3,3]{3,2,1,0}, f32[512]{0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\\\"__cudnn$convBiasActivationForward\\\"\"\n backend_config {\n operation_queue_id: 0\n wait_on_operation_queues: []\n cudnn_conv_backend_config: {\n activation_mode: kNone\n conv_result_scale: 1\n side_input_scale: 0\n leakyrelu_alpha: 0\n },\n force_earliest_schedule: false\n }\n cc { major: 7 }\n cudnn_version { major: 9 }\n algos { id: 14 }\n }\n entries {\n hlo: \"(f32[512,512,7,7]{3,2,1,0}, u8[0]{0}) custom-call(f32[512,512,7,7]{3,2,1,0}, f32[512,512,3,3]{3,2,1,0}, f32[512]{0}), window={size=3x3 pad=1_1x1_1}, 
dim_labels=bf01_oi01->bf01, custom_call_target=\\\"__cudnn$convBiasActivationForward\\\"\"\n backend_config {\n operation_queue_id: 0\n wait_on_operation_queues: []\n cudnn_conv_backend_config: {\n activation_mode: kNone\n conv_result_scale: 1\n side_input_scale: 0\n leakyrelu_alpha: 0\n },\n force_earliest_schedule: false\n }\n cc { major: 7 }\n cudnn_version { major: 9 minor: 1 patch: 1 }\n algos { id: 14 }\n }\n entries {\n hlo: \"(f32[27,256,32,32]{3,2,1,0}, u8[0]{0}) custom-call(f32[27,256,32,32]{3,2,1,0}, f32[256,256,3,3]{3,2,1,0}, f32[256]{0}, f32[27,256,32,32]{3,2,1,0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\\\"__cudnn$convBiasActivationForward\\\"\"\n backend_config {\n operation_queue_id: 0\n wait_on_operation_queues: []\n cudnn_conv_backend_config: {\n activation_mode: kNone\n conv_result_scale: 1\n side_input_scale: 1,\n leakyrelu_alpha: 0\n },\n force_earliest_schedule: false\n }\n cc { major: 7 }\n cudnn_version { major: 9 }\n algos { id: 14 }\n }\n entries {\n hlo: \"(f32[27,256,32,32]{3,2,1,0}, u8[0]{0}) custom-call(f32[27,256,32,32]{3,2,1,0}, f32[256,256,3,3]{3,2,1,0}, f32[256]{0}, f32[27,256,32,32]{3,2,1,0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\\\"__cudnn$convBiasActivationForward\\\"\"\n backend_config {\n operation_queue_id: 0\n wait_on_operation_queues: []\n cudnn_conv_backend_config: {\n activation_mode: kNone\n conv_result_scale: 1\n side_input_scale: 1\n leakyrelu_alpha: 0\n },\n force_earliest_schedule: false\n }\n cc { major: 7 minor: 5 }\n cudnn_version { major: 9 }\n algos { id: 14 }\n }\n entries {\n hlo: \"(f32[27,256,32,32]{3,2,1,0}, u8[0]{0}) custom-call(f32[27,256,32,32]{3,2,1,0}, f32[256,256,3,3]{3,2,1,0}, f32[256]{0}, f32[27,256,32,32]{3,2,1,0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\\\"__cudnn$convBiasActivationForward\\\"\"\n backend_config {\n operation_queue_id: 0\n wait_on_operation_queues: []\n 
cudnn_conv_backend_config: {\n activation_mode: kNone\n conv_result_scale: 1\n side_input_scale: 1\n leakyrelu_alpha: 0\n },\n force_earliest_schedule: false\n }\n cc { major: 7 }\n cudnn_version { major: 9 minor: 1 patch: 1 }\n algos { id: 14 }\n }\n entries {\n hlo: \"(f32[27,256,32,32]{3,2,1,0}, u8[0]{0}) custom-call(f32[27,256,32,32]{3,2,1,0}, f32[256,256,3,3]{3,2,1,0}, f32[256]{0}, f32[27,256,32,32]{3,2,1,0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\\\"__cudnn$convBiasActivationForward\\\"\"\n backend_config {\n operation_queue_id: 0\n wait_on_operation_queues: []\n cudnn_conv_backend_config: {\n activation_mode: kNone\n conv_result_scale: 1\n side_input_scale: 1\n leakyrelu_alpha: 0\n },\n force_earliest_schedule: false\n }\n cc { major: 7 minor: 5 }\n cudnn_version { major: 9 minor: 1 patch: 1 }\n algos { id: 14 }\n }\n)pb\";\nstd::vector GetDisabledConvAlgorithms(\n ComputeCapability cc, CudnnVersion cudnn_version,\n const std::string& blas_version, const std::string& hlo) {\n using MapType = absl::flat_hash_map<\n std::tuple,\n std::vector>;\n static MapType* denylist = [] {\n auto* list = new MapType();\n AlgorithmDenylist proto;\n auto process_denylist = [list](const AlgorithmDenylist& proto) {\n for (const auto& entry : proto.entries()) {\n for (const auto& algo : entry.algos()) {\n (*list)[std::make_tuple(HloStringWithGpuBackendConfig(\n entry.hlo(), entry.backend_config()),\n entry.cc().major(), entry.cc().minor(),\n entry.cudnn_version().major(),\n entry.cudnn_version().minor(),\n entry.cudnn_version().patch(),\n entry.blas_version())]\n .emplace_back(algo.id(), algo.tensor_ops(), std::nullopt);\n }\n }\n };\n std::string file_path =\n GetDebugOptionsFromFlags().xla_gpu_algorithm_denylist_path();\n if (!file_path.empty()) {\n TF_CHECK_OK(tsl::ReadTextProto(tsl::Env::Default(), file_path, &proto));\n process_denylist(proto);\n }\n CHECK(tsl::protobuf::TextFormat::ParseFromString(\n std::string(kDefaultDenylist), 
&proto));\n process_denylist(proto);\n return list;\n }();\n std::vector algorithms;\n auto add_matching_disabled_algorithms_to_result = [&](const auto& key) {\n auto iter = denylist->find(key);\n if (iter != denylist->end()) {\n algorithms.insert(algorithms.end(), iter->second.begin(),\n iter->second.end());\n }\n };\n auto key = std::make_tuple(hlo, cc.major(), cc.minor(), cudnn_version.major(),\n cudnn_version.minor(), cudnn_version.patch(),\n blas_version);\n add_matching_disabled_algorithms_to_result(key);\n std::get<6>(key) = std::string{};\n add_matching_disabled_algorithms_to_result(key);\n return algorithms;\n}\nstd::string HloStringWithGpuBackendConfig(const std::string& hlo,\n GpuBackendConfig config) {\n BackendConfigWrapper backend_config(config);\n return absl::StrCat(hlo, \", backend_config=\", backend_config.GetRawString());\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/gpu/hlo_algorithm_denylist.h\"\n#include \n#include \n#include \"absl/strings/str_cat.h\"\n#include \"xla/stream_executor/dnn.h\"\n#include \"xla/tests/test_utils.h\"\n#include \"tsl/platform/env.h\"\n#include \"tsl/platform/path.h\"\n#include \"tsl/platform/test.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nclass DenylistTest : public testing::Test {\n protected:\n DenylistTest() {\n std::string existing_xla_flags;\n const char* env = std::getenv(\"XLA_FLAGS\");\n if (env != nullptr) {\n existing_xla_flags = absl::StrCat(env, \" \");\n }\n tsl::setenv(\n \"XLA_FLAGS\",\n absl::StrCat(\n existing_xla_flags, \"--xla_gpu_algorithm_denylist_path=\",\n tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), \"service\", \"gpu\",\n \"data\", \"hlo_algorithm_denylist.pbtxt\"))\n .data(),\n 1);\n config_ =\n ParseTextProto(\n \"operation_queue_id: 0 wait_on_operation_queues: [] \"\n \"cudnn_conv_backend_config: { activation_mode: kNone \"\n \"conv_result_scale: 1 side_input_scale: 0 leakyrelu_alpha: 0} \"\n \"force_earliest_schedule: false\")\n 
.value();\n }\n GpuBackendConfig config_;\n};\nTEST_F(DenylistTest, DefaultTest) {\n ComputeCapability cc;\n cc.set_major(7);\n cc.set_minor(0);\n CudnnVersion cudnn_version;\n cudnn_version.set_major(7);\n cudnn_version.set_minor(6);\n cudnn_version.set_patch(2);\n auto list = GetDisabledConvAlgorithms(\n cc, cudnn_version, \"9000\",\n HloStringWithGpuBackendConfig(\n R\"((f16[256,112,112,64]{3,2,1,0}, u8[0]{0}) custom-call(f16[256,224,224,4]{3,2,1,0}, f16[7,7,4,64]{2,1,0,3}), window={size=7x7 stride=2x2 pad=3_3x3_3}, dim_labels=b01f_01io->b01f, custom_call_target=\"__cudnn$convForward\")\",\n config_));\n EXPECT_THAT(list, testing::UnorderedElementsAre(\n stream_executor::dnn::AlgorithmDesc{0, true},\n stream_executor::dnn::AlgorithmDesc{0, false},\n stream_executor::dnn::AlgorithmDesc{1, true},\n stream_executor::dnn::AlgorithmDesc{1, false},\n stream_executor::dnn::AlgorithmDesc{42, true},\n stream_executor::dnn::AlgorithmDesc{42, false}));\n}\nTEST_F(DenylistTest, NegativeTest) {\n ComputeCapability cc;\n cc.set_major(7);\n cc.set_minor(0);\n CudnnVersion cudnn_version;\n cudnn_version.set_major(7);\n cudnn_version.set_minor(6);\n cudnn_version.set_minor(2);\n auto list =\n GetDisabledConvAlgorithms(cc, cudnn_version, \"9000\", R\"(invalid hlo)\");\n EXPECT_THAT(list, testing::IsEmpty());\n}\nTEST_F(DenylistTest, NoBlasVersionSet) {\n ComputeCapability cc;\n cc.set_major(7);\n cc.set_minor(0);\n CudnnVersion cudnn_version;\n cudnn_version.set_major(7);\n cudnn_version.set_minor(6);\n cudnn_version.set_patch(2);\n auto list = GetDisabledConvAlgorithms(\n cc, cudnn_version, \"120301\",\n HloStringWithGpuBackendConfig(\n R\"((f16[256,112,112,64]{3,2,1,0}, u8[0]{0}) custom-call(f16[256,224,224,4]{3,2,1,0}, f16[7,7,4,64]{2,1,0,3}), window={size=7x7 stride=2x2 pad=3_3x3_3}, dim_labels=b01f_01io->b01f, custom_call_target=\"__cudnn$convForward\")\",\n config_));\n EXPECT_THAT(list, testing::UnorderedElementsAre(\n stream_executor::dnn::AlgorithmDesc{42, true},\n 
stream_executor::dnn::AlgorithmDesc{42, false}));\n}\nTEST_F(DenylistTest, EntryFromHardcodedList) {\n ComputeCapability cc;\n cc.set_major(7);\n cc.set_minor(0);\n CudnnVersion cudnn_version;\n cudnn_version.set_major(9);\n cudnn_version.set_minor(0);\n cudnn_version.set_patch(0);\n auto list = GetDisabledConvAlgorithms(\n cc, cudnn_version, \"9000\",\n HloStringWithGpuBackendConfig(\n R\"((f32[512,512,7,7]{3,2,1,0}, u8[0]{0}) custom-call(f32[512,512,7,7]{3,2,1,0}, f32[512,512,3,3]{3,2,1,0}, f32[512]{0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\")\",\n config_));\n EXPECT_THAT(list, testing::ElementsAre(\n stream_executor::dnn::AlgorithmDesc{14, false}));\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/hlo_algorithm_denylist.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/hlo_algorithm_denylist_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1197,"cells":{"ID":{"kind":"string","value":"0147930e-0d97-4677-8292-daa6da3d7261"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"matmul_utils"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/matmul_utils.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/matmul_utils_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/matmul_utils.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/status.h\"\n#include 
\"absl/strings/str_cat.h\"\n#include \"absl/types/span.h\"\n#include \"xla/autotuning.pb.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/primitive_util.h\"\n#include \"xla/service/algorithm_util.h\"\n#include \"xla/service/gpu/backend_configs.pb.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/status_macros.h\"\n#include \"xla/stream_executor/blas.h\"\n#include \"xla/stream_executor/device_memory.h\"\n#include \"xla/stream_executor/gpu/gpu_blas_lt.h\"\n#include \"xla/stream_executor/numeric_options.h\"\n#include \"xla/stream_executor/stream_executor.h\"\n#include \"xla/types.h\"\n#include \"xla/util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/status.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace gpu {\nabsl::StatusOr> GetNonContractingDims(\n const Shape& shape, absl::Span batch_dims,\n absl::Span contracting_dims) {\n std::vector non_contracting_dims;\n for (int64_t dim = 0; dim < shape.rank(); ++dim) {\n bool is_batch = absl::c_count(batch_dims, dim) != 0;\n bool is_contracting = absl::c_count(contracting_dims, dim) != 0;\n TF_RET_CHECK(!(is_batch && is_contracting));\n if (!(is_batch || is_contracting)) non_contracting_dims.push_back(dim);\n }\n TF_RET_CHECK(batch_dims.size() + contracting_dims.size() +\n non_contracting_dims.size() ==\n shape.rank());\n return non_contracting_dims;\n}\nconst tsl::protobuf::RepeatedField& BatchDimensionsForOperand(\n const HloInstruction& dot, const int operand_number) {\n const DotDimensionNumbers& dimension_numbers = dot.dot_dimension_numbers();\n if (operand_number == 0) {\n return dimension_numbers.lhs_batch_dimensions();\n }\n return dimension_numbers.rhs_batch_dimensions();\n}\nabsl::StatusOr ContractingDimensionIndex(const HloInstruction& dot,\n const int operand_number) {\n const 
DotDimensionNumbers& dimension_numbers = dot.dot_dimension_numbers();\n if (operand_number == 0) {\n TF_RET_CHECK(dimension_numbers.lhs_contracting_dimensions().size() == 1);\n return dimension_numbers.lhs_contracting_dimensions(0);\n }\n TF_RET_CHECK(dimension_numbers.rhs_contracting_dimensions().size() == 1);\n return dimension_numbers.rhs_contracting_dimensions(0);\n}\nabsl::StatusOr NonContractingDimensionIndex(const HloInstruction& dot,\n const int operand_number) {\n TF_ASSIGN_OR_RETURN(int64_t contracting_dim,\n ContractingDimensionIndex(dot, operand_number));\n TF_ASSIGN_OR_RETURN(\n std::vector non_contracting_dims,\n GetNonContractingDims(dot.operand(operand_number)->shape(),\n BatchDimensionsForOperand(dot, operand_number),\n {contracting_dim}));\n TF_RET_CHECK(non_contracting_dims.size() == 1);\n return non_contracting_dims.front();\n}\nabsl::StatusOr GetBatchRowColumnShape(\n const Shape& shape, absl::Span batch_dims,\n absl::Span row_dims, absl::Span col_dims) {\n TF_RET_CHECK(shape.has_layout());\n std::vector minor_to_major;\n for (size_t i = 0; i < shape.rank();) {\n auto check_physically_sequential =\n [&](absl::Span dims) -> absl::Status {\n for (auto it = dims.rbegin(); it != dims.rend(); ++it) {\n if (*it != shape.layout().minor_to_major()[i++])\n return InvalidArgument(\"dims not physically_sequential\");\n }\n return absl::OkStatus();\n };\n int64_t dim = shape.layout().minor_to_major()[i];\n if (!row_dims.empty() && dim == row_dims.back()) {\n minor_to_major.push_back(1);\n TF_RETURN_IF_ERROR(check_physically_sequential(row_dims));\n } else if (!col_dims.empty() && dim == col_dims.back()) {\n minor_to_major.push_back(2);\n TF_RETURN_IF_ERROR(check_physically_sequential(col_dims));\n } else if (!batch_dims.empty() && (dim == batch_dims.back())) {\n minor_to_major.push_back(0);\n TF_RETURN_IF_ERROR(check_physically_sequential(batch_dims));\n } else {\n return InvalidArgument(\"dims not physically sequential\");\n }\n }\n if (col_dims.empty()) 
minor_to_major.push_back(2);\n if (row_dims.empty()) minor_to_major.push_back(1);\n if (batch_dims.empty()) minor_to_major.push_back(0);\n auto dim_size = [&](absl::Span dims) {\n return absl::c_accumulate(dims, 1, [&](int64_t size, int64_t dim) {\n return size * shape.dimensions(dim);\n });\n };\n return ShapeUtil::MakeShapeWithDenseLayout(\n shape.element_type(),\n {dim_size(batch_dims), dim_size(row_dims), dim_size(col_dims)},\n minor_to_major);\n}\n absl::StatusOr MatrixLayout::For(const Shape& shape) {\n TF_RET_CHECK(shape.rank() == 3);\n TF_RET_CHECK(shape.has_layout());\n int64_t batch_size = shape.dimensions(0);\n int64_t num_rows = shape.dimensions(1);\n int64_t num_cols = shape.dimensions(2);\n Order order{Order::kRowMajor};\n int64_t leading_dim_stride = num_cols;\n int64_t batch_stride = num_rows * num_cols;\n absl::Span minor_to_major = shape.layout().minor_to_major();\n switch (64 * minor_to_major[2] + 8 * minor_to_major[1] + minor_to_major[0]) {\n case 012: \n break;\n case 021: \n order = Order::kColumnMajor;\n leading_dim_stride = num_rows;\n break;\n case 0102: \n leading_dim_stride = batch_size * num_cols;\n batch_stride = num_cols;\n break;\n case 0201: \n order = Order::kColumnMajor;\n leading_dim_stride = batch_size * num_rows;\n batch_stride = num_rows;\n break;\n default:\n return Unimplemented(\"batch in most minor dimension\");\n }\n if (batch_size == 1) {\n batch_stride = 0;\n }\n return MatrixLayout{se::gpu::MatrixLayout{shape.element_type(), num_rows,\n num_cols, order, batch_size,\n leading_dim_stride, batch_stride}};\n}\n absl::StatusOr MatrixLayout::For(\n const Shape& shape, absl::Span batch_dims,\n absl::Span row_dims, absl::Span col_dims) {\n TF_ASSIGN_OR_RETURN(\n Shape batch_row_col_shape,\n GetBatchRowColumnShape(shape, batch_dims, row_dims, col_dims));\n return MatrixLayout::For(batch_row_col_shape);\n}\n absl::StatusOr MatrixLayout::For(\n const Shape& shape, size_t lhs_num_batch_dims, size_t lhs_num_row_dims,\n size_t 
rhs_num_batch_dims, size_t rhs_num_col_dims) {\n size_t num_batch_dims = std::max(lhs_num_batch_dims, rhs_num_batch_dims);\n TF_RET_CHECK(shape.rank() ==\n num_batch_dims + lhs_num_row_dims + rhs_num_col_dims);\n std::vector dims(shape.rank());\n absl::c_iota(dims, 0);\n auto batch_dims = absl::Span(dims).first(num_batch_dims);\n auto row_dims =\n absl::Span(dims).subspan(num_batch_dims, lhs_num_row_dims);\n auto col_dims = absl::Span(dims).last(rhs_num_col_dims);\n return MatrixLayout::For(shape, batch_dims, row_dims, col_dims);\n}\nnamespace {\nstd::vector NormalizedRelativeOrder(absl::Span dims) {\n std::vector indices(dims.size());\n absl::c_iota(indices, 0);\n absl::c_sort(indices,\n [&](int64_t a, int64_t b) { return dims[a] < dims[b]; });\n return indices;\n}\n} \nabsl::StatusOr CanFoldTransposeOperandIntoDot(const HloInstruction& dot,\n int64_t operand_idx) {\n if (Cast(&dot)->sparse_operands()) {\n return false;\n }\n TF_RET_CHECK(dot.opcode() == HloOpcode::kDot);\n TF_RET_CHECK(dot.operand_count() > operand_idx);\n const HloInstruction& transpose = *dot.operand(operand_idx);\n TF_RET_CHECK(transpose.opcode() == HloOpcode::kTranspose);\n const DotDimensionNumbers& dot_dims = dot.dot_dimension_numbers();\n auto transposed = [&](const auto& dims) {\n std::vector transposed_dims;\n transposed_dims.reserve(dims.size());\n for (int64_t dim : dims) {\n transposed_dims.push_back(transpose.dimensions(dim));\n }\n return transposed_dims;\n };\n auto batch_dims = (operand_idx == 0) ? dot_dims.lhs_batch_dimensions()\n : dot_dims.rhs_batch_dimensions();\n auto contracting_dims = (operand_idx == 0)\n ? 
dot_dims.lhs_contracting_dimensions()\n : dot_dims.rhs_contracting_dimensions();\n TF_ASSIGN_OR_RETURN(\n std::vector non_contracting_dims,\n GetNonContractingDims(transpose.shape(), batch_dims, contracting_dims));\n auto transposed_non_contracting_dims = transposed(non_contracting_dims);\n if (NormalizedRelativeOrder(non_contracting_dims) !=\n NormalizedRelativeOrder(transposed_non_contracting_dims)) {\n return false;\n }\n return MatrixLayout::For(transpose.operand(0)->shape(),\n transposed(batch_dims), transposed(contracting_dims),\n transposed_non_contracting_dims)\n .ok();\n}\n absl::StatusOr GemmConfig::For(\n const Shape& lhs_shape, absl::Span lhs_batch_dims,\n absl::Span lhs_contracting_dims, const Shape& rhs_shape,\n absl::Span rhs_batch_dims,\n absl::Span rhs_contracting_dims, const Shape& output_shape,\n double alpha_real, double alpha_imag, double beta,\n PrecisionConfig::Algorithm precision_algorithm,\n std::optional algorithm, int64_t compute_precision, bool grad_x,\n bool grad_y) {\n return GemmConfig::For(lhs_shape, lhs_batch_dims, lhs_contracting_dims,\n rhs_shape, rhs_batch_dims, rhs_contracting_dims,\n output_shape, nullptr,\n output_shape, alpha_real, alpha_imag, beta,\n precision_algorithm, algorithm, compute_precision,\n grad_x, grad_y);\n}\n absl::StatusOr GemmConfig::For(\n const Shape& lhs_shape, absl::Span lhs_batch_dims,\n absl::Span lhs_contracting_dims, const Shape& rhs_shape,\n absl::Span rhs_batch_dims,\n absl::Span rhs_contracting_dims, const Shape& c_shape,\n const Shape* bias_shape_ptr, const Shape& output_shape, double alpha_real,\n double alpha_imag, double beta,\n PrecisionConfig::Algorithm precision_algorithm,\n std::optional algorithm, int64_t compute_precision, bool grad_x,\n bool grad_y) {\n absl::Span lhs_col_dims = lhs_contracting_dims;\n TF_ASSIGN_OR_RETURN(\n std::vector lhs_row_dims,\n GetNonContractingDims(lhs_shape, lhs_batch_dims, lhs_col_dims));\n TF_ASSIGN_OR_RETURN(\n MatrixLayout lhs_layout,\n 
MatrixLayout::For(lhs_shape, lhs_batch_dims, lhs_row_dims, lhs_col_dims));\n absl::Span rhs_row_dims = rhs_contracting_dims;\n TF_ASSIGN_OR_RETURN(\n std::vector rhs_col_dims,\n GetNonContractingDims(rhs_shape, rhs_batch_dims, rhs_row_dims));\n TF_ASSIGN_OR_RETURN(\n MatrixLayout rhs_layout,\n MatrixLayout::For(rhs_shape, rhs_batch_dims, rhs_row_dims, rhs_col_dims));\n int64_t num_batch_dims =\n std::max(lhs_batch_dims.size(), rhs_batch_dims.size());\n TF_RET_CHECK(output_shape.rank() ==\n num_batch_dims + lhs_row_dims.size() + rhs_col_dims.size());\n std::vector output_dims(output_shape.rank());\n absl::c_iota(output_dims, 0);\n auto output_batch_dims =\n absl::Span(output_dims).first(num_batch_dims);\n auto output_row_dims = absl::Span(output_dims)\n .subspan(num_batch_dims, lhs_row_dims.size());\n auto output_col_dims =\n absl::Span(output_dims).last(rhs_col_dims.size());\n TF_ASSIGN_OR_RETURN(MatrixLayout output_layout,\n MatrixLayout::For(output_shape, output_batch_dims,\n output_row_dims, output_col_dims));\n Shape c_matrix_shape = c_shape;\n if (primitive_util::IsF8Type(lhs_shape.element_type()) &&\n primitive_util::IsF8Type(output_shape.element_type()) && beta == 0.0) {\n#if GOOGLE_CUDA\n c_matrix_shape.set_element_type(\n bias_shape_ptr != nullptr ? 
bias_shape_ptr->element_type() : BF16);\n#endif\n }\n TF_ASSIGN_OR_RETURN(MatrixLayout c_layout,\n MatrixLayout::For(c_matrix_shape, output_batch_dims,\n output_row_dims, output_col_dims));\n if (lhs_shape.element_type() != F8E4M3FN &&\n lhs_shape.element_type() != F8E5M2) {\n TF_RET_CHECK(lhs_layout.num_cols == rhs_layout.num_rows);\n TF_RET_CHECK(output_layout.num_rows == lhs_layout.num_rows);\n TF_RET_CHECK(output_layout.num_cols == rhs_layout.num_cols);\n }\n TF_RET_CHECK(c_layout.num_rows == output_layout.num_rows);\n TF_RET_CHECK(c_layout.num_cols == output_layout.num_cols);\n TF_RET_CHECK((lhs_layout.batch_size == output_layout.batch_size) ||\n (lhs_layout.batch_size == 1));\n TF_RET_CHECK((rhs_layout.batch_size == output_layout.batch_size) ||\n (rhs_layout.batch_size == 1));\n switch (output_shape.element_type()) {\n case F8E4M3FN:\n case F8E5M2:\n case F8E4M3FNUZ:\n case F8E5M2FNUZ:\n case F16:\n case BF16:\n case F32:\n case F64:\n TF_RET_CHECK(alpha_imag == 0);\n break;\n case C64:\n case C128:\n break;\n case S32:\n TF_RET_CHECK(alpha_imag == 0);\n if (lhs_layout.dtype != PrimitiveType::S8 ||\n rhs_layout.dtype != PrimitiveType::S8) {\n return Internal(\n \"For int32 gemm output only int8 input is supported, got input: \"\n \"%s, %s\",\n primitive_util::LowercasePrimitiveTypeName(lhs_layout.dtype),\n primitive_util::LowercasePrimitiveTypeName(rhs_layout.dtype));\n }\n break;\n default:\n return Internal(\"Unexpected GEMM datatype: %s\",\n primitive_util::LowercasePrimitiveTypeName(\n output_shape.element_type()));\n }\n return GemmConfig{lhs_layout,\n rhs_layout,\n c_layout,\n output_layout,\n {alpha_real, alpha_imag},\n beta,\n compute_precision,\n precision_algorithm,\n algorithm,\n grad_x,\n grad_y};\n}\nnamespace {\nbool IsTf32Allowed(PrecisionConfig::Algorithm algorithm,\n int64_t compute_precision) {\n if (algorithm == PrecisionConfig::ALG_UNSET) {\n return compute_precision <= 1;\n }\n return algorithm_util::HasTf32InputType(algorithm);\n}\n} \n 
absl::StatusOr GemmConfig::For(\n const HloInstruction* gemm) {\n TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,\n gemm->backend_config());\n return For(gemm, gpu_config.gemm_backend_config());\n}\n absl::StatusOr GemmConfig::For(\n const HloInstruction* gemm, const GemmBackendConfig& config) {\n std::optional algorithm;\n if (config.algorithm_case() != GemmBackendConfig::ALGORITHM_NOT_SET) {\n algorithm = config.selected_algorithm();\n } else {\n algorithm = se::blas::kDefaultAlgorithm;\n }\n const Shape& lhs_shape = gemm->operand(0)->shape();\n const Shape& rhs_shape = gemm->operand(1)->shape();\n const DotDimensionNumbers& dot_dims = config.dot_dimension_numbers();\n const Shape& output_shape =\n gemm->shape().IsTuple() ? gemm->shape().tuple_shapes(0) : gemm->shape();\n bool has_matrix_bias = config.beta() != 0.;\n Shape c_shape = has_matrix_bias ? gemm->operand(2)->shape() : output_shape;\n std::optional vector_bias_shape;\n TF_ASSIGN_OR_RETURN(\n bool has_vector_bias,\n xla::gpu::gpublas_lt::EpilogueAddsVectorBias(config.epilogue()));\n if (has_vector_bias) {\n int vector_bias_index = has_matrix_bias ? 3 : 2;\n if (primitive_util::IsF8Type(lhs_shape.element_type())) {\n vector_bias_index += 2;\n }\n vector_bias_shape = gemm->operand(vector_bias_index)->shape();\n }\n auto attributes = gemm->frontend_attributes().map();\n bool grad_x = (attributes[\"grad_x\"] == \"true\");\n bool grad_y = (attributes[\"grad_y\"] == \"true\");\n int64_t precision = se::blas::kDefaultComputePrecision;\n for (auto operand_precision : config.precision_config().operand_precision()) {\n precision = std::max(precision, static_cast(operand_precision));\n }\n const PrecisionConfig::Algorithm precision_algorithm =\n config.precision_config().algorithm();\n return GemmConfig::For(\n lhs_shape, dot_dims.lhs_batch_dimensions(),\n dot_dims.lhs_contracting_dimensions(), rhs_shape,\n dot_dims.rhs_batch_dimensions(), dot_dims.rhs_contracting_dimensions(),\n c_shape,\n vector_bias_shape ? 
&vector_bias_shape.value() : nullptr, output_shape,\n config.alpha_real(), config.alpha_imag(), config.beta(),\n precision_algorithm, algorithm, precision, grad_x, grad_y);\n}\nabsl::StatusOr GemmConfig::GetMatrixDescriptors(\n se::DeviceMemoryBase lhs_buf, se::DeviceMemoryBase rhs_buf,\n se::DeviceMemoryBase out_buf) const {\n auto create_matrix_desc = [](const se::gpu::MatrixLayout& layout,\n se::DeviceMemoryBase data)\n -> absl::StatusOr {\n TF_ASSIGN_OR_RETURN(se::blas::DataType type,\n se::gpu::AsBlasDataType(layout.dtype));\n return se::gpu::MatrixDescriptor{\n data, layout.leading_dim_stride, layout.batch_stride, type,\n (layout.order == se::gpu::MatrixLayout::Order::kColumnMajor\n ? se::blas::Transpose::kNoTranspose\n : se::blas::Transpose::kTranspose)};\n };\n se::gpu::MatrixLayout lhs = lhs_layout, rhs = rhs_layout, out = output_layout;\n bool must_swap_operands = MakeOutputColumnMajor(lhs, rhs, out);\n if (must_swap_operands) {\n std::swap(lhs_buf, rhs_buf);\n }\n TF_ASSIGN_OR_RETURN(se::gpu::OutputMatrixDescriptor out_desc,\n create_matrix_desc(out, out_buf));\n out_desc.batch_size = out.batch_size;\n out_desc.m = out.num_rows;\n out_desc.n = out.num_cols;\n out_desc.k = lhs.num_cols;\n TF_ASSIGN_OR_RETURN(out_desc.compute_type,\n se::gpu::GetBlasComputationType(\n PrecisionConfig::ALG_UNSET, lhs.dtype, out.dtype,\n se::blas::kDefaultComputePrecision));\n TF_ASSIGN_OR_RETURN(se::gpu::MatrixDescriptor lhs_desc,\n create_matrix_desc(lhs, lhs_buf));\n TF_ASSIGN_OR_RETURN(se::gpu::MatrixDescriptor rhs_desc,\n create_matrix_desc(rhs, rhs_buf));\n return DescriptorsTuple{lhs_desc, rhs_desc, out_desc, must_swap_operands};\n}\nnamespace {\ntemplate \nabsl::Status DoGemmWithAlgorithm(const se::gpu::MatrixDescriptor& lhs,\n const se::gpu::MatrixDescriptor& rhs,\n const se::gpu::OutputMatrixDescriptor& output,\n se::DeviceMemoryBase workspace, Scale alpha,\n Scale beta, se::Stream* stream,\n PrecisionConfig::Algorithm precision_algorithm,\n se::blas::AlgorithmType 
algorithm,\n se::blas::ComputePrecision compute_precision,\n const se::NumericOptions& numeric_options,\n se::blas::ProfileResult* profile_result,\n se::blas::CallContext context) {\n CHECK(output.transpose == se::blas::Transpose::kNoTranspose);\n PrimitiveType lhs_type = primitive_util::NativeToPrimitiveType();\n PrimitiveType output_type = primitive_util::NativeToPrimitiveType();\n TF_ASSIGN_OR_RETURN(\n se::blas::ComputationType computation_type,\n se::gpu::GetBlasComputationType(precision_algorithm, lhs_type,\n output_type, compute_precision));\n se::DeviceMemory output_data(output.data);\n auto* blas = stream->parent()->AsBlas();\n if (blas == nullptr) {\n return absl::InternalError(\"No Blas support for stream\");\n }\n se::blas::BlasSupport::ScopedWorkspace scoped_workspace(blas, &workspace);\n if (output.batch_size != 1) {\n return blas->BlasGemmStridedBatchedWithAlgorithm(\n stream, lhs.transpose, rhs.transpose, output.m, output.n, output.k,\n alpha, lhs.cast(), lhs.leading_dim_stride, lhs.batch_stride,\n rhs.cast(), rhs.leading_dim_stride, rhs.batch_stride, beta,\n &output_data, output.leading_dim_stride, output.batch_stride,\n output.batch_size, computation_type, algorithm, numeric_options,\n profile_result, context);\n } else {\n return blas->BlasGemmWithAlgorithm(\n stream, lhs.transpose, rhs.transpose, output.m, output.n, output.k,\n alpha, lhs.cast(), lhs.leading_dim_stride, rhs.cast(),\n rhs.leading_dim_stride, beta, &output_data, output.leading_dim_stride,\n computation_type, algorithm, numeric_options, profile_result, context);\n }\n}\ntemplate \nabsl::Status DoGemm(const se::gpu::MatrixDescriptor& lhs,\n const se::gpu::MatrixDescriptor& rhs,\n const se::gpu::OutputMatrixDescriptor& output,\n se::DeviceMemoryBase workspace, Scale alpha, Scale beta,\n se::Stream* stream,\n PrecisionConfig::Algorithm precision_algorithm,\n std::optional algorithm,\n se::blas::ComputePrecision compute_precision,\n const se::NumericOptions& numeric_options,\n 
se::blas::ProfileResult* profile_result,\n se::blas::CallContext context) {\n CHECK(output.transpose == se::blas::Transpose::kNoTranspose);\n se::DeviceMemory output_data(output.data);\n auto* blas = stream->parent()->AsBlas();\n if (blas == nullptr) {\n return absl::InternalError(\"No Blas support for stream\");\n }\n if (algorithm) {\n return DoGemmWithAlgorithm(\n lhs, rhs, output, workspace, alpha, beta, stream, precision_algorithm,\n *algorithm, compute_precision, numeric_options, profile_result,\n context);\n }\n se::blas::BlasSupport::ScopedWorkspace scoped_workspace(blas, &workspace);\n if (output.batch_size != 1) {\n return blas->BlasGemmStridedBatched(\n stream, lhs.transpose, rhs.transpose, output.m, output.n, output.k,\n alpha, lhs.cast(), lhs.leading_dim_stride, lhs.batch_stride,\n rhs.cast(), rhs.leading_dim_stride, rhs.batch_stride, beta,\n &output_data, output.leading_dim_stride, output.batch_stride,\n output.batch_size, numeric_options, context);\n }\n return blas->BlasGemm(stream, lhs.transpose, rhs.transpose, output.m,\n output.n, output.k, alpha, lhs.cast(),\n lhs.leading_dim_stride, rhs.cast(),\n rhs.leading_dim_stride, beta, &output_data,\n output.leading_dim_stride, numeric_options, context);\n}\n} \nabsl::Status RunGemm(const GemmConfig& config, se::DeviceMemoryBase lhs_buffer,\n se::DeviceMemoryBase rhs_buffer,\n se::DeviceMemoryBase output_buffer,\n se::DeviceMemoryBase workspace_buffer,\n bool deterministic_ops, se::Stream* stream,\n std::optional algorithm,\n se::blas::ProfileResult* profile_result) {\n VLOG(2) << \"Executing a GemmThunk\";\n TF_ASSIGN_OR_RETURN(\n GemmConfig::DescriptorsTuple desc,\n config.GetMatrixDescriptors(lhs_buffer, rhs_buffer, output_buffer));\n se::NumericOptions numeric_options{\n deterministic_ops,\n IsTf32Allowed(config.precision_algorithm,\n config.compute_precision)};\n if (!algorithm) algorithm = config.algorithm;\n se::blas::CallContext context = se::blas::CallContext::kNone;\n if (config.grad_x) {\n 
context = desc.operands_swapped ? se::blas::CallContext::kBackpropInput2\n : se::blas::CallContext::kBackpropInput1;\n }\n if (config.grad_y) {\n context = desc.operands_swapped ? se::blas::CallContext::kBackpropInput1\n : se::blas::CallContext::kBackpropInput2;\n }\n std::tuple operand_types{config.lhs_layout.dtype, config.rhs_layout.dtype,\n config.output_layout.dtype};\n if (config.alpha.real() == 0.0 && config.alpha.imag() == 0.0 &&\n config.beta == 0.0) {\n return stream->MemZero(&output_buffer, output_buffer.size());\n }\n#define TYPED_GEMM(SCALENTYPE, ATYPE, BTYPE, CTYPE) \\\n if (operand_types == std::make_tuple(ATYPE, BTYPE, CTYPE)) { \\\n using NativeScaleType = \\\n primitive_util::PrimitiveTypeToNative::type; \\\n using NativeAType = primitive_util::PrimitiveTypeToNative::type; \\\n using NativeCType = primitive_util::PrimitiveTypeToNative::type; \\\n return DoGemm( \\\n desc.lhs, desc.rhs, desc.output, workspace_buffer, \\\n static_cast(config.alpha.real()), \\\n static_cast(config.beta), stream, \\\n config.precision_algorithm, algorithm, config.compute_precision, \\\n numeric_options, profile_result, context); \\\n }\n#define TYPED_GEMM_COMPLEX(SCALENTYPE, ATYPE, BTYPE, CTYPE) \\\n if (operand_types == std::make_tuple(ATYPE, BTYPE, CTYPE)) { \\\n using NativeScaleType = \\\n primitive_util::PrimitiveTypeToNative::type; \\\n using NativeAType = primitive_util::PrimitiveTypeToNative::type; \\\n using NativeCType = primitive_util::PrimitiveTypeToNative::type; \\\n return DoGemm( \\\n desc.lhs, desc.rhs, desc.output, workspace_buffer, \\\n static_cast(config.alpha), \\\n static_cast(config.beta), stream, \\\n config.precision_algorithm, algorithm, config.compute_precision, \\\n numeric_options, profile_result, context); \\\n }\n if (config.output_layout.dtype == S32) {\n if (!algorithm) algorithm = se::blas::kDefaultGemmAlgo;\n return DoGemmWithAlgorithm(\n desc.lhs, desc.rhs, desc.output, workspace_buffer,\n static_cast(config.alpha.real()),\n 
static_cast(config.beta), stream, PrecisionConfig::ALG_UNSET,\n *algorithm, se::blas::kDefaultComputePrecision, numeric_options,\n profile_result, context);\n }\n TYPED_GEMM(F32, BF16, BF16, BF16)\n TYPED_GEMM(F32, F16, F16, F16)\n TYPED_GEMM(F32, S8, S8, F32)\n TYPED_GEMM(F32, BF16, BF16, F32)\n TYPED_GEMM(F32, F16, F16, F32)\n TYPED_GEMM(F32, F32, F32, F32)\n TYPED_GEMM(F64, F64, F64, F64)\n TYPED_GEMM_COMPLEX(C64, C64, C64, C64)\n TYPED_GEMM_COMPLEX(C128, C128, C128, C128)\n#undef TYPED_GEMM\n#undef TYPED_GEMM_COMPLEX\n return Internal(\n \"Unexpected GEMM dtype: %s %s %s\",\n primitive_util::LowercasePrimitiveTypeName(config.lhs_layout.dtype),\n primitive_util::LowercasePrimitiveTypeName(config.rhs_layout.dtype),\n primitive_util::LowercasePrimitiveTypeName(config.output_layout.dtype));\n} \nnamespace gpublas_lt {\nabsl::StatusOr EpilogueAddsVectorBias(\n GemmBackendConfig_Epilogue epilogue) {\n switch (epilogue) {\n case GemmBackendConfig::DEFAULT:\n case GemmBackendConfig::RELU:\n case GemmBackendConfig::GELU:\n case GemmBackendConfig::GELU_AUX:\n return false;\n case GemmBackendConfig::BIAS:\n case GemmBackendConfig::BIAS_RELU:\n case GemmBackendConfig::BIAS_GELU:\n case GemmBackendConfig::BIAS_GELU_AUX:\n return true;\n default:\n return Internal(\"Unknown Epilogue.\");\n }\n}\nabsl::StatusOr EpilogueHasAuxiliaryOutput(\n GemmBackendConfig_Epilogue epilogue) {\n switch (epilogue) {\n case GemmBackendConfig::DEFAULT:\n case GemmBackendConfig::RELU:\n case GemmBackendConfig::GELU:\n case GemmBackendConfig::BIAS:\n case GemmBackendConfig::BIAS_RELU:\n case GemmBackendConfig::BIAS_GELU:\n return false;\n case GemmBackendConfig::GELU_AUX:\n case GemmBackendConfig::BIAS_GELU_AUX:\n return true;\n default:\n return Internal(\"Unknown Epilogue.\");\n }\n}\nabsl::StatusOr AsBlasLtEpilogue(\n GemmBackendConfig_Epilogue epilogue) {\n switch (epilogue) {\n case GemmBackendConfig::DEFAULT:\n return se::gpu::BlasLt::Epilogue::kDefault;\n case GemmBackendConfig::RELU:\n 
return se::gpu::BlasLt::Epilogue::kReLU;\n case GemmBackendConfig::GELU:\n return se::gpu::BlasLt::Epilogue::kGELU;\n case GemmBackendConfig::GELU_AUX:\n return se::gpu::BlasLt::Epilogue::kGELUWithAux;\n case GemmBackendConfig::BIAS:\n return se::gpu::BlasLt::Epilogue::kBias;\n case GemmBackendConfig::BIAS_RELU:\n return se::gpu::BlasLt::Epilogue::kBiasThenReLU;\n case GemmBackendConfig::BIAS_GELU:\n return se::gpu::BlasLt::Epilogue::kBiasThenGELU;\n case GemmBackendConfig::BIAS_GELU_AUX:\n return se::gpu::BlasLt::Epilogue::kBiasThenGELUWithAux;\n default:\n return Internal(\"unexpected epilogue value\");\n }\n}\n} \n absl::StatusOr TritonGemmConfig::FromProto(\n const AutotuneResult::TritonGemmKey& proto) {\n TF_RET_CHECK(proto.block_m() > 0);\n TF_RET_CHECK(proto.block_n() > 0);\n TF_RET_CHECK(proto.block_k() > 0);\n TF_RET_CHECK(proto.split_k() > 0);\n TF_RET_CHECK(proto.num_stages() > 0);\n TF_RET_CHECK(proto.num_warps() > 0);\n TF_RET_CHECK(proto.num_ctas() > 0);\n return TritonGemmConfig(proto.block_m(), proto.block_n(), proto.block_k(),\n proto.split_k(), proto.num_stages(),\n proto.num_warps(), proto.num_ctas());\n}\nAutotuneResult::TritonGemmKey TritonGemmConfig::ToProto() const {\n AutotuneResult::TritonGemmKey key;\n key.set_block_m(block_m);\n key.set_block_n(block_n);\n key.set_block_k(block_k);\n key.set_split_k(split_k);\n key.set_num_stages(num_stages);\n key.set_num_warps(num_warps);\n key.set_num_ctas(num_ctas);\n return key;\n}\nstd::string TritonGemmConfig::ToString() const {\n return absl::StrCat(\"{block_m:\", block_m, \",block_n:\", block_n,\n \",block_k:\", block_k, \",split_k:\", split_k,\n \",num_stages:\", num_stages, \",num_warps:\", num_warps,\n \",num_ctas:\", num_ctas, \"}\");\n}\nabsl::StatusOr IsMatrixMultiplicationTooSmallForRewriting(\n const HloInstruction& dot, int64_t threshold) {\n CHECK_EQ(dot.opcode(), HloOpcode::kDot);\n const Shape& lhs_shape = dot.operand(0)->shape();\n const Shape& rhs_shape = dot.operand(1)->shape();\n 
const DotDimensionNumbers& dot_dims = dot.dot_dimension_numbers();\n int64_t contracting_size = 1;\n for (int64_t dim : dot_dims.lhs_contracting_dimensions()) {\n contracting_size *= lhs_shape.dimensions(dim);\n }\n TF_ASSIGN_OR_RETURN(\n std::vector lhs_non_contracting_dims,\n GetNonContractingDims(lhs_shape, dot_dims.lhs_batch_dimensions(),\n dot_dims.lhs_contracting_dimensions()));\n int64_t lhs_non_contracting_size = 1;\n for (int64_t dim : lhs_non_contracting_dims) {\n lhs_non_contracting_size *= lhs_shape.dimensions(dim);\n }\n TF_ASSIGN_OR_RETURN(\n std::vector rhs_non_contracting_dims,\n GetNonContractingDims(rhs_shape, dot_dims.rhs_batch_dimensions(),\n dot_dims.rhs_contracting_dimensions()));\n int64_t rhs_non_contracting_size = 1;\n for (int64_t dim : rhs_non_contracting_dims) {\n rhs_non_contracting_size *= rhs_shape.dimensions(dim);\n }\n return (rhs_non_contracting_size + lhs_non_contracting_size) *\n contracting_size <\n threshold;\n}\nbool IsDotSupportedByClassicalEmitters(const HloInstruction& dot) {\n if (!algorithm_util::IsSupportedByElementalIrEmitter(\n dot.precision_config().algorithm())) {\n return false;\n }\n switch (dot.shape().element_type()) {\n case F16:\n case F32:\n case BF16:\n return true;\n default:\n return false;\n }\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/gpu/matmul_utils.h\"\n#include \n#include \n#include \n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/shape.h\"\n#include \"xla/test.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"tsl/platform/status_matchers.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nusing ::testing::ElementsAre;\nusing ::tsl::testing::IsOkAndHolds;\nTEST(GetNonContractingDimsTest, Valid) {\n Shape shape = ParseShape(\"f32[1,2,3,4,5,6]\").value();\n EXPECT_THAT(GetNonContractingDims(shape, {4},\n {1, 5}),\n 
IsOkAndHolds(ElementsAre(0, 2, 3)));\n}\nusing CanFoldTransposeOperandIntoDotTest = HloTestBase;\nTEST_F(CanFoldTransposeOperandIntoDotTest, ArgTransposeFoldGemm) {\n const char* hlo_text = R\"(\nHloModule ArgTransposeFoldGemm\nENTRY AddDotsFunc {\n x = f32[3,2] parameter(0)\n y = f32[3,4] parameter(1)\n x_transposed = f32[2,3] transpose(x), dimensions={1, 0}\n ROOT dot_a = f32[2,4] dot(x_transposed, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n auto dot = module->entry_computation()->root_instruction();\n EXPECT_THAT(CanFoldTransposeOperandIntoDot(*dot, 0), IsOkAndHolds(true));\n}\nTEST_F(CanFoldTransposeOperandIntoDotTest, BatchedArgRowColTransposeFoldGemm) {\n const char* hlo_text = R\"(\nHloModule BatchedArgRowColTransposeFoldGemm\nENTRY AddDotsFunc {\n x = f32[5,3,2] parameter(0)\n y = f32[5,3,4] parameter(1)\n x_transposed = f32[5,2,3] transpose(x), dimensions={0, 2, 1}\n ROOT dot_a = f32[5,2,4] dot(x_transposed, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n auto dot = module->entry_computation()->root_instruction();\n EXPECT_THAT(CanFoldTransposeOperandIntoDot(*dot, 0), IsOkAndHolds(true));\n}\nTEST_F(CanFoldTransposeOperandIntoDotTest, BatchRowTransposeFoldGemm) {\n const char* hlo_text = R\"(\nHloModule BatchRowTransposeFoldCheck\nENTRY AddDotsFunc {\n x = f32[2,5,3] parameter(0)\n y = f32[5,3,4] parameter(1)\n x_transposed = f32[5,2,3] transpose(x), dimensions={1, 0, 2}\n ROOT dot_a = f32[5,2,4] dot(x_transposed, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n auto dot = module->entry_computation()->root_instruction();\n 
EXPECT_THAT(CanFoldTransposeOperandIntoDot(*dot, 0), IsOkAndHolds(true));\n}\nTEST_F(CanFoldTransposeOperandIntoDotTest,\n BatchFromMinorDimTransposeDoesntFold) {\n const char* hlo_text = R\"(\nHloModule BatchFromMinorDimTransposeDoesntFold\nENTRY AddDotsFunc {\n x = f32[3,2,5] parameter(0)\n y = f32[5,3,4] parameter(1)\n x_transposed = f32[5,2,3] transpose(x), dimensions={2, 1, 0}\n ROOT dot_a = f32[5,2,4] dot(x_transposed, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n auto dot = module->entry_computation()->root_instruction();\n EXPECT_THAT(CanFoldTransposeOperandIntoDot(*dot, 0), IsOkAndHolds(false));\n}\nTEST_F(CanFoldTransposeOperandIntoDotTest,\n TransposedNonContractingDimsDontFold) {\n const char* hlo_text = R\"(\nHloModule TransposedNonContractingDimsDontFold\nENTRY AddDotsFunc {\n x = f32[5,3,4]{2,1,0} parameter(1)\n y = f32[5,2,6,3]{3,1,2,0} parameter(0)\n y_transposed = f32[5,6,2,3]{3,2,1,0} transpose(y), dimensions={0, 2, 1, 3}\n ROOT dot_a = f32[5,4,6,2]{3,2,1,0} dot(x, y_transposed), lhs_contracting_dims={1}, rhs_contracting_dims={3}, lhs_batch_dims={0}, rhs_batch_dims={0}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n auto dot = module->entry_computation()->root_instruction();\n EXPECT_THAT(CanFoldTransposeOperandIntoDot(*dot, 1), IsOkAndHolds(false));\n}\nstruct GetBatchRowColumnShapeTestParams {\n absl::string_view shape;\n std::vector batch_dims;\n std::vector row_dims;\n std::vector col_dims;\n absl::string_view expected_shape;\n};\nusing GetBatchRowColumnShapeTest =\n ::testing::TestWithParam;\nTEST_P(GetBatchRowColumnShapeTest, ValidShape) {\n const GetBatchRowColumnShapeTestParams& params = GetParam();\n Shape shape = ParseShape(params.shape).value();\n EXPECT_THAT(GetBatchRowColumnShape(shape, params.batch_dims, params.row_dims,\n 
params.col_dims),\n IsOkAndHolds(ParseShape(params.expected_shape).value()));\n}\nINSTANTIATE_TEST_SUITE_P(\n GetBatchRowColumnShapeTests, GetBatchRowColumnShapeTest,\n ::testing::ValuesIn({\n {\"f32[3,4]{1,0}\", {}, {0}, {1},\n \"f32[1,3,4]{2,1,0}\"},\n {\"f32[3,4]{0,1}\", {}, {0}, {1}, \"f32[1,3,4]{1,2,0}\"},\n {\"f32[3,4]{1,0}\", {}, {1}, {0}, \"f32[1,4,3]{1,2,0}\"},\n {\"f32[3,4,5]{2,1,0}\", {0}, {1}, {2}, \"f32[3,4,5]{2,1,0}\"},\n {\"f32[3,4,5]{2,1,0}\", {2}, {1}, {0}, \"f32[5,4,3]{0,1,2}\"},\n {\"f32[3,4,5,6,7,8]{5,2,4,1,3,0}\",\n {0, 3},\n {1, 4},\n {2, 5},\n \"f32[18,28,40]{2,1,0}\"},\n }));\nTEST(GetBatchRowColumnShapeTest, BatchRowsColsInterleaved) {\n Shape shape = ParseShape(\"f32[3,4,5,6,7,8]{5,4,3,2,1,0}\").value();\n auto result =\n GetBatchRowColumnShape(shape, {0, 3},\n {1, 4}, {2, 5});\n EXPECT_FALSE(result.ok());\n}\nTEST(GetBatchRowColumnShapeTest, WrongPhysicalOrder) {\n Shape shape = ParseShape(\"f32[3,4,5,6]{3,2,0,1}\").value();\n auto result = GetBatchRowColumnShape(shape, {0, 1},\n {2}, {3});\n EXPECT_FALSE(result.ok());\n}\nusing Order = MatrixLayout::Order;\nstruct GetMatrixLayoutTestParams {\n absl::string_view shape;\n int64_t batch_size;\n int64_t num_rows;\n int64_t num_cols;\n Order order;\n int64_t leading_dim_stride;\n int64_t batch_stride;\n};\nusing GetMatrixLayoutTest = ::testing::TestWithParam;\nTEST_P(GetMatrixLayoutTest, ValidShape) {\n const GetMatrixLayoutTestParams& params = GetParam();\n Shape shape = ParseShape(params.shape).value();\n MatrixLayout result = MatrixLayout::For(shape).value();\n EXPECT_EQ(result.batch_size, params.batch_size);\n EXPECT_EQ(result.num_rows, params.num_rows);\n EXPECT_EQ(result.num_cols, params.num_cols);\n EXPECT_EQ(result.order, params.order);\n EXPECT_EQ(result.leading_dim_stride, params.leading_dim_stride);\n EXPECT_EQ(result.batch_stride, params.batch_stride);\n}\nINSTANTIATE_TEST_SUITE_P(\n GetMatrixLayoutTests, GetMatrixLayoutTest,\n ::testing::ValuesIn({\n {\"f32[3,4,5]{2,1,0}\", 3, 4, 
5,\n Order::kRowMajor, 5,\n 20},\n {\"f32[3,4,5]{1,2,0}\", 3, 4, 5, Order::kColumnMajor, 4, 20},\n {\"f32[3,4,5]{2,0,1}\", 3, 4, 5, Order::kRowMajor, 15, 5},\n {\"f32[3,4,5]{1,0,2}\", 3, 4, 5, Order::kColumnMajor, 12, 4},\n }));\nTEST(GetMatrixLayoutTest, BatchInMostMinorPhysicalDimension) {\n Shape shape = ParseShape(\"f32[3,4,5]{0,2,1}\").value();\n EXPECT_FALSE(MatrixLayout::For(shape).ok());\n}\nusing GetMatrixSizeRewriteThresholdTest = HloTestBase;\nTEST_F(GetMatrixSizeRewriteThresholdTest, MatMulTooSmallForRewrite) {\n const char* hlo_text = R\"(\nHloModule DotFuncModule\nENTRY DotFunc {\n x = f32[100,30,3] parameter(0)\n y = f32[100,3,3] parameter(1)\n ROOT dot = f32[100,30,3] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n auto dot = module->entry_computation()->root_instruction();\n EXPECT_THAT(IsMatrixMultiplicationTooSmallForRewriting(*dot, 100),\n IsOkAndHolds(true));\n}\nTEST_F(GetMatrixSizeRewriteThresholdTest, MatMulSupportedByClassicalEmitters) {\n const char* hlo_text = R\"(\nHloModule DotFuncModule\nENTRY DotFunc {\n x = f32[100,30,3] parameter(0)\n y = f32[100,3,3] parameter(1)\n ROOT dot = f32[100,30,3] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n auto dot = module->entry_computation()->root_instruction();\n EXPECT_TRUE(IsDotSupportedByClassicalEmitters(*dot));\n}\nTEST_F(GetMatrixSizeRewriteThresholdTest,\n MatMulUnsupportedByClassicalEmitters) {\n const char* hlo_text = R\"(\nHloModule DotFuncModule\nENTRY DotFunc {\n x = s8[100,30,3] parameter(0)\n y = s8[100,3,3] parameter(1)\n ROOT dot = s32[100,30,3] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}\n}\n)\";\n 
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n auto dot = module->entry_computation()->root_instruction();\n EXPECT_FALSE(IsDotSupportedByClassicalEmitters(*dot));\n}\nTEST_F(GetMatrixSizeRewriteThresholdTest, MatMulLeftLargeEnoughForRewrite) {\n const char* hlo_text = R\"(\nHloModule DotFuncModule\nENTRY DotFunc {\n x = f32[50,2] parameter(0)\n y = f32[2,2] parameter(1)\n ROOT dot = f32[50,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n auto dot = module->entry_computation()->root_instruction();\n EXPECT_THAT(IsMatrixMultiplicationTooSmallForRewriting(*dot, 100),\n IsOkAndHolds(false));\n}\nTEST_F(GetMatrixSizeRewriteThresholdTest, MatMulRightLargeEnoughForRewrite) {\n const char* hlo_text = R\"(\nHloModule DotFuncModule\nENTRY DotFunc {\n x = f32[2,2] parameter(0)\n y = f32[2,50] parameter(1)\n ROOT dot = f32[2,50] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n auto dot = module->entry_computation()->root_instruction();\n EXPECT_THAT(IsMatrixMultiplicationTooSmallForRewriting(*dot, 100),\n IsOkAndHolds(false));\n}\nTEST_F(GetMatrixSizeRewriteThresholdTest, MatMulTogetherLargeEnoughForRewrite) {\n const char* hlo_text = R\"(\nHloModule DotFuncModule\nENTRY DotFunc {\n x = f32[4,16] parameter(0)\n y = f32[16,4] parameter(1)\n ROOT dot = f32[4,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_text));\n auto dot = module->entry_computation()->root_instruction();\n EXPECT_THAT(IsMatrixMultiplicationTooSmallForRewriting(*dot, 100),\n IsOkAndHolds(false));\n}\n} \n} \n} "},"Code 
Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/matmul_utils.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/matmul_utils_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1198,"cells":{"ID":{"kind":"string","value":"5e5b458c-749f-4599-ab52-44f385a4c5f4"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"stream_executor_util"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/stream_executor_util.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/stream_executor_util_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/stream_executor_util.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/base/const_init.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/synchronization/mutex.h\"\n#include \"absl/time/time.h\"\n#include \"absl/types/span.h\"\n#include \"Eigen/Core\"\n#include \"xla/autotuning.pb.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/layout.h\"\n#include \"xla/layout_util.h\"\n#include \"xla/primitive_util.h\"\n#include \"xla/service/gpu/cublas_cudnn.h\"\n#include \"xla/service/gpu/launch_dimensions.h\"\n#include \"xla/service/hlo_module_config.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/stream_executor/data_type.h\"\n#include \"xla/stream_executor/device_memory.h\"\n#include 
\"xla/stream_executor/dnn.h\"\n#include \"xla/stream_executor/kernel.h\"\n#include \"xla/stream_executor/kernel_spec.h\"\n#include \"xla/stream_executor/launch_dim.h\"\n#include \"xla/stream_executor/platform.h\"\n#include \"xla/stream_executor/stream.h\"\n#include \"xla/stream_executor/typed_kernel_factory.h\"\n#include \"xla/tsl/protobuf/dnn.pb.h\"\n#include \"xla/tsl/util/proto/proto_utils.h\"\n#include \"xla/util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/ml_dtypes.h\"\n#include \"tsl/platform/status.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace gpu {\nabsl::StatusOr GetDnnVersionInfo(\n stream_executor::StreamExecutor* stream_exec) {\n if (!stream_exec) {\n return absl::InvalidArgumentError(\"StreamExecutor is null\");\n }\n stream_executor::dnn::DnnSupport* dnn = stream_exec->AsDnn();\n if (!dnn) {\n return absl::FailedPreconditionError(\n \"DNN library initialization failed. Look at the errors above for more \"\n \"details.\");\n }\n return dnn->GetVersion();\n}\nse::dnn::VersionInfo GetDnnVersionInfoOrDefault(\n stream_executor::StreamExecutor* stream_exec,\n se::dnn::VersionInfo fallback_version) {\n return GetDnnVersionInfo(stream_exec).value_or(fallback_version);\n}\nnamespace {\nusing se::dnn::DataLayout;\nusing se::dnn::DataLayoutString;\nusing se::dnn::FilterLayout;\nusing se::dnn::FilterLayoutString;\nint64_t FindMissingDnum(absl::Span vals) {\n for (int i = 0; i < vals.size(); i++) {\n if (!absl::c_linear_search(vals, i)) {\n return i;\n }\n }\n return vals.size();\n}\nabsl::StatusOr DataLayoutToXlaLayout(\n DataLayout data_layout, int64_t batch_dimension, int64_t feature_dimension,\n absl::Span spatial_dimensions) {\n std::vector layout;\n switch (data_layout) {\n case DataLayout::kBatchDepthYX: \n layout.push_back(batch_dimension);\n layout.push_back(feature_dimension);\n layout.insert(layout.end(), spatial_dimensions.begin(),\n spatial_dimensions.end());\n break;\n case DataLayout::kBatchDepthYX4: \n case 
DataLayout::kBatchDepthYX32: \n layout.push_back(batch_dimension);\n layout.push_back(feature_dimension);\n layout.insert(layout.end(), spatial_dimensions.begin(),\n spatial_dimensions.end());\n layout.push_back(FindMissingDnum(layout));\n break;\n case DataLayout::kBatchYXDepth: \n layout.push_back(batch_dimension);\n layout.insert(layout.end(), spatial_dimensions.begin(),\n spatial_dimensions.end());\n layout.push_back(feature_dimension);\n break;\n default:\n return Internal(\"Invalid layout %s\", DataLayoutString(data_layout));\n }\n return LayoutUtil::MakeLayoutFromMajorToMinor(layout);\n}\n} \nabsl::StatusOr>\nStreamExecutorConvLayoutsToXlaLayouts(const ConvolutionDimensionNumbers& dnums,\n DataLayout input, FilterLayout filter,\n DataLayout output) {\n TF_ASSIGN_OR_RETURN(\n Layout input_layout,\n DataLayoutToXlaLayout(input, dnums.input_batch_dimension(),\n dnums.input_feature_dimension(),\n dnums.input_spatial_dimensions()));\n TF_ASSIGN_OR_RETURN(\n Layout output_layout,\n DataLayoutToXlaLayout(input, dnums.output_batch_dimension(),\n dnums.output_feature_dimension(),\n dnums.output_spatial_dimensions()));\n std::vector filter_layout;\n switch (filter) {\n case FilterLayout::kOutputInputYX: \n filter_layout.push_back(dnums.kernel_output_feature_dimension());\n filter_layout.push_back(dnums.kernel_input_feature_dimension());\n filter_layout.insert(filter_layout.end(),\n dnums.kernel_spatial_dimensions().begin(),\n dnums.kernel_spatial_dimensions().end());\n break;\n case FilterLayout::kOutputInputYX4: \n filter_layout.push_back(dnums.kernel_output_feature_dimension());\n filter_layout.push_back(dnums.kernel_input_feature_dimension());\n filter_layout.insert(filter_layout.end(),\n dnums.kernel_spatial_dimensions().begin(),\n dnums.kernel_spatial_dimensions().end());\n filter_layout.push_back(FindMissingDnum(filter_layout));\n break;\n case FilterLayout::kOutputYXInput: \n filter_layout.push_back(dnums.kernel_output_feature_dimension());\n 
filter_layout.insert(filter_layout.end(),\n dnums.kernel_spatial_dimensions().begin(),\n dnums.kernel_spatial_dimensions().end());\n filter_layout.push_back(dnums.kernel_input_feature_dimension());\n break;\n default:\n return Internal(\"Invalid filter layout %s for conv with dnums %s,\",\n FilterLayoutString(filter),\n ConvolutionDimensionNumbersToString(dnums));\n }\n return std::make_tuple(input_layout,\n LayoutUtil::MakeLayoutFromMajorToMinor(filter_layout),\n output_layout);\n}\nabsl::StatusOr>\nXlaConvShapesToStreamExecutorLayouts(const ConvolutionDimensionNumbers& dnums,\n const Shape& input, const Shape& filter,\n const Shape& output) {\n CHECK(input.has_layout());\n CHECK(filter.has_layout());\n CHECK(output.has_layout());\n Layout nchw_input, nchw_filter, nchw_output;\n std::tie(nchw_input, nchw_filter, nchw_output) =\n StreamExecutorConvLayoutsToXlaLayouts(dnums, DataLayout::kBatchDepthYX,\n FilterLayout::kOutputInputYX,\n DataLayout::kBatchDepthYX)\n .value();\n Layout nchw_vect_input, nchw_vect_filter, nchw_vect_output;\n std::tie(nchw_vect_input, nchw_vect_filter, nchw_vect_output) =\n StreamExecutorConvLayoutsToXlaLayouts(dnums, DataLayout::kBatchDepthYX4,\n FilterLayout::kOutputInputYX4,\n DataLayout::kBatchDepthYX4)\n .value();\n Layout nhwc_input, nhwc_filter, nhwc_output;\n std::tie(nhwc_input, nhwc_filter, nhwc_output) =\n StreamExecutorConvLayoutsToXlaLayouts(dnums, DataLayout::kBatchYXDepth,\n FilterLayout::kOutputYXInput,\n DataLayout::kBatchYXDepth)\n .value();\n DataLayout input_layout;\n if (LayoutUtil::Equal(input.layout(), nchw_input)) {\n input_layout = DataLayout::kBatchDepthYX;\n } else if (LayoutUtil::Equal(input.layout(), nchw_vect_input)) {\n int64_t vect_size = input.dimensions(input.layout().minor_to_major(0));\n if (vect_size == 4) {\n input_layout = DataLayout::kBatchDepthYX4;\n } else if (vect_size == 32) {\n input_layout = DataLayout::kBatchDepthYX32;\n } else {\n return Internal(\n \"Invalid input shape %s for conv with 
dnums %s. Most-minor dim \"\n \"should be 4 or 32, but was %d.\",\n ShapeUtil::HumanStringWithLayout(input),\n ConvolutionDimensionNumbersToString(dnums), vect_size);\n }\n } else if (LayoutUtil::Equal(input.layout(), nhwc_input)) {\n input_layout = DataLayout::kBatchYXDepth;\n } else {\n return Internal(\n \"Invalid input layout %s for conv with dnums %s; expected one of (%s, \"\n \"%s, %s)\",\n LayoutUtil::HumanString(input.layout()),\n ConvolutionDimensionNumbersToString(dnums), nchw_input.ToString(),\n nchw_vect_input.ToString(), nhwc_input.ToString());\n }\n FilterLayout filter_layout;\n if (LayoutUtil::Equal(filter.layout(), nchw_filter)) {\n filter_layout = FilterLayout::kOutputInputYX;\n } else if (LayoutUtil::Equal(filter.layout(), nchw_vect_filter)) {\n int64_t vect_size = filter.dimensions(filter.layout().minor_to_major(0));\n if (vect_size == 4) {\n filter_layout = FilterLayout::kOutputInputYX4;\n } else if (vect_size == 32) {\n filter_layout = FilterLayout::kOutputInputYX32;\n } else {\n return Internal(\n \"Invalid filter shape %s for conv with dnums %s. 
Most-minor dim \"\n \"should be 4 or 32, but was %d.\",\n ShapeUtil::HumanStringWithLayout(filter),\n ConvolutionDimensionNumbersToString(dnums), vect_size);\n }\n } else if (LayoutUtil::Equal(filter.layout(), nhwc_filter)) {\n filter_layout = FilterLayout::kOutputYXInput;\n } else {\n return Internal(\n \"Invalid filter layout %s for conv with dnums %s, expected one of (%s, \"\n \"%s, %s)\",\n LayoutUtil::HumanString(filter.layout()),\n ConvolutionDimensionNumbersToString(dnums), nchw_filter.ToString(),\n nchw_vect_filter.ToString(), nhwc_filter.ToString());\n }\n DataLayout output_layout;\n if (LayoutUtil::Equal(output.layout(), nchw_output)) {\n output_layout = DataLayout::kBatchDepthYX;\n } else if (LayoutUtil::Equal(output.layout(), nchw_vect_output)) {\n int64_t vect_size = output.dimensions(output.layout().minor_to_major(0));\n if (vect_size == 4) {\n output_layout = DataLayout::kBatchDepthYX4;\n } else if (vect_size == 32) {\n output_layout = DataLayout::kBatchDepthYX32;\n } else {\n return Internal(\n \"Invalid output shape %s for conv with dnums %s. 
Most-minor dim \"\n \"should be 4 or 32, but was %d.\",\n ShapeUtil::HumanStringWithLayout(output),\n ConvolutionDimensionNumbersToString(dnums), vect_size);\n }\n } else if (LayoutUtil::Equal(output.layout(), nhwc_output)) {\n output_layout = DataLayout::kBatchYXDepth;\n } else {\n return Internal(\"Invalid output layout %s for conv with dnums %s\",\n LayoutUtil::HumanString(output.layout()),\n ConvolutionDimensionNumbersToString(dnums));\n }\n return std::make_tuple(input_layout, filter_layout, output_layout);\n}\nstatic std::optional FindVectorizedDim(int64_t rank, int64_t d0,\n int64_t d1,\n absl::Span ds) {\n for (int64_t i = 0; i < rank; i++) {\n if (i == d0 || i == d1 || absl::c_linear_search(ds, i)) {\n continue;\n }\n return i;\n }\n return std::nullopt;\n}\nstd::tuple, std::optional,\n std::optional>\nFindVectorizedFeatureDims(const ConvolutionDimensionNumbers& dnums,\n const Shape& input, const Shape& filter,\n const Shape& output) {\n return {\n FindVectorizedDim(input.dimensions_size(), dnums.input_batch_dimension(),\n dnums.input_feature_dimension(),\n dnums.input_spatial_dimensions()),\n FindVectorizedDim(filter.dimensions_size(),\n dnums.kernel_input_feature_dimension(),\n dnums.kernel_output_feature_dimension(),\n dnums.kernel_spatial_dimensions()),\n FindVectorizedDim(\n output.dimensions_size(), dnums.output_batch_dimension(),\n dnums.output_feature_dimension(), dnums.output_spatial_dimensions()),\n };\n}\nabsl::Mutex& GetGpuMutex(const se::StreamExecutor* stream_exec) {\n static absl::Mutex mu(absl::kConstInit);\n static auto* mutexes =\n new std::map,\n absl::Mutex>();\n absl::MutexLock global_lock(&mu);\n auto it = mutexes\n ->emplace(std::piecewise_construct,\n std::make_tuple(stream_exec->GetPlatform(),\n stream_exec->device_ordinal()),\n std::make_tuple())\n .first;\n return it->second;\n}\nabsl::StatusOr> CreateKernel(\n absl::string_view kernel_name, uint64_t num_args, absl::string_view ptx,\n absl::Span cubin_data, se::StreamExecutor* 
stream_exec,\n uint32_t shared_mem_bytes) {\n se::MultiKernelLoaderSpec loader_spec(num_args);\n loader_spec.AddCudaPtxInMemory(ptx, kernel_name);\n if (!cubin_data.empty()) {\n loader_spec.AddCudaCubinInMemory(cubin_data, kernel_name);\n }\n TF_ASSIGN_OR_RETURN(std::unique_ptr kernel,\n stream_exec->LoadKernel(loader_spec));\n se::KernelMetadata m;\n m.set_shared_memory_bytes(shared_mem_bytes);\n kernel->set_metadata(m);\n return kernel;\n}\nabsl::Status ExecuteKernelOnStream(const se::Kernel& kernel,\n absl::Span args,\n const LaunchDimensions& dims,\n se::Stream* stream) {\n TF_ASSIGN_OR_RETURN(\n std::unique_ptr kernel_args,\n se::PackKernelArgs(args, kernel.metadata()));\n return stream->Launch(dims.thread_counts_per_block(), dims.block_counts(),\n kernel, *kernel_args);\n}\nabsl::Status ExecuteKernelOnStream(const se::Kernel& kernel,\n absl::Span args,\n const LaunchDimensions& dims,\n const se::ClusterDim& cluster_dim,\n se::Stream* stream) {\n TF_ASSIGN_OR_RETURN(\n std::unique_ptr kernel_args,\n se::PackKernelArgs(args, kernel.metadata()));\n return stream->Launch(dims.thread_counts_per_block(), dims.block_counts(),\n cluster_dim, kernel, *kernel_args);\n}\ntemplate \ntypename std::enable_if::value,\n T>::type static UniformDistribution(T lhs, T rhs,\n Generator* gen) =\n delete;\ntemplate \ntypename std::enable_if::value,\n T>::type static UniformDistribution(T lhs, T rhs,\n Generator* gen) {\n return std::uniform_real_distribution(lhs, rhs)(*gen);\n}\nnamespace repeat_buffer_kernel {\nvoid* kernel();\n}\ntemplate \nstatic void InitializeTypedBuffer(se::Stream* stream,\n se::DeviceMemoryBase buffer,\n int64_t* rng_state) {\n constexpr int host_buffer_size = 10069;\n static std::vector* host_buffer = [&] {\n auto* ret = new std::vector(host_buffer_size);\n std::mt19937 gen;\n for (auto& element : *ret) {\n constexpr bool kIsIntegral = std::numeric_limits::is_integer;\n constexpr bool kIsLowRange =\n !kIsIntegral && std::numeric_limits::max_exponent <=\n 
std::numeric_limits::max_exponent;\n using RandomType = typename std::conditional,\n double, float>::type;\n auto upper_bound = RandomType(kIsLowRange ? 0.1 : 1.0);\n auto rand_val = UniformDistribution(RandomType(0), upper_bound, &gen);\n element = T(kIsIntegral ? rand_val + 0.5 : rand_val);\n }\n return ret;\n }();\n CHECK_EQ(0, buffer.size() % sizeof(T));\n int64_t elements_to_fill = buffer.size() / sizeof(T);\n int64_t host_index = *rng_state;\n CHECK_LT(host_index, host_buffer_size);\n *rng_state = (*rng_state + elements_to_fill) % host_buffer_size;\n int64_t first_size =\n std::min(host_buffer_size - host_index, elements_to_fill);\n TF_CHECK_OK(stream->Memcpy(&buffer, host_buffer->data() + host_index,\n first_size * sizeof(T)));\n elements_to_fill -= first_size;\n if (elements_to_fill == 0) {\n return;\n }\n int64_t second_size = std::min(host_index, elements_to_fill);\n CHECK_LE(first_size + second_size, host_buffer_size);\n se::DeviceMemoryBase mem =\n buffer.GetByteSlice(first_size * sizeof(T), second_size * sizeof(T));\n TF_CHECK_OK(stream->Memcpy(&mem, host_buffer->data(), mem.size()));\n elements_to_fill -= second_size;\n if (elements_to_fill == 0) {\n return;\n }\n#ifdef GOOGLE_CUDA\n CHECK_EQ(elements_to_fill, buffer.size() / sizeof(T) - host_buffer_size);\n se::StreamExecutor* executor = stream->parent();\n auto kernel =\n se::TypedKernelFactory::Create(\n executor, \"RepeatBufferKernel\", repeat_buffer_kernel::kernel());\n if (!kernel.ok()) {\n LOG(FATAL) << \"Could not create RepeatBufferKernel: \" << kernel.status();\n }\n constexpr int64_t host_buffer_bytes = host_buffer_size * sizeof(T);\n constexpr int threads_per_block = 256;\n constexpr int blocks_per_grid =\n (host_buffer_bytes + threads_per_block - 1) / threads_per_block;\n TF_CHECK_OK(stream->ThenLaunch(se::ThreadDim(threads_per_block, 1, 1),\n se::BlockDim(blocks_per_grid, 1, 1), *kernel,\n buffer, host_buffer_bytes,\n static_cast(buffer.size())));\n#endif\n}\nvoid 
InitializeBuffer(se::Stream* stream, PrimitiveType buffer_type,\n int64_t* rng_state, se::DeviceMemoryBase buffer) {\n return primitive_util::PrimitiveTypeSwitch(\n [&](auto primitive_type_constant) -> void {\n if constexpr (primitive_util::IsFloatingPointType(\n primitive_type_constant) ||\n primitive_util::IsIntegralType(primitive_type_constant)) {\n using NativeT = typename primitive_util::PrimitiveTypeToNative<\n primitive_type_constant>::type;\n return InitializeTypedBuffer(stream, buffer, rng_state);\n }\n if constexpr (primitive_util::IsComplexType(primitive_type_constant)) {\n using NativeT = typename primitive_util::PrimitiveTypeToNative<\n primitive_type_constant>::type;\n return InitializeTypedBuffer(\n stream, buffer, rng_state);\n }\n if constexpr (primitive_type_constant == PRED) {\n return InitializeTypedBuffer(stream, buffer, rng_state);\n }\n LOG(FATAL) << \"Unexpected type: \"\n << primitive_util::LowercasePrimitiveTypeName(buffer_type);\n },\n buffer_type);\n}\nabsl::StatusOr GetDNNConvKindFromCudnnConvKind(\n CudnnConvKind kind) {\n switch (kind) {\n case CudnnConvKind::kBackwardFilter:\n return se::dnn::BACKWARD_FILTER;\n case CudnnConvKind::kBackwardInput:\n return se::dnn::BACKWARD_DATA;\n case CudnnConvKind::kForward:\n return se::dnn::FORWARD;\n case CudnnConvKind::kForwardActivation:\n return se::dnn::FORWARD_BIAS_ACTIVATION;\n case CudnnConvKind::kForwardGraph:\n return se::dnn::FORWARD_GRAPH;\n default:\n break;\n }\n return Internal(\"Unexpected convolution kind\");\n}\nabsl::StatusOr GetDNNNormKindFromCudnnNormKind(\n CudnnNormKind kind) {\n switch (kind) {\n case CudnnNormKind::kLayerForwardInfer:\n return se::dnn::LAYER_FWD_INFER;\n case CudnnNormKind::kLayerForwardTrain:\n return se::dnn::LAYER_FWD_TRAIN;\n case CudnnNormKind::kLayerBackward:\n return se::dnn::LAYER_BWD;\n default:\n return Internal(\"Unexpected norm kind\");\n }\n}\nabsl::StatusOr GetDNNFmhaMaskKindFromCudnnFmhaMaskKind(\n CudnnfMHAMaskKind kind) {\n switch (kind) 
{\n case CudnnfMHAMaskKind::kNoMask:\n return se::dnn::NO_MASK;\n case CudnnfMHAMaskKind::kPadding:\n return se::dnn::PADDING;\n case CudnnfMHAMaskKind::kCausal:\n return se::dnn::CAUSAL;\n case CudnnfMHAMaskKind::kPaddingCausal:\n return se::dnn::PADDING_CAUSAL;\n case CudnnfMHAMaskKind::kAlibi:\n return se::dnn::ALIBI;\n default:\n return Internal(\"Unexpected fmha mask kind\");\n }\n}\nabsl::StatusOr GetDNNDataTypeFromPrimitiveType(\n PrimitiveType type) {\n switch (type) {\n case F16:\n return se::dnn::ToDataType::value;\n case F32:\n return se::dnn::ToDataType::value;\n case F64:\n return se::dnn::ToDataType::value;\n case S8:\n return se::dnn::ToDataType::value;\n case S32:\n return se::dnn::ToDataType::value;\n case BF16:\n return se::dnn::ToDataType::value;\n case F8E4M3FN:\n return se::dnn::ToDataType::value;\n case F8E5M2:\n return se::dnn::ToDataType::value;\n default:\n break;\n }\n return Internal(\"Unsupported datatype\");\n}\nbool RequireDeterminism(const HloModuleConfig& config) {\n return config.debug_options().xla_gpu_deterministic_ops() ||\n config.debug_options().xla_gpu_exclude_nondeterministic_ops();\n}\nnamespace {\nstd::vector KeepNonFailures(\n absl::Span profile_results) {\n std::vector filtered_results;\n absl::c_copy_if(profile_results, std::back_inserter(filtered_results),\n [](const AutotuneResult& r) {\n return !r.has_failure() ||\n r.failure().kind() == AutotuneResult::WRONG_RESULT;\n });\n return filtered_results;\n}\nabsl::Status AllAlgorithmsFailedInternalError(\n std::optional instr_str,\n absl::Span profile_results) {\n std::ostringstream msg;\n if (instr_str.has_value()) {\n msg << \"All algorithms tried for \" << instr_str.value()\n << \" failed. Falling back to default algorithm. Per-algorithm \"\n \"errors:\";\n } else {\n msg << \"All algorithms failed. Falling back to the default algorithm. 
\"\n << \"Per-algorithm errors:\";\n }\n for (const auto& result : profile_results) {\n msg << \"\\n \" << result.failure().msg();\n }\n return Internal(\"%s\", msg.str());\n}\nabsl::Status NoAlgorithmSuppliedInternalError(\n std::optional instr_str) {\n std::ostringstream msg;\n if (instr_str.has_value()) {\n msg << \"There are no algorithm candidates for computing: \\n \"\n << instr_str.value()\n << \"\\nThis likely means that the instruction shape is not supported by \"\n \"the target GPU library.\";\n } else {\n msg << \"There are no algorithm candidates for computing the instruction.\\n\"\n \"This likely means that the instruction shape is not supported by \"\n \"the target GPU library.\";\n }\n return Internal(\"%s\", msg.str());\n}\nvoid SortAutotuningResultsByRunTime(std::vector& results) {\n absl::c_sort(results,\n [](const AutotuneResult& lhs, const AutotuneResult& rhs) {\n return tsl::proto_utils::FromDurationProto(lhs.run_time()) <\n tsl::proto_utils::FromDurationProto(rhs.run_time());\n });\n}\nabsl::Span TopResultsWithinMeasurementError(\n std::vector& results_sorted_by_runtime) {\n constexpr absl::Duration kMeasurementError = absl::Microseconds(4);\n absl::Duration min_time = tsl::proto_utils::FromDurationProto(\n results_sorted_by_runtime.front().run_time());\n absl::Duration limit_time = min_time + kMeasurementError;\n auto limit_time_it = absl::c_find_if(\n results_sorted_by_runtime, [limit_time](const AutotuneResult& x) {\n return tsl::proto_utils::FromDurationProto(x.run_time()) > limit_time;\n });\n return absl::MakeSpan(&*results_sorted_by_runtime.begin(), &*limit_time_it);\n}\n} \nabsl::StatusOr PickBestResult(\n absl::Span profile_results,\n std::optional instr_str,\n HloModuleConfig hlo_module_config) {\n if (profile_results.empty()) {\n return NoAlgorithmSuppliedInternalError(instr_str);\n }\n std::vector filtered_results =\n KeepNonFailures(profile_results);\n if (filtered_results.empty()) {\n return 
AllAlgorithmsFailedInternalError(instr_str, profile_results);\n }\n if (RequireDeterminism(hlo_module_config)) {\n return *filtered_results.begin();\n }\n SortAutotuningResultsByRunTime(filtered_results);\n auto top_within_error = TopResultsWithinMeasurementError(filtered_results);\n return *absl::c_min_element(top_within_error, [](const AutotuneResult& lhs,\n const AutotuneResult& rhs) {\n return lhs.scratch_bytes() < rhs.scratch_bytes();\n });\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/gpu/stream_executor_util.h\"\n#include \n#include \n#include \n#include \"absl/status/statusor.h\"\n#include \"absl/time/time.h\"\n#include \"xla/autotuning.pb.h\"\n#include \"xla/service/hlo_module_config.h\"\n#include \"xla/tsl/util/proto/proto_utils.h\"\nnamespace xla::gpu {\nnamespace {\nstruct Result {\n int64_t run_time_ns;\n int64_t scratch_bytes;\n bool operator==(const Result& other) const {\n return other.run_time_ns == run_time_ns &&\n other.scratch_bytes == scratch_bytes;\n };\n explicit operator AutotuneResult() const {\n AutotuneResult result;\n *result.mutable_run_time() =\n tsl::proto_utils::ToDurationProto(absl::Nanoseconds(run_time_ns));\n result.set_scratch_bytes(scratch_bytes);\n return result;\n }\n};\nstatic Result ATRToResult(AutotuneResult atr) {\n return Result{.run_time_ns = absl::ToInt64Nanoseconds(\n tsl::proto_utils::FromDurationProto(atr.run_time())),\n .scratch_bytes = atr.scratch_bytes()};\n}\nstd::vector Results(const std::vector& stats) {\n std::vector results;\n for (const auto& s : stats) results.push_back(AutotuneResult(s));\n return results;\n}\nTEST(StreamExecutorTest, PickBestResult) {\n absl::StatusOr atr;\n atr = PickBestResult(Results({{9000, 0}, {1000, 0}, {16000, 0}}), \"\", {});\n EXPECT_EQ(ATRToResult(atr.value()), Result({1000, 0}));\n atr = PickBestResult(Results({{4700, 0}, {4600, 0}, {4500, 0}}), \"\", {});\n EXPECT_EQ(ATRToResult(atr.value()), Result({4500, 0}));\n atr = 
PickBestResult(Results({{4700, 0}, {4600, 2}, {4500, 1}}), \"\", {});\n EXPECT_EQ(ATRToResult(atr.value()), Result({4700, 0}));\n atr = PickBestResult(Results({{5000, 1}, {6000, 0}, {7500, 0}}), \"\", {});\n EXPECT_EQ(ATRToResult(atr.value()), Result({6000, 0}));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/stream_executor_util.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/stream_executor_util_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1199,"cells":{"ID":{"kind":"string","value":"adb8539d-0c2b-41c4-b736-da697e10d787"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"gpu_compiler"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/gpu_compiler.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/gpu_compiler_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/gpu_compiler.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/base/call_once.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/status.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_format.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/span.h\"\n#include \"absl/types/variant.h\"\n#include \"llvm/ADT/DenseMap.h\"\n#include \"llvm/ADT/SmallString.h\"\n#include \"llvm/ADT/StringRef.h\"\n#include \"llvm/AsmParser/Parser.h\"\n#include 
\"llvm/Bitcode/BitcodeReader.h\"\n#include \"llvm/Bitcode/BitcodeWriter.h\"\n#include \"llvm/IR/Constants.h\"\n#include \"llvm/IR/DataLayout.h\"\n#include \"llvm/IR/DiagnosticInfo.h\"\n#include \"llvm/IR/DiagnosticPrinter.h\"\n#include \"llvm/IR/GlobalValue.h\"\n#include \"llvm/IR/LLVMContext.h\"\n#include \"llvm/IR/Module.h\"\n#include \"llvm/IR/Verifier.h\"\n#include \"llvm/Support/Casting.h\"\n#include \"llvm/Support/Error.h\"\n#include \"llvm/Support/raw_ostream.h\"\n#include \"llvm/Transforms/Utils/Cloning.h\"\n#include \"llvm/Transforms/Utils/SplitModule.h\"\n#include \"mlir/IR/Diagnostics.h\"\n#include \"mlir/IR/DialectRegistry.h\"\n#include \"mlir/Support/LLVM.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_module_group.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/ir/hlo_schedule.h\"\n#include \"xla/hlo/pass/hlo_pass_fix.h\"\n#include \"xla/hlo/pass/hlo_pass_pipeline.h\"\n#include \"xla/maybe_owning.h\"\n#include \"xla/service/algebraic_simplifier.h\"\n#include \"xla/service/all_gather_broadcast_reorder.h\"\n#include \"xla/service/all_gather_combiner.h\"\n#include \"xla/service/all_reduce_combiner.h\"\n#include \"xla/service/all_reduce_contiguous.h\"\n#include \"xla/service/all_reduce_folder.h\"\n#include \"xla/service/all_reduce_promotion.h\"\n#include \"xla/service/all_reduce_reassociate.h\"\n#include \"xla/service/async_collective_creator.h\"\n#include \"xla/service/batched_gather_scatter_normalizer.h\"\n#include \"xla/service/batchnorm_expander.h\"\n#include \"xla/service/bitcast_dtypes_expander.h\"\n#include \"xla/service/broadcast_canonicalizer.h\"\n#include \"xla/service/buffer_assignment.h\"\n#include \"xla/service/call_inliner.h\"\n#include \"xla/service/collective_permute_decomposer.h\"\n#include \"xla/service/collective_pipeliner.h\"\n#include 
\"xla/service/collective_quantizer.h\"\n#include \"xla/service/collectives_schedule_linearizer.h\"\n#include \"xla/service/comparison_expander.h\"\n#include \"xla/service/compiler.h\"\n#include \"xla/service/conditional_canonicalizer.h\"\n#include \"xla/service/conditional_simplifier.h\"\n#include \"xla/service/convert_memory_placement_to_internal_annotations.h\"\n#include \"xla/service/convert_mover.h\"\n#include \"xla/service/convolution_4d_expander.h\"\n#include \"xla/service/convolution_pred_expander.h\"\n#include \"xla/service/copy_insertion.h\"\n#include \"xla/service/cpu_gpu_shape_verifier.h\"\n#include \"xla/service/dot_decomposer.h\"\n#include \"xla/service/dot_merger.h\"\n#include \"xla/service/dump.h\"\n#include \"xla/service/dynamic_dimension_inference.h\"\n#include \"xla/service/dynamic_dimension_simplifier.h\"\n#include \"xla/service/dynamic_index_splitter.h\"\n#include \"xla/service/dynamic_padder.h\"\n#include \"xla/service/eigh_expander.h\"\n#include \"xla/service/executable.h\"\n#include \"xla/service/export_hlo.h\"\n#include \"xla/service/flatten_call_graph.h\"\n#include \"xla/service/float_normalization.h\"\n#include \"xla/service/float_support.h\"\n#include \"xla/service/gather_expander.h\"\n#include \"xla/service/gather_simplifier.h\"\n#include \"xla/service/gpu/autotuning/autotuner_util.h\"\n#include \"xla/service/gpu/autotuning/custom_kernel_fusion_autotuner.h\"\n#include \"xla/service/gpu/compile_module_to_llvm_ir.h\"\n#include \"xla/service/gpu/conv_layout_normalization.h\"\n#include \"xla/service/gpu/cublas_cudnn.h\"\n#include \"xla/service/gpu/execution_stream_assignment.h\"\n#include \"xla/service/gpu/fusion_pipeline.h\"\n#include \"xla/service/gpu/fusions/triton/triton_support.h\"\n#include \"xla/service/gpu/gpu_executable.h\"\n#include \"xla/service/gpu/gpu_float_support.h\"\n#include \"xla/service/gpu/gpu_hlo_schedule.h\"\n#include \"xla/service/gpu/gpu_latency_hiding_scheduler.h\"\n#include 
\"xla/service/gpu/gpu_p2p_pipeliner.h\"\n#include \"xla/service/gpu/gpu_spmd_pipeline.h\"\n#include \"xla/service/gpu/hlo_fusion_stats.h\"\n#include \"xla/service/gpu/ir_emission_utils.h\"\n#include \"xla/service/gpu/ir_emitter_context.h\"\n#include \"xla/service/gpu/ir_emitter_unnested.h\"\n#include \"xla/service/gpu/kernel_reuse_cache.h\"\n#include \"xla/service/gpu/matmul_utils.h\"\n#include \"xla/service/gpu/metrics.h\"\n#include \"xla/service/gpu/model/gpu_cost_model_stats_collection.h\"\n#include \"xla/service/gpu/model/gpu_hlo_cost_analysis.h\"\n#include \"xla/service/gpu/prepare_hlo_for_ir_emitting_pipeline.h\"\n#include \"xla/service/gpu/reduction_utils.h\"\n#include \"xla/service/gpu/runtime_intrinsics.h\"\n#include \"xla/service/gpu/stream_executor_util.h\"\n#include \"xla/service/gpu/transforms/algebraic_simplifier.h\"\n#include \"xla/service/gpu/transforms/algorithm_checker.h\"\n#include \"xla/service/gpu/transforms/all_gather_optimizer.h\"\n#include \"xla/service/gpu/transforms/all_reduce_blueconnect.h\"\n#include \"xla/service/gpu/transforms/all_reduce_splitter.h\"\n#include \"xla/service/gpu/transforms/async_collective_annotator.h\"\n#include \"xla/service/gpu/transforms/async_wrapper.h\"\n#include \"xla/service/gpu/transforms/collective_permute_cycle_decomposer.h\"\n#include \"xla/service/gpu/transforms/collective_permute_valid_iteration_annotator.h\"\n#include \"xla/service/gpu/transforms/command_buffer_scheduling.h\"\n#include \"xla/service/gpu/transforms/conv_rewriter.h\"\n#include \"xla/service/gpu/transforms/convert_async_collectives_to_sync.h\"\n#include \"xla/service/gpu/transforms/cudnn_custom_call_converter.h\"\n#include \"xla/service/gpu/transforms/custom_kernel_fusion_rewriter.h\"\n#include \"xla/service/gpu/transforms/dot_dimension_sorter.h\"\n#include \"xla/service/gpu/transforms/dot_operand_converter.h\"\n#include \"xla/service/gpu/transforms/double_buffer_loop_unrolling.h\"\n#include 
\"xla/service/gpu/transforms/dynamic_slice_fusion_rewriter.h\"\n#include \"xla/service/gpu/transforms/fusion_block_level_rewriter.h\"\n#include \"xla/service/gpu/transforms/fusion_wrapper.h\"\n#include \"xla/service/gpu/transforms/gemm_broadcast_folding_rewriter.h\"\n#include \"xla/service/gpu/transforms/gemm_fusion.h\"\n#include \"xla/service/gpu/transforms/gemm_rewriter.h\"\n#include \"xla/service/gpu/transforms/gemv_rewriter.h\"\n#include \"xla/service/gpu/transforms/layout_assignment.h\"\n#include \"xla/service/gpu/transforms/move_copy_to_users.h\"\n#include \"xla/service/gpu/transforms/pipelined_p2p_rewriter.h\"\n#include \"xla/service/gpu/transforms/reduce_scatter_creator.h\"\n#include \"xla/service/gpu/transforms/reduction_degenerate_dim_remover.h\"\n#include \"xla/service/gpu/transforms/reduction_dimension_grouper.h\"\n#include \"xla/service/gpu/transforms/reduction_layout_normalizer.h\"\n#include \"xla/service/gpu/transforms/reduction_splitter.h\"\n#include \"xla/service/gpu/transforms/rename_fusions.h\"\n#include \"xla/service/gpu/transforms/sanitize_constant_names.h\"\n#include \"xla/service/gpu/transforms/scatter_expander.h\"\n#include \"xla/service/gpu/transforms/scatter_slice_simplifier.h\"\n#include \"xla/service/gpu/transforms/softmax_rewriter_triton.h\"\n#include \"xla/service/gpu/transforms/stream_attribute_annotator.h\"\n#include \"xla/service/gpu/transforms/stream_attribute_async_wrapper.h\"\n#include \"xla/service/gpu/transforms/topk_specializer.h\"\n#include \"xla/service/gpu/transforms/topk_splitter.h\"\n#include \"xla/service/gpu/transforms/transpose_dimension_grouper.h\"\n#include \"xla/service/gpu/transforms/tree_reduction_rewriter.h\"\n#include \"xla/service/gpu/transforms/triton_fusion_numerics_verifier.h\"\n#include \"xla/service/gpu/transforms/windowed_einsum_handler.h\"\n#include \"xla/service/hlo.pb.h\"\n#include \"xla/service/hlo_computation_deduplicator.h\"\n#include \"xla/service/hlo_constant_folding.h\"\n#include 
\"xla/service/hlo_cost_analysis.h\"\n#include \"xla/service/hlo_cse.h\"\n#include \"xla/service/hlo_dataflow_analysis.h\"\n#include \"xla/service/hlo_dce.h\"\n#include \"xla/service/hlo_module_config.h\"\n#include \"xla/service/hlo_rematerialization.h\"\n#include \"xla/service/hlo_verifier.h\"\n#include \"xla/service/host_memory_transfer_asyncifier.h\"\n#include \"xla/service/host_offload_legalize.h\"\n#include \"xla/service/host_offloader.h\"\n#include \"xla/service/layout_assignment.h\"\n#include \"xla/service/layout_normalization.h\"\n#include \"xla/service/llvm_ir/llvm_util.h\"\n#include \"xla/service/logistic_expander.h\"\n#include \"xla/service/operand_upcaster.h\"\n#include \"xla/service/optimization_barrier_expander.h\"\n#include \"xla/service/optimize_input_output_buffer_alias.h\"\n#include \"xla/service/qr_expander.h\"\n#include \"xla/service/real_imag_expander.h\"\n#include \"xla/service/reduce_decomposer.h\"\n#include \"xla/service/reduce_scatter_combiner.h\"\n#include \"xla/service/reduce_scatter_reassociate.h\"\n#include \"xla/service/reduce_window_rewriter.h\"\n#include \"xla/service/reshape_decomposer.h\"\n#include \"xla/service/reshape_mover.h\"\n#include \"xla/service/result_caster.h\"\n#include \"xla/service/rng_bit_generator_expander.h\"\n#include \"xla/service/rng_expander.h\"\n#include \"xla/service/scatter_expander.h\"\n#include \"xla/service/scatter_simplifier.h\"\n#include \"xla/service/sharding_remover.h\"\n#include \"xla/service/simplify_fp_conversions.h\"\n#include \"xla/service/slice_sinker.h\"\n#include \"xla/service/slow_operation_alarm.h\"\n#include \"xla/service/sort_simplifier.h\"\n#include \"xla/service/stable_sort_expander.h\"\n#include \"xla/service/stochastic_convert_decomposer.h\"\n#include \"xla/service/sub_byte_normalization.h\"\n#include \"xla/service/topk_rewriter.h\"\n#include \"xla/service/transpose_folding.h\"\n#include \"xla/service/tuple_simplifier.h\"\n#include 
\"xla/service/while_loop_all_reduce_code_motion.h\"\n#include \"xla/service/while_loop_constant_sinking.h\"\n#include \"xla/service/while_loop_simplifier.h\"\n#include \"xla/service/while_loop_trip_count_annotator.h\"\n#include \"xla/service/zero_sized_hlo_elimination.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/status_macros.h\"\n#include \"xla/stream_executor/device_description.h\"\n#include \"xla/stream_executor/device_description.pb.h\"\n#include \"xla/stream_executor/dnn.h\"\n#include \"xla/stream_executor/platform.h\"\n#include \"xla/stream_executor/platform_manager.h\"\n#include \"xla/stream_executor/semantic_version.h\"\n#include \"xla/stream_executor/stream_executor.h\"\n#include \"xla/util.h\"\n#include \"xla/xla.pb.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/blocking_counter.h\"\n#include \"tsl/platform/casts.h\"\n#include \"tsl/platform/cpu_info.h\"\n#include \"tsl/platform/env.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/logging.h\"\n#include \"tsl/platform/numbers.h\"\n#include \"tsl/platform/path.h\"\n#include \"tsl/platform/protobuf.h\" \n#include \"tsl/platform/statusor.h\"\n#include \"tsl/platform/threadpool.h\"\n#include \"tsl/profiler/lib/scoped_annotation.h\"\n#include \"tsl/profiler/lib/traceme.h\"\n#ifdef PLATFORM_GOOGLE\n#include \"xla/hlo/experimental/auto_sharding/auto_sharding.h\"\n#endif \nnamespace xla {\nnamespace gpu {\nnamespace {\nusing MaybeOwningThreadPool = MaybeOwning;\nMaybeOwningThreadPool CreateMaybeOwningThreadPool(\n int parallelism, tsl::thread::ThreadPool* default_thread_pool,\n int default_parallelism) {\n CHECK_GE(parallelism, 0);\n CHECK_GE(default_parallelism, 1);\n CHECK(default_thread_pool == nullptr ||\n default_thread_pool->CurrentThreadId() == -1);\n auto create_thread_pool = [&](int num_threads) {\n CHECK_GE(num_threads, 1);\n return std::make_unique(tsl::Env::Default(), \"\",\n num_threads);\n };\n switch (parallelism) {\n case 0:\n if 
(default_thread_pool == nullptr && default_parallelism > 1) {\n return MaybeOwningThreadPool(create_thread_pool(default_parallelism));\n }\n return MaybeOwningThreadPool(default_thread_pool);\n case 1:\n return MaybeOwningThreadPool(nullptr);\n default:\n return MaybeOwningThreadPool(create_thread_pool(parallelism));\n }\n}\nabsl::StatusOr GetAutotuneConfig(\n se::StreamExecutor* stream_exec, const DebugOptions& debug_options,\n const GpuCompiler::CompileOptions& options,\n const Compiler::TargetConfig& gpu_target_config) {\n if (stream_exec) {\n return AutotuneConfig{DeviceConfig{stream_exec, options.device_allocator},\n debug_options};\n }\n return AutotuneConfig{DevicelessConfig{gpu_target_config.device_description},\n debug_options};\n}\nse::GpuComputeCapability GetGpuVersion(const se::StreamExecutor* stream_exec) {\n return stream_exec->GetDeviceDescription().gpu_compute_capability();\n}\nclass GpuThunkAotCompilationResult : public AotCompilationResult {\n public:\n static absl::StatusOr>\n FromModule(const HloModule* hlo_module,\n const BufferAssignment* buffer_assignment,\n std::string_view asm_text, absl::Span binary,\n const BinaryMap& dnn_compiled_graphs) {\n CompilationResultProto proto;\n *proto.mutable_hlo_module_with_config() = hlo_module->ToProtoWithConfig();\n *proto.mutable_buffer_assignment() = buffer_assignment->ToProto();\n proto.set_asm_text(std::string(asm_text));\n proto.set_binary(binary.data(), binary.size());\n proto.mutable_dnn_compiled_graphs()->insert(dnn_compiled_graphs.cbegin(),\n dnn_compiled_graphs.cend());\n return std::unique_ptr(\n new GpuThunkAotCompilationResult(hlo_module->Clone(),\n std::move(proto)));\n }\n static absl::StatusOr>\n FromString(const std::string& serialized) {\n CompilationResultProto proto;\n if (!proto.ParseFromString(serialized)) {\n return Internal(\n \"Failed to parse serialized GpuThunkAotCompilationResult.\");\n }\n TF_ASSIGN_OR_RETURN(\n std::unique_ptr module,\n 
HloModule::CreateFromProtoWithConfig(proto.hlo_module_with_config()));\n return std::unique_ptr(\n new GpuThunkAotCompilationResult(std::move(module), std::move(proto)));\n }\n absl::StatusOr SerializeAsString() const override {\n return proto_.SerializeAsString();\n }\n absl::StatusOr> LoadExecutable(\n Compiler* compiler, const se::StreamExecutor* stream_exec) const override;\n const HloModule* optimized_module() const override { return module_.get(); }\n std::unique_ptr consume_optimized_module() override {\n return std::move(module_);\n }\n private:\n GpuThunkAotCompilationResult(std::unique_ptr module,\n CompilationResultProto proto)\n : module_(std::move(module)), proto_(std::move(proto)) {}\n std::unique_ptr module_;\n CompilationResultProto proto_;\n};\n} \nabsl::StatusOr>\nGpuThunkAotCompilationResult::LoadExecutable(\n Compiler* compiler, const se::StreamExecutor* stream_exec) const {\n TF_ASSIGN_OR_RETURN(\n std::unique_ptr hlo_module,\n HloModule::CreateFromProtoWithConfig(proto_.hlo_module_with_config()));\n TF_ASSIGN_OR_RETURN(\n std::unique_ptr buffer_assignment,\n BufferAssignment::FromProto(proto_.buffer_assignment(), hlo_module.get(),\n compiler->BufferSizeBytesFunction(),\n nullptr));\n ExecutionStreamAssignment execution_stream_assignment(hlo_module.get());\n std::vector binary(proto_.binary().begin(), proto_.binary().end());\n TF_ASSIGN_OR_RETURN(\n se::Platform * platform,\n se::PlatformManager::PlatformWithId(compiler->PlatformId()));\n std::string platform_name = platform->Name();\n const se::DeviceDescription& gpu_device_info =\n stream_exec->GetDeviceDescription();\n mlir::DialectRegistry registry;\n auto mlir_context = std::make_unique(registry);\n llvm::LLVMContext llvm_context;\n auto* gpu_compiler = dynamic_cast(compiler);\n if (gpu_compiler == nullptr) {\n return Internal(\"Compiler is not a GpuCompiler.\");\n }\n auto llvm_module = std::make_unique(\"\", llvm_context);\n llvm_module->setTargetTriple(gpu_compiler->target_triple());\n 
llvm_module->setDataLayout(gpu_compiler->data_layout());\n IrEmitterContext ir_emitter_context(\n hlo_module.get(), buffer_assignment.get(), &execution_stream_assignment,\n platform_name, gpu_device_info, mlir_context.get(), llvm_module.get(),\n nullptr,\n false);\n absl::string_view cache_file_path =\n hlo_module->config().debug_options().xla_gpu_kernel_cache_file();\n if (!cache_file_path.empty() &&\n hlo_module->config()\n .debug_options()\n .xla_gpu_enable_llvm_module_compilation_parallelism()) {\n TF_RETURN_IF_ERROR(LoadCache(ir_emitter_context, cache_file_path));\n }\n auto ir_emitter = IrEmitterUnnested::Create(&ir_emitter_context);\n TF_RETURN_IF_ERROR(\n ir_emitter->EmitHloComputation(hlo_module->entry_computation()));\n std::vector constants =\n std::move(ir_emitter_context.constants());\n TF_ASSIGN_OR_RETURN(auto output_info,\n GetOutputInfo(*hlo_module, *buffer_assignment));\n const Shape& output_shape = hlo_module->result_shape();\n int64_t debug_buffer_assignment_show_max =\n hlo_module->config()\n .debug_options()\n .xla_debug_buffer_assignment_show_max();\n TF_ASSIGN_OR_RETURN(\n std::unique_ptr executable,\n GpuExecutable::Create(GpuExecutable::Params{\n proto_.asm_text(),\n binary,\n BinaryMap(proto_.dnn_compiled_graphs().cbegin(),\n proto_.dnn_compiled_graphs().cend()),\n gpu_device_info.gpu_compute_capability(),\n ir_emitter->ConsumeThunkSequence(),\n std::move(constants),\n std::move(output_info),\n std::move(hlo_module->name()),\n std::move(output_shape),\n std::nullopt,\n std::move(buffer_assignment),\n debug_buffer_assignment_show_max,\n std::move(hlo_module),\n true}));\n return executable;\n}\nGpuCompiler::GpuCompiler(se::Platform::Id platform_id,\n const char* target_triple, const char* data_layout)\n : platform_id_(platform_id),\n target_triple_(target_triple),\n data_layout_(data_layout),\n pointer_size_(llvm::DataLayout(data_layout)\n .getPointerSize(0 )) {}\nnamespace {\nvoid AddHloVerifier(HloPassPipeline* pipeline,\n bool 
verify_unique_channel_ids = false,\n HloVerifierOpts&& opts = {}, bool debug_only = false) {\n opts.verify_unique_channel_ids = verify_unique_channel_ids;\n std::unique_ptr verifier_metadata =\n std::make_unique(std::move(opts));\n if (debug_only) {\n pipeline->AddInvariantCheckerDebug(\n std::move(verifier_metadata), \"hlo verifier (debug)\");\n } else {\n pipeline->AddInvariantChecker(std::move(verifier_metadata),\n \"hlo verifier\");\n }\n}\nvoid CheckNotScheduled(HloModule* hlo_module) {\n if (hlo_module->has_schedule() &&\n !hlo_module->config().debug_options().xla_disable_all_hlo_passes()) {\n LOG(WARNING) << \"\\nThe current HLO module \" << hlo_module->name()\n << \" is scheduled and optimized. \\n\"\n << \"It is not expected to run optimization passes again.\\n\"\n \"Use a test method like RunAndCompareNoHloPasses() or \"\n << \"the xla_disable_all_hlo_passes flag.\";\n }\n}\nvoid LogDebugOptions(HloModule* hlo_module) {\n XLA_VLOG_LINES(\n 1, absl::StrFormat(\"GpuCompilationEnvironment of hlo_module %s:\\n%s\",\n hlo_module->name(),\n hlo_module->config().debug_options().DebugString()));\n}\nAlgebraicSimplifierOptions LayoutInsensitiveAlgebraicSimplifierOptions(\n const HloModuleConfig& hlo_module_config,\n const Compiler::TargetConfig& gpu_target_config,\n AlgebraicSimplifierOptions opts_from_compiler) {\n AlgebraicSimplifierOptions layout_insensitive_algsimp_opts =\n opts_from_compiler;\n layout_insensitive_algsimp_opts.set_conv_is_lowerable_callback(\n ConvRewriter::ConvIsLowerable);\n layout_insensitive_algsimp_opts.set_enable_dot_strength_reduction(\n hlo_module_config.debug_options()\n .xla_gpu_enable_dot_strength_reduction());\n layout_insensitive_algsimp_opts.set_supports_non_canonical_dots(false);\n layout_insensitive_algsimp_opts.set_minmax_propagate_nan(\n !hlo_module_config.debug_options().xla_gpu_enable_fast_min_max());\n layout_insensitive_algsimp_opts\n .set_unconditionally_simplify_reduce_of_transpose_or_reshape(true);\n if 
(gpu_target_config.platform_name == \"ROCM\") {\n layout_insensitive_algsimp_opts.set_enable_conv_operand_swap(false);\n }\n layout_insensitive_algsimp_opts\n .set_enable_unconditional_reduce_of_concat_replacement(false);\n return layout_insensitive_algsimp_opts;\n}\nabsl::Status RunPreSPMDPartitionerPasses(HloModule* hlo_module) {\n HloPassPipeline pre_spmd_pipeline(\"pre-spmd-partitioner\");\n pre_spmd_pipeline.AddPass();\n pre_spmd_pipeline.AddPass();\n pre_spmd_pipeline.AddPass();\n pre_spmd_pipeline.AddPass();\n pre_spmd_pipeline.AddPass();\n pre_spmd_pipeline.AddPass();\n pre_spmd_pipeline.AddPass([&](const HloInstruction* instr) {\n return instr->opcode() == HloOpcode::kTopK;\n });\n pre_spmd_pipeline.AddPass(\n [](const HloSortInstruction*, int64_t) { return true; });\n return pre_spmd_pipeline.Run(hlo_module).status();\n}\nabsl::Status RunSPMDPasses(\n HloModule* hlo_module, const Compiler::TargetConfig& gpu_target_config,\n const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts) {\n bool auto_sharding = hlo_module->config().use_auto_spmd_partitioning();\n#ifndef PLATFORM_GOOGLE\n if (auto_sharding) {\n LOG(ERROR) << \"GPU autosharding is not yet available in open source.\";\n }\n#endif\n const int64_t num_partitions = hlo_module->config().num_partitions();\n if (num_partitions > 1) {\n if (!hlo_module->config().use_spmd_partitioning()) {\n return InvalidArgument(\n \"num_partitions=%d but SPMD partitioning not enabled.\",\n num_partitions);\n }\n HloPassPipeline spmd_pipeline(\"spmd-partitioner\");\n AddSPMDPasses(\n hlo_module, layout_insensitive_algsimp_opts,\n gpu_target_config.device_description.gpu_compute_capability(),\n spmd_pipeline,\n#ifdef PLATFORM_GOOGLE\n [&](HloPassPipeline& pipeline) {\n if (auto_sharding) {\n AutoShardingOption option;\n option.enable = true;\n if (!hlo_module->config()\n .auto_spmd_partitioning_mesh_shape()\n .empty()) {\n option.device_mesh_shape =\n hlo_module->config().auto_spmd_partitioning_mesh_shape();\n } 
else {\n option.device_mesh_shape = {\n gpu_target_config.device_description.core_count(), 1};\n }\n if (!hlo_module->config()\n .auto_spmd_partitioning_mesh_ids()\n .empty()) {\n option.device_mesh_ids =\n hlo_module->config().auto_spmd_partitioning_mesh_ids();\n }\n option.memory_budget_per_device =\n hlo_module->config()\n .debug_options()\n .xla_gpu_auto_spmd_partitioning_memory_budget_gb() *\n 1024 * 1024 * 1024;\n option.memory_budget_ratio =\n hlo_module->config()\n .debug_options()\n .xla_gpu_auto_spmd_partitioning_memory_budget_ratio();\n spmd_pipeline.AddPass(option);\n }\n });\n#else\n std::nullopt);\n#endif \n if (hlo_module->config()\n .debug_options()\n .xla_gpu_unsafe_pipelined_loop_annotator()) {\n spmd_pipeline.AddPass();\n spmd_pipeline.AddPass();\n }\n return spmd_pipeline.Run(hlo_module).status();\n } else {\n HloPassPipeline sharding_removal_pipeline(\"sharding-removal\");\n sharding_removal_pipeline.AddPass();\n sharding_removal_pipeline.AddPass();\n return sharding_removal_pipeline.Run(hlo_module).status();\n }\n}\nabsl::Status RunOptimizationPasses(\n HloModule* hlo_module, const Compiler::TargetConfig& gpu_target_config,\n const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts) {\n const DebugOptions& debug_options = hlo_module->config().debug_options();\n HloPassPipeline pipeline(\"optimization\");\n AddHloVerifier(&pipeline,\n !debug_options.xla_experimental_ignore_channel_id());\n if (debug_options.xla_gpu_multi_streamed_windowed_einsum()) {\n pipeline.AddPass();\n }\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass();\n HloPredicate upcaster_filter = [&](const HloInstruction* instr) {\n const auto* cuda_cc = std::get_if(\n &gpu_target_config.device_description.gpu_compute_capability());\n if (cuda_cc != nullptr &&\n !cuda_cc->IsAtLeast(se::CudaComputeCapability::VOLTA)) {\n return true;\n }\n return !gpu::IsMatrixMultiplication(*instr);\n };\n pipeline.AddPass();\n pipeline.AddPass();\n 
pipeline.AddPass(upcaster_filter);\n pipeline.AddPass(upcaster_filter);\n pipeline.AddPass();\n pipeline.AddPass(\n SubByteNormalization::SET_ELEMENT_SIZE);\n pipeline.AddPass();\n pipeline.AddPass(RandomAlgorithm::RNG_PHILOX);\n pipeline.AddPass(std::array{std::make_pair(BF16, F32)});\n pipeline.AddPass();\n if (RequireDeterminism(hlo_module->config())) {\n pipeline.AddPass(\n ScatterExpander::kEliminateIndeterministicScatters);\n }\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass(\n true,\n true,\n true);\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass();\n if (debug_options.xla_reduce_window_rewrite_base_length() != 0) {\n pipeline.AddPass>(\n debug_options.xla_reduce_window_rewrite_base_length());\n }\n DynamicPadderOptions dynamic_padder_options;\n switch (debug_options.xla_gpu_shape_checks()) {\n case DebugOptions::IGNORE:\n dynamic_padder_options.shape_check_mode =\n DynamicDimensionInference::ShapeCheckMode::kIgnore;\n break;\n case DebugOptions::RUNTIME: {\n dynamic_padder_options.shape_check_mode =\n DynamicDimensionInference::ShapeCheckMode::kRuntime;\n dynamic_padder_options.assertion_generator = [&](HloInstruction* inst) {\n auto created = Cast(\n inst->parent()->AddInstruction(HloInstruction::CreateCustomCall(\n ShapeUtil::MakeTokenShape(), {inst}, kXlaGpuAssertCustomCallTag,\n \"Buffers have different size at runtime\",\n API_VERSION_STATUS_RETURNING)));\n created->set_custom_call_has_side_effect(true);\n };\n break;\n }\n case DebugOptions::COMPILE_TIME:\n dynamic_padder_options.shape_check_mode =\n DynamicDimensionInference::ShapeCheckMode::kCompileTime;\n break;\n default:\n LOG(FATAL) << \"Unreachable\";\n }\n pipeline.AddPass(dynamic_padder_options);\n se::GpuComputeCapability gpu_version =\n gpu_target_config.device_description.gpu_compute_capability();\n [&, 
&pipeline =\n pipeline.AddPass>(\"simplification\")] {\n AddHloVerifier(&pipeline,\n !debug_options.xla_experimental_ignore_channel_id(),\n HloVerifierOpts{}, true);\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass(GatherExpander::kEliminateSimpleGathers);\n pipeline.AddPass();\n pipeline.AddPass(\n ScatterExpander::kEliminateSimpleScatters);\n pipeline.AddPass();\n pipeline.AddPass(layout_insensitive_algsimp_opts,\n gpu_version);\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass(\n int64_t{\n debug_options.xla_gpu_dot_merger_threshold_mb()}\n << 20);\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass();\n ReshapeMoverOptions reshape_mover_options;\n reshape_mover_options.reshape_of_1d_broadcast_is_cheap = true;\n pipeline.AddPass(reshape_mover_options);\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass(CanFoldTransposeOperandIntoDot);\n pipeline.AddPass(false);\n pipeline.AddPass();\n }();\n [&, &pipeline =\n pipeline.AddPass>(\"simplification-2\")] {\n pipeline.AddPass();\n pipeline.AddPass(layout_insensitive_algsimp_opts,\n gpu_version);\n }();\n pipeline.AddPass(\n false);\n return pipeline.Run(hlo_module).status();\n}\nabsl::Status AddCollectivePipelinerPasses(\n const DebugOptions& debug_options, HloPassPipeline& collectives_pipeline) {\n if (debug_options.xla_gpu_enable_pipelined_collectives() ||\n debug_options.xla_gpu_enable_pipelined_all_reduce()) {\n CollectivePipeliner::Config config{\n 0,\n INT64_MAX,\n true,\n false,\n true,\n CollectivePipeliner::PipeliningDirection::kForward,\n HloPredicateIsOp,\n HloPredicateTrue,\n HloPredicateFalse};\n collectives_pipeline.AddPass(config);\n }\n if (debug_options.xla_gpu_enable_pipelined_collectives() ||\n debug_options.xla_gpu_enable_pipelined_all_gather()) {\n CollectivePipeliner::Config config{\n 0,\n INT64_MAX,\n true,\n false,\n true,\n 
CollectivePipeliner::PipeliningDirection::kBackward,\n HloPredicateIsOp,\n HloPredicateTrue,\n HloPredicateFalse,\n HloPredicateFalse,\n false,\n std::nullopt,\n std::nullopt,\n true,\n };\n collectives_pipeline.AddPass(config);\n }\n if (debug_options.xla_gpu_enable_pipelined_collectives() ||\n debug_options.xla_gpu_enable_pipelined_reduce_scatter()) {\n CollectivePipeliner::Config config{\n 0,\n INT64_MAX,\n true,\n false,\n true,\n CollectivePipeliner::PipeliningDirection::kForward,\n HloPredicateIsOp,\n HloPredicateTrue,\n HloPredicateFalse};\n collectives_pipeline.AddPass(config);\n }\n return absl::OkStatus();\n}\nabsl::Status RunPostLayoutCollectivePipelinerPasses(HloModule* hlo_module) {\n const DebugOptions& debug_options = hlo_module->config().debug_options();\n HloPassPipeline collectives_pipeline(\"collective-pipeliner-optimizations\");\n if (debug_options.xla_gpu_run_post_layout_collective_pipeliner()) {\n TF_RETURN_IF_ERROR(\n AddCollectivePipelinerPasses(debug_options, collectives_pipeline));\n collectives_pipeline.AddPass();\n collectives_pipeline.AddPass();\n }\n return collectives_pipeline.Run(hlo_module).status();\n}\nabsl::Status RunCollectiveOptimizationPasses(\n HloModule* hlo_module,\n const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts,\n se::GpuComputeCapability gpu_version) {\n const DebugOptions& debug_options = hlo_module->config().debug_options();\n HloPassPipeline collectives_pipeline(\"collective-optimizations\");\n collectives_pipeline.AddPass();\n collectives_pipeline.AddPass();\n collectives_pipeline.AddPass();\n collectives_pipeline.AddPass(\n debug_options.xla_gpu_enable_reassociation_for_converted_ar());\n collectives_pipeline.AddPass();\n collectives_pipeline.AddPass(\n debug_options\n .xla_gpu_enable_while_loop_reduce_scatter_code_motion());\n if (!debug_options.xla_gpu_run_post_layout_collective_pipeliner()) {\n TF_RETURN_IF_ERROR(\n AddCollectivePipelinerPasses(debug_options, collectives_pipeline));\n }\n 
collectives_pipeline.AddPass();\n collectives_pipeline.AddPass(\n hlo_module->config()\n .debug_options()\n .xla_gpu_collective_permute_decomposer_threshold());\n collectives_pipeline.AddPass(\n hlo_module->config()\n .debug_options()\n .xla_gpu_collective_permute_decomposer_threshold());\n if (hlo_module->config()\n .debug_options()\n .xla_gpu_enable_pipelined_collectives() ||\n hlo_module->config().debug_options().xla_gpu_enable_pipelined_p2p()) {\n AddP2PPipeliner(collectives_pipeline);\n }\n collectives_pipeline.AddPass(\n layout_insensitive_algsimp_opts, gpu_version);\n collectives_pipeline.AddPass();\n const std::pair ar_promoted_types[] = {\n {U16, U32}, {S16, S32}};\n collectives_pipeline.AddPass(ar_promoted_types);\n collectives_pipeline.AddPass();\n collectives_pipeline.AddPass();\n collectives_pipeline.AddPass();\n collectives_pipeline.AddPass();\n return collectives_pipeline.Run(hlo_module).status();\n}\nabsl::Status RunLayoutAssignmentPasses(HloModule* hlo_module,\n se::GpuComputeCapability gpu_version,\n se::dnn::VersionInfo dnn_version) {\n HloPassPipeline pipeline(\"layout assignment\");\n pipeline.AddPass();\n ChannelLayoutConstraints layout_constraints;\n pipeline.AddPass(\n hlo_module->mutable_entry_computation_layout(), gpu_version, dnn_version,\n &layout_constraints);\n pipeline.AddPass(\n SubByteNormalization::SET_ELEMENT_SIZE);\n pipeline.AddPass(true);\n pipeline.AddPass(\n static_cast(stream_executor::MemoryType::kHost),\n true);\n return pipeline.Run(hlo_module).status();\n}\nabsl::Status RunFusionPasses(HloModule* hlo_module,\n const Compiler::TargetConfig& gpu_target_config,\n tsl::thread::ThreadPool* thread_pool,\n HloCostAnalysis::ShapeSizeFunction shape_size_fn) {\n const se::DeviceDescription& gpu_device_info =\n gpu_target_config.device_description;\n TF_RETURN_IF_ERROR(FusionPipeline(hlo_module->config().debug_options(),\n shape_size_fn, thread_pool, gpu_device_info)\n .Run(hlo_module)\n .status());\n if 
(hlo_module->config().debug_options().xla_gpu_collect_cost_model_stats()) {\n GpuHloCostAnalysis::Options cost_analysis_options{\n shape_size_fn,\n {},\n {},\n true};\n HloPassPipeline post_fusion_analysis(\"post_fusion_analysis\");\n post_fusion_analysis.AddPass(\n gpu_device_info, cost_analysis_options);\n TF_RETURN_IF_ERROR(post_fusion_analysis.Run(hlo_module).status());\n }\n TF_RETURN_IF_ERROR(\n HorizontalFusionPipeline(gpu_device_info).Run(hlo_module).status());\n if (VLOG_IS_ON(2)) {\n HloFusionStatsVisitor stats;\n TF_RETURN_IF_ERROR(hlo_module->entry_computation()->Accept(&stats));\n VLOG(2) << stats.ToString();\n }\n return absl::OkStatus();\n}\nvoid AddDoubleBufferingPasses(const DebugOptions& opts,\n HloPassPipeline& pipeline) {\n std::optional unroll_strategy =\n std::nullopt;\n if (opts.xla_gpu_enable_while_loop_double_buffering()) {\n unroll_strategy = DoubleBufferLoopUnrolling::UnrollStrategy::kDoubleBuffer;\n }\n if (opts.xla_gpu_enable_while_loop_unrolling() ==\n DebugOptions::WHILE_LOOP_UNROLLING_DOUBLE_BUFFER) {\n unroll_strategy = DoubleBufferLoopUnrolling::UnrollStrategy::kDoubleBuffer;\n }\n if (opts.xla_gpu_enable_while_loop_unrolling() ==\n DebugOptions::WHILE_LOOP_UNROLLING_FULL_UNROLL) {\n LOG_IF(WARNING, unroll_strategy != std::nullopt)\n << \"Overriding double buffering set via \"\n \"`xla_gpu_enable_while_loop_double_buffering` flag.\";\n unroll_strategy = DoubleBufferLoopUnrolling::UnrollStrategy::kFullUnroll;\n }\n if (opts.xla_gpu_enable_while_loop_unrolling() ==\n DebugOptions::WHILE_LOOP_UNROLLING_AUTO_UNROLL &&\n opts.xla_gpu_enable_heuristic_pass_configuration() &&\n !opts.xla_gpu_enable_while_loop_double_buffering()) {\n unroll_strategy = DoubleBufferLoopUnrolling::UnrollStrategy::kAuto;\n }\n if (unroll_strategy != std::nullopt) {\n pipeline.AddPass();\n pipeline.AddPass(*unroll_strategy);\n pipeline.AddPass();\n pipeline.AddPass();\n }\n}\nabsl::Status RunPostFusionPasses(\n HloModule* hlo_module,\n std::function\n 
add_custom_kernel_replacement_passes) {\n const DebugOptions& opts = hlo_module->config().debug_options();\n HloPassPipeline pipeline(\"post-fusion optimization\");\n pipeline.AddPass();\n pipeline.AddPass(\n opts.xla_gpu_all_gather_combine_threshold_bytes(),\n 256,\n opts.xla_gpu_enable_all_gather_combine_by_dim());\n pipeline.AddPass(\n opts.xla_gpu_all_reduce_combine_threshold_bytes(),\n 256);\n pipeline.AddPass(\n opts.xla_gpu_reduce_scatter_combine_threshold_bytes(),\n 256,\n opts.xla_gpu_enable_reduce_scatter_combine_by_dim());\n pipeline.AddPass();\n TF_RETURN_IF_ERROR(add_custom_kernel_replacement_passes(&pipeline, opts));\n int32_t blueconnect_num_devices_per_host =\n hlo_module->config()\n .debug_options()\n .xla_gpu_all_reduce_blueconnect_num_devices_per_host();\n if (blueconnect_num_devices_per_host > 0) {\n pipeline.AddPass(blueconnect_num_devices_per_host);\n }\n AddDoubleBufferingPasses(opts, pipeline);\n return pipeline.Run(hlo_module).status();\n}\nabsl::Status RunPostFusionCollectiveOptimizationPasses(HloModule* hlo_module) {\n HloPassPipeline pipeline(\"post-fusion-collectives optimization\");\n AsyncCollectiveCreator::CollectiveCreatorConfig config;\n config.convert_all_reduce = HloPredicateTrue;\n config.convert_collective_broadcast = HloPredicateTrue;\n config.convert_collective_permute = HloPredicateTrue;\n config.convert_all_gather = HloPredicateTrue;\n config.convert_reduce_scatter = HloPredicateTrue;\n config.convert_all_to_all = HloPredicateTrue;\n pipeline.AddPass(std::move(config));\n absl::flat_hash_set disabled_async_ops;\n for (auto collective_op_type : hlo_module->config()\n .debug_options()\n .xla_gpu_disable_async_collectives()) {\n disabled_async_ops.insert(\n static_cast(collective_op_type));\n }\n auto convert_to_async = [&disabled_async_ops](const HloInstruction* inst) {\n switch (inst->opcode()) {\n case HloOpcode::kAllReduceStart:\n return !disabled_async_ops.contains(DebugOptions::ALLREDUCE);\n case 
HloOpcode::kCollectivePermuteStart:\n return !disabled_async_ops.contains(DebugOptions::COLLECTIVEPERMUTE);\n case HloOpcode::kAllGatherStart:\n return !disabled_async_ops.contains(DebugOptions::ALLGATHER);\n case HloOpcode::kAsyncStart: {\n auto async_inst = Cast(inst);\n switch (async_inst->async_wrapped_opcode()) {\n case HloOpcode::kCollectiveBroadcast:\n return !disabled_async_ops.contains(\n DebugOptions::COLLECTIVEBROADCAST);\n case HloOpcode::kReduceScatter:\n return !disabled_async_ops.contains(DebugOptions::REDUCESCATTER);\n case HloOpcode::kAllToAll:\n return !disabled_async_ops.contains(DebugOptions::ALLTOALL);\n default:\n return false;\n }\n }\n default:\n return false;\n }\n };\n pipeline.AddPass(convert_to_async);\n return pipeline.Run(hlo_module).status();\n}\nabsl::Status RunPostFusionSimplificationPasses(\n HloModule* hlo_module,\n const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts,\n se::GpuComputeCapability gpu_version) {\n HloPassPipeline pipeline(\"post-fusion-simplification-pipeline optimization\");\n AlgebraicSimplifierOptions options = layout_insensitive_algsimp_opts;\n options.set_is_layout_sensitive(true);\n pipeline.AddPass(options, gpu_version);\n pipeline.AddPass(\n true);\n if (hlo_module->config()\n .debug_options()\n .xla_gpu_multi_streamed_windowed_einsum()) {\n pipeline.AddPass();\n pipeline.AddPass();\n }\n return pipeline.Run(hlo_module).status();\n}\nabsl::Status RunPostFusionVerificationPasses(\n HloModule* hlo_module, se::StreamExecutor* stream_exec,\n const GpuCompiler::CompileOptions& options,\n const Compiler::TargetConfig& gpu_target_config) {\n HloPassPipeline pipeline(\"post-fusion-verification-pipeline optimization\");\n if (hlo_module->config()\n .debug_options()\n .xla_gpu_verify_triton_fusion_numerics()) {\n TF_ASSIGN_OR_RETURN(\n AutotuneConfig autotune_config,\n GetAutotuneConfig(stream_exec, hlo_module->config().debug_options(),\n options, gpu_target_config));\n 
pipeline.AddPass(autotune_config);\n }\n return pipeline.Run(hlo_module).status();\n}\nabsl::Status RunLayoutNormalizationPasses(\n HloModule* hlo_module, const se::GpuComputeCapability& gpu_version) {\n HloPassPipeline layout_normalization_pipeline(\"layout normalization\");\n const DebugOptions& debug_options = hlo_module->config().debug_options();\n AlgebraicSimplifierOptions opts =\n GpuCompiler::GetAlgebraicSimplifierOptions(hlo_module->config());\n opts.set_supports_non_canonical_dots(false);\n opts.set_is_layout_sensitive(true);\n opts.set_enable_conv_operand_swap(false);\n opts.set_minmax_propagate_nan(!debug_options.xla_gpu_enable_fast_min_max());\n opts.set_enable_unconditional_reduce_of_concat_replacement(false);\n layout_normalization_pipeline.AddPass();\n layout_normalization_pipeline.AddPass>();\n layout_normalization_pipeline.AddPass(\n &NormalizeLayoutForGpuCustomCalls);\n layout_normalization_pipeline.AddPass>(\n opts, gpu_version);\n layout_normalization_pipeline.AddPass();\n layout_normalization_pipeline.AddPass();\n return layout_normalization_pipeline.Run(hlo_module).status();\n}\nabsl::Status RunAsyncDotPasses(HloModule* hlo_module) {\n HloPassPipeline pipeline(\"async-wrapper\");\n const DebugOptions& debug_options = hlo_module->config().debug_options();\n if (debug_options.xla_gpu_async_dot()) {\n pipeline.AddPass([](HloInstruction* instruction) {\n if (IsCublasGemm(*instruction)) {\n return true;\n }\n if (instruction->called_computations().size() == 1 &&\n IsTritonFusedComputation(\n *instruction->called_computations().front())) {\n return true;\n }\n return false;\n });\n }\n return pipeline.Run(hlo_module).status();\n}\nabsl::Status RunDynamicSliceFusionPasses(HloModule* hlo_module,\n se::Platform::Id platform_id) {\n if (hlo_module->config()\n .debug_options()\n .xla_gpu_enable_dynamic_slice_fusion()) {\n HloPassPipeline pipeline(\"dynamic-slice\");\n TF_ASSIGN_OR_RETURN(se::Platform * platform,\n 
se::PlatformManager::PlatformWithId(platform_id));\n pipeline.AddPass(platform->Name());\n TF_RETURN_IF_ERROR(pipeline.Run(hlo_module).status());\n }\n return absl::OkStatus();\n}\n} \nabsl::Status GpuCompiler::RunCollectiveScheduleLinearizerPasses(\n HloModule* hlo_module, se::StreamExecutor* stream_exec) {\n HloPassPipeline pipeline(\"collective-schedule-linearizer\");\n pipeline.AddPass(\n [this, stream_exec](const HloModule* module) {\n return RequiresCollectiveScheduleLinearizer(module, stream_exec);\n });\n return pipeline.Run(hlo_module).status();\n}\nabsl::Status GpuCompiler::OptimizeHloModule(\n HloModule* hlo_module, se::StreamExecutor* stream_exec,\n const CompileOptions& options, const TargetConfig& gpu_target_config) {\n tsl::profiler::TraceMe traceme(\"GpuCompiler::OptimizeHloModule\");\n CheckNotScheduled(hlo_module);\n LogDebugOptions(hlo_module);\n MaybeOwningThreadPool thread_pool = CreateMaybeOwningThreadPool(\n hlo_module->config()\n .debug_options()\n .xla_gpu_force_compilation_parallelism(),\n options.thread_pool,\n tsl::port::MaxParallelism());\n AlgebraicSimplifierOptions layout_insensitive_algsimp_opts =\n LayoutInsensitiveAlgebraicSimplifierOptions(\n hlo_module->config(), gpu_target_config,\n GetAlgebraicSimplifierOptions(hlo_module->config()));\n TF_RETURN_IF_ERROR(RunPreSPMDPartitionerPasses(hlo_module));\n TF_RETURN_IF_ERROR(RunSPMDPasses(hlo_module, gpu_target_config,\n layout_insensitive_algsimp_opts));\n TF_RETURN_IF_ERROR(RunOptimizationPasses(hlo_module, gpu_target_config,\n layout_insensitive_algsimp_opts));\n se::GpuComputeCapability gpu_version =\n gpu_target_config.device_description.gpu_compute_capability();\n TF_RETURN_IF_ERROR(RunCollectiveOptimizationPasses(\n hlo_module, layout_insensitive_algsimp_opts, gpu_version));\n se::dnn::VersionInfo dnn_version = gpu_target_config.dnn_version_info;\n if (stream_exec != nullptr) {\n gpu_version = GetGpuVersion(stream_exec);\n TF_ASSIGN_OR_RETURN(dnn_version, 
GetDnnVersionInfo(stream_exec));\n }\n TF_RETURN_IF_ERROR(OptimizeHloConvolutionCanonicalization(\n hlo_module, gpu_version, dnn_version, options.device_allocator,\n gpu_target_config.device_description.runtime_version()));\n TF_RETURN_IF_ERROR(\n RunLayoutAssignmentPasses(hlo_module, gpu_version, dnn_version));\n TF_RETURN_IF_ERROR(RunLayoutNormalizationPasses(hlo_module, gpu_version));\n TF_RETURN_IF_ERROR(OptimizeHloPostLayoutAssignment(\n hlo_module, stream_exec, options, gpu_target_config,\n thread_pool.get_mutable()));\n TF_RETURN_IF_ERROR(RunPostLayoutCollectivePipelinerPasses(hlo_module));\n TF_RETURN_IF_ERROR(RunDynamicSliceFusionPasses(hlo_module, PlatformId()));\n TF_RETURN_IF_ERROR(RunFusionPasses(hlo_module, gpu_target_config,\n thread_pool.get_mutable(),\n ShapeSizeBytesFunction()));\n TF_RETURN_IF_ERROR(RunPostFusionPasses(\n hlo_module,\n [this](HloPassPipeline* pipeline, const DebugOptions& debug_options) {\n return AddCustomKernelReplacementPasses(pipeline, debug_options);\n }));\n TF_RETURN_IF_ERROR(RunPostFusionCollectiveOptimizationPasses(hlo_module));\n TF_RETURN_IF_ERROR(RunPostFusionSimplificationPasses(\n hlo_module, layout_insensitive_algsimp_opts, gpu_version));\n TF_RETURN_IF_ERROR(RunPostFusionVerificationPasses(\n hlo_module, stream_exec, options, gpu_target_config));\n TF_RETURN_IF_ERROR(\n RunCollectiveScheduleLinearizerPasses(hlo_module, stream_exec));\n TF_RETURN_IF_ERROR(RunAsyncDotPasses(hlo_module));\n return absl::OkStatus();\n} \nAlgebraicSimplifierOptions GpuCompiler::GetAlgebraicSimplifierOptions(\n const HloModuleConfig& config) {\n AlgebraicSimplifierOptions opts;\n opts.set_enable_dot_strength_reduction(\n config.debug_options().xla_gpu_enable_dot_strength_reduction());\n return opts;\n}\nabsl::Status GpuCompiler::PrepareHloModuleForIrEmitting(HloModule* hlo_module) {\n return PrepareHloModuleForIrEmittingPipeline(*hlo_module, GetCanShareBuffer())\n .Run(hlo_module)\n .status();\n}\nnamespace {\nvoid 
AddGemmRewriterPasses(HloPassPipeline& pipeline,\n const DebugOptions& debug_options,\n const se::GpuComputeCapability gpu_version,\n const se::SemanticVersion& toolkit_version) {\n GemmRewriterOptions::BiasMode bias_mode =\n GemmRewriterOptions::BiasMode::kBias;\n if (debug_options.xla_gpu_async_dot()) {\n bias_mode = GemmRewriterOptions::BiasMode::kNoBias;\n }\n pipeline.AddPass(\n gpu_version, toolkit_version,\n GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only, bias_mode});\n pipeline.AddPass(\n gpu_version, toolkit_version,\n GemmRewriterOptions{GemmRewriterOptions::DType::kNonFp8Only, bias_mode});\n}\n} \nabsl::Status GpuCompiler::OptimizeHloPostLayoutAssignment(\n HloModule* hlo_module, se::StreamExecutor* stream_exec,\n const CompileOptions& options, const TargetConfig& gpu_target_config,\n tsl::thread::ThreadPool* thread_pool) {\n const DebugOptions& debug_options = hlo_module->config().debug_options();\n const se::GpuComputeCapability gpu_version =\n gpu_target_config.device_description.gpu_compute_capability();\n const AlgebraicSimplifierOptions simplifier_options = [&] {\n AlgebraicSimplifierOptions opts =\n GetAlgebraicSimplifierOptions(hlo_module->config());\n opts.set_supports_non_canonical_dots(false);\n opts.set_is_layout_sensitive(true);\n opts.set_enable_conv_operand_swap(false);\n opts.set_minmax_propagate_nan(!debug_options.xla_gpu_enable_fast_min_max());\n opts.set_enable_unconditional_reduce_of_concat_replacement(false);\n return opts;\n }();\n TF_ASSIGN_OR_RETURN(AutotuneConfig autotune_config,\n GetAutotuneConfig(stream_exec, debug_options, options,\n gpu_target_config));\n const GpuFloatSupport bf16_support(gpu_version, BF16);\n const GpuFloatSupport f8e5m2_support(gpu_version, F8E5M2, F16);\n const GpuFloatSupport f8e4m3_support(gpu_version, F8E4M3, F16);\n const GpuFloatSupport f8e4m3fn_support(gpu_version, F8E4M3FN, F16);\n const FloatSupport f8e4m3b11fnuz_support(F8E4M3B11FNUZ, F16);\n const GpuFloatSupport 
f8e5m2fnuz_support(gpu_version, F8E5M2FNUZ, F16);\n const GpuFloatSupport f8e4m3fnuz_support(gpu_version, F8E4M3FNUZ, F16);\n const GpuFloatSupport f8e3m4_support(gpu_version, F8E3M4, F16);\n auto add_float_normalization = [&](HloPassPipeline& pipeline) {\n auto& sub_pipeline =\n pipeline.AddPass(\"float_normalization\");\n sub_pipeline.AddPass(&bf16_support);\n sub_pipeline.AddPass(&f8e5m2_support);\n sub_pipeline.AddPass(&f8e4m3_support);\n sub_pipeline.AddPass(&f8e4m3fn_support);\n sub_pipeline.AddPass(&f8e4m3b11fnuz_support);\n sub_pipeline.AddPass(&f8e5m2fnuz_support);\n sub_pipeline.AddPass(&f8e4m3fnuz_support);\n sub_pipeline.AddPass(&f8e3m4_support);\n if (debug_options.xla_allow_excess_precision()) {\n sub_pipeline.AddPass();\n }\n };\n {\n HloPassPipeline pipeline(\"hlo normalization\");\n pipeline.AddPass>(simplifier_options,\n gpu_version);\n pipeline.AddPass(CanFoldTransposeOperandIntoDot,\n TransposeFolding::NeverFoldTranspose);\n pipeline.AddPass();\n pipeline.AddPass([&](const HloInstruction* r) {\n return IsReductionFromOrToContiguousDimensions(*r);\n });\n if (debug_options.xla_gpu_enable_custom_fusions()) {\n pipeline.AddPass();\n pipeline.AddPass(\n &gpu_target_config.device_description);\n pipeline.AddPass(autotune_config);\n }\n se::GpuComputeCapability gpu_version =\n gpu_target_config.device_description.gpu_compute_capability();\n pipeline.AddPass(gpu_version);\n const auto* cuda_cc = std::get_if(&gpu_version);\n const auto* rocm_cc = std::get_if(&gpu_version);\n if (debug_options.xla_gpu_enable_triton_gemm() &&\n (cuda_cc != nullptr &&\n cuda_cc->IsAtLeast(se::CudaComputeCapability::AMPERE))) {\n pipeline.AddPass();\n pipeline.AddPass(gpu_version);\n } else if (cuda_cc != nullptr &&\n cuda_cc->major == se::CudaComputeCapability::VOLTA) {\n pipeline.AddPass();\n pipeline.AddPass(\n &gpu_target_config.device_description);\n pipeline.AddPass(autotune_config);\n }\n AddGemmRewriterPasses(\n pipeline, debug_options, gpu_version,\n 
gpu_target_config.device_description.runtime_version());\n pipeline.AddPass();\n pipeline.AddPass(&NormalizeLayoutForGpuCustomCalls);\n pipeline.AddPass>(simplifier_options,\n gpu_version);\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass();\n if (debug_options\n .xla_gpu_experimental_enable_triton_softmax_priority_fusion() &&\n ((cuda_cc != nullptr &&\n cuda_cc->IsAtLeast(se::CudaComputeCapability::AMPERE)) ||\n rocm_cc != nullptr)) {\n add_float_normalization(pipeline);\n pipeline.AddPass>(simplifier_options,\n gpu_version);\n pipeline.AddPass(true);\n pipeline.AddPass();\n pipeline.AddPass();\n pipeline.AddPass(\n gpu_target_config.device_description, ShapeSizeBytesFunction(),\n true);\n }\n pipeline.AddPass();\n bool ignore_small_reduce_dims =\n !debug_options.xla_gpu_enable_priority_fusion();\n pipeline.AddPass>(ignore_small_reduce_dims);\n pipeline.AddPass>(gpu_version);\n pipeline.AddPass(\n SubByteNormalization::SET_ELEMENT_SIZE);\n TF_RETURN_IF_ERROR(pipeline.Run(hlo_module).status());\n }\n HloPassPipeline pipeline(\"post-layout_assignment\");\n AddHloVerifier(&pipeline, !debug_options.xla_experimental_ignore_channel_id(),\n HloVerifierOpts{}\n .MakeLayoutSensitive()\n .WithInstructionCanChangeLayout(\n LayoutAssignment::InstructionCanChangeLayout)\n .VerifyBroadcastDimensionsOrder()\n .VerifyReshapeIsBitcast(),\n true);\n add_float_normalization(pipeline);\n TF_RETURN_IF_ERROR(AddGemmFusionAutotuningPasses(\n &pipeline, hlo_module, autotune_config, thread_pool,\n options.key_value_store,\n gpu_target_config.device_description.runtime_version()));\n pipeline.AddPass();\n AddGemmRewriterPasses(pipeline, debug_options, gpu_version,\n gpu_target_config.device_description.runtime_version());\n pipeline.AddPass();\n pipeline.AddPass(\n static_cast(stream_executor::MemoryType::kHost));\n TF_RETURN_IF_ERROR(\n AddConvAndGemmAutotuningPasses(&pipeline, gpu_version, options,\n hlo_module, autotune_config, 
thread_pool));\n add_float_normalization(pipeline);\n pipeline.AddPass();\n pipeline.AddPass>(simplifier_options,\n gpu_version);\n if (debug_options.xla_allow_excess_precision()) {\n pipeline.AddPass();\n }\n pipeline.AddPass(true);\n pipeline.AddPass(\n static_cast(stream_executor::MemoryType::kHost));\n#ifdef NDEBUG\n HloVerifierOpts opts = HloVerifierOpts{}\n .MakeLayoutSensitive()\n .WithInstructionCanChangeLayout(\n LayoutAssignment::InstructionCanChangeLayout)\n .VerifyBroadcastDimensionsOrder()\n .VerifyReshapeIsBitcast();\n opts.verify_unique_channel_ids =\n !debug_options.xla_experimental_ignore_channel_id();\n pipeline.AddPass(\n std::make_unique(std::move(opts)),\n \"end-of-post-layout_assignment\");\n#endif \n TF_RETURN_IF_ERROR(pipeline.Run(hlo_module).status());\n return absl::OkStatus();\n}\n absl::StatusOr GpuCompiler::GetTargetConfig(\n const Compiler::CompileOptions& options, const DebugOptions& debug_opts,\n se::StreamExecutor* executor) {\n if (options.target_config.has_value()) {\n return *options.target_config;\n }\n if (!debug_opts.xla_gpu_target_config_filename().empty()) {\n std::string gpu_target_config_string;\n TF_RETURN_IF_ERROR(tsl::ReadFileToString(\n tsl::Env::Default(), debug_opts.xla_gpu_target_config_filename(),\n &gpu_target_config_string));\n stream_executor::GpuTargetConfigProto gpu_target_config_proto;\n if (!tsl::protobuf::TextFormat::ParseFromString(gpu_target_config_string,\n &gpu_target_config_proto)) {\n return absl::FailedPreconditionError(\n \"Failed to parse GpuTargetConfigProto\");\n }\n return Compiler::TargetConfig{gpu_target_config_proto};\n }\n if (executor) {\n Compiler::TargetConfig target_config = Compiler::TargetConfig{executor};\n int64_t device_memory_size =\n target_config.device_description.device_memory_size();\n if (device_memory_size == -1) {\n return absl::FailedPreconditionError(\n \"When running on an NVIDIA simulation device, you must use \"\n \"--xla_gpu_target_config_filename to pass in target 
information. \"\n \"The target config from StreamExecutor is inaccurate.\");\n }\n return target_config;\n }\n return absl::InternalError(\n \"Either GPU has to be attached, or --xla_gpu_target_config_filename \"\n \"has to be specified to specify the target to compile for.\");\n}\nabsl::StatusOr> GpuCompiler::RunHloPasses(\n std::unique_ptr module, se::StreamExecutor* stream_exec,\n const CompileOptions& options) {\n const DebugOptions debug_opts = module->config().debug_options();\n TF_RETURN_IF_ERROR(LoadAutotuneResultsFromFile(debug_opts));\n bool is_deviceless = options.target_config.has_value() ||\n !debug_opts.xla_gpu_target_config_filename().empty();\n TF_ASSIGN_OR_RETURN(TargetConfig gpu_target_config,\n GetTargetConfig(options, debug_opts, stream_exec));\n const std::optional unoptimized_fingerprint =\n MaybeUploadUnoptimizedGpuSymbols(module.get(),\n gpu_target_config.ToProto());\n XLA_SCOPED_LOGGING_TIMER_IF(\n absl::StrCat(\"GpuCompiler::RunHloPasses for \", module->name()),\n !options.is_autotuning_compilation);\n uint64_t start_usecs = tsl::Env::Default()->NowMicros();\n tsl::profiler::TraceMe activity(\n [&] { return absl::StrCat(\"HLO Transforms:\", module->name()); },\n tsl::profiler::TraceMeLevel::kInfo);\n TF_RETURN_IF_ERROR(OptimizeHloModule(module.get(),\n is_deviceless ? 
nullptr : stream_exec,\n options, gpu_target_config));\n TF_RETURN_IF_ERROR(PrepareHloModuleForIrEmitting(module.get()));\n if (module->config()\n .debug_options()\n .xla_gpu_experimental_enable_fusion_block_level_rewriter()) {\n HloPassPipeline pipeline(\"fusion-block-level-rewriter-pipeline\");\n pipeline.AddPass(\n gpu_target_config.device_description, ShapeSizeBytesFunction());\n TF_RETURN_IF_ERROR(pipeline.Run(module.get()).status());\n }\n uint64_t end_usecs = tsl::Env::Default()->NowMicros();\n RecordHloPassesDuration(end_usecs - start_usecs);\n DumpHloModuleMetadataIfEnabled({module.get()});\n AutotuneResults autotune_results;\n TF_ASSIGN_OR_RETURN(\n AutotuneConfig autotune_config,\n GetAutotuneConfig(stream_exec, debug_opts, options, gpu_target_config));\n if (!is_deviceless) {\n TF_RETURN_IF_ERROR(\n AutotunerUtil::SerializeAutotuneResults(&autotune_results));\n TF_RETURN_IF_ERROR(SerializeAutotuneResultsToFile(debug_opts));\n }\n const std::optional optimized_fingerprint =\n MaybeUploadOptimizedGpuSymbols(module.get(), autotune_results);\n if (unoptimized_fingerprint.has_value() &&\n optimized_fingerprint.has_value()) {\n MaybeUploadGpuSymbolMapping(*unoptimized_fingerprint,\n *optimized_fingerprint);\n }\n if (DumpingEnabledForHloModule(*module)) {\n TF_ASSIGN_OR_RETURN(\n std::string autotune_results,\n AutotunerUtil::SerializeAutotuneResults(true));\n DumpToFileInDirOrStdout(*module, \"\", \"autotune_results.pbtxt\",\n autotune_results);\n }\n return std::move(module);\n}\nnamespace {\nabsl::Status RunPostSchedulingCopyInsertion(\n HloModule* module,\n const HloDataflowAnalysis::CanShareBuffer& can_share_buffer) {\n constexpr int64_t kRegionBasedLiveRangeAnalysisLimit = -1;\n const int64_t kUseRegionBasedLiveRangeAnalysis =\n module->config()\n .debug_options()\n .xla_gpu_copy_insertion_use_region_analysis()\n ? 
kRegionBasedLiveRangeAnalysisLimit\n : 0;\n CopyInsertion copy_insertion(can_share_buffer,\n kUseRegionBasedLiveRangeAnalysis);\n TF_RETURN_IF_ERROR(copy_insertion.RemoveUnnecessaryCopies(module));\n HloSchedule saved_schedule = module->schedule();\n module->clear_schedule();\n TF_RETURN_IF_ERROR(\n copy_insertion.CopyInsertion::AddSpecialCaseCopies(module));\n TF_RETURN_IF_ERROR(HloDCE().Run(module).status());\n TF_RETURN_IF_ERROR(saved_schedule.Update());\n TF_RETURN_IF_ERROR(module->set_schedule(std::move(saved_schedule)));\n return absl::OkStatus();\n}\n} \nusing OutputInfoMap =\n absl::flat_hash_map;\nstatic void NullDiagnosticHandler(const llvm::DiagnosticInfo* diag_info,\n void* context) {\n std::string error_string;\n llvm::raw_string_ostream string_printer(error_string);\n llvm::DiagnosticPrinterRawOStream diagnostic_printer(string_printer);\n diag_info->print(diagnostic_printer);\n VLOG(5) << error_string;\n}\nnamespace {\nstd::unique_ptr CopyToContext(const llvm::Module& module,\n llvm::LLVMContext& context) {\n llvm::SmallString<0> bitcode;\n llvm::raw_svector_ostream bitcode_ostream(bitcode);\n llvm::WriteBitcodeToFile(module, bitcode_ostream);\n llvm::Expected> new_module =\n llvm::parseBitcodeFile(\n llvm::MemoryBufferRef(llvm::StringRef(bitcode.data(), bitcode.size()),\n \"split_module\"),\n context);\n CHECK(new_module) << \"Failed to parse bitcode \"\n << llvm::toString(new_module.takeError());\n return std::move(new_module.get());\n}\n} \nabsl::StatusOr\nGpuCompiler::CompileSingleModule(const HloModuleConfig& module_config,\n se::GpuComputeCapability gpu_version,\n const HloModule* debug_module,\n llvm::Module* llvm_module, bool relocatable,\n const CompileOptions& options,\n std::optional shard_number) {\n {\n XLA_SCOPED_LOGGING_TIMER_IF(\n absl::StrCat(\n \"GpuCompiler::RunBackend - Running LLVM verifier for \",\n (debug_module != nullptr ? 
debug_module->name() : \"(unknown)\")),\n VLOG_IS_ON(4) && !options.is_autotuning_compilation);\n llvm_module->getContext().setDiagnosticHandlerCallBack(\n NullDiagnosticHandler, nullptr);\n std::string err;\n llvm::raw_string_ostream err_stream(err);\n TF_RET_CHECK(!llvm::verifyModule(*llvm_module, &err_stream))\n << \"Invalid LLVM IR before optimizations:\\n\"\n << err_stream.str()\n << \"\\nThis probably indicates a bug in the HLO -> LLVM IR \"\n \"lowering. Rerun with --xla_dump_to to get the IR\"\n << (debug_module\n ? absl::StrCat(\" and looks for files with name containing: *\",\n FilenameFor(*debug_module, \"\", \"\"), \"*\")\n : \".\");\n }\n TF_ASSIGN_OR_RETURN(\n BackendCompileResult result,\n CompileTargetBinary(module_config, llvm_module, gpu_version, relocatable,\n debug_module, options));\n const bool should_dump = DumpingEnabledForHloModule(\n debug_module ? debug_module->name() : \"\", module_config.debug_options());\n if (should_dump) {\n if (debug_module) {\n llvm_ir::DumpIrIfEnabled(\n *debug_module, *llvm_module,\n true,\n shard_number.has_value() ? std::to_string(*shard_number) : \"\");\n } else {\n LOG(ERROR) << \"Dumping is not implemented since the file name cannot be \"\n \"inferred. Please implement (potentially MLIR) module -> \"\n \"filename heuristic.\";\n }\n }\n if (user_post_optimization_hook_) {\n user_post_optimization_hook_(*llvm_module);\n }\n if (should_dump) {\n absl::string_view ptx = result.asm_text;\n if (debug_module) {\n DumpToFileInDirOrStdout(*debug_module, \"\",\n shard_number.has_value()\n ? (std::to_string(*shard_number) + \".ptx\")\n : \"ptx\",\n ptx);\n } else {\n LOG(ERROR) << \"Dumping is not implemented since the file name cannot be \"\n \"inferred. 
Please implement (potentially MLIR) module -> \"\n \"filename heuristic.\";\n }\n }\n return result;\n}\nnamespace {\nint CountFunctions(const llvm::Module& module) {\n int num_functions = 0;\n for (const llvm::Function& func : module.functions()) {\n if (!func.isDeclaration() &&\n func.getLinkage() == llvm::GlobalValue::LinkageTypes::ExternalLinkage) {\n ++num_functions;\n }\n }\n return num_functions;\n}\nstd::string SingleFunctionName(const llvm::Module& module) {\n std::string name;\n for (const llvm::Function& func : module.functions()) {\n if (!func.isDeclaration() &&\n func.getLinkage() == llvm::GlobalValue::LinkageTypes::ExternalLinkage) {\n if (name.empty()) {\n name = func.getName().str();\n } else {\n return \"\";\n }\n }\n }\n return name;\n}\n} \nabsl::StatusOr GpuCompiler::CompileAndLink(\n const HloModuleConfig& module_config,\n CompileModuleResults& compile_module_results,\n se::GpuComputeCapability gpu_version, se::StreamExecutor* stream_exec,\n const CompileOptions& options, const HloModule* debug_module) {\n llvm::Module* llvm_module = &*compile_module_results.llvm_module;\n bool force_module_split =\n module_config.debug_options().xla_llvm_force_inline_before_split();\n if (force_module_split) {\n for (llvm::Function& func : llvm_module->functions()) {\n if (func.getNumUses() > 0 && !func.isDeclaration()) {\n VLOG(4) << absl::StrFormat(\"Inlining function %s with %d users.\\n\",\n func.getName().str(), func.getNumUses());\n std::vector calls_to_inline;\n for (auto* user : func.users()) {\n if (auto* call = llvm::dyn_cast(user)) {\n calls_to_inline.push_back(call);\n }\n }\n for (auto* call_to_inline : calls_to_inline) {\n llvm::InlineFunctionInfo inline_function_info;\n if (!llvm::InlineFunction(*call_to_inline, inline_function_info)\n .isSuccess()) {\n return absl::InternalError(\"Can not inline function \" +\n func.getName().str());\n };\n }\n }\n }\n }\n llvm::DenseMap const_initializer_map;\n llvm::Module& module_with_constants =\n 
(compile_module_results.llvm_module_constants == nullptr)\n ? *llvm_module\n : *compile_module_results.llvm_module_constants;\n for (llvm::GlobalVariable& gv : module_with_constants.globals()) {\n if (gv.hasName() && gv.isConstant() && gv.hasInitializer() &&\n gv.hasExternalLinkage()) {\n llvm::Constant* initializer = gv.getInitializer();\n unsigned int num_elements = 0;\n if (auto* caz =\n llvm::dyn_cast(initializer)) {\n num_elements = caz->getElementCount().getFixedValue();\n } else if (auto* cds = llvm::dyn_cast(\n initializer)) {\n num_elements = cds->getNumElements();\n }\n if (num_elements > 0) {\n const_initializer_map[gv.getName()] = initializer;\n }\n }\n }\n llvm_ir::DumpIrIfEnabled(*debug_module, *llvm_module,\n false, \"inlined\");\n absl::string_view cache_path =\n module_config.debug_options().xla_gpu_kernel_cache_file();\n const bool use_cache = !cache_path.empty();\n struct NamedModule {\n std::string name;\n std::unique_ptr module;\n };\n std::vector llvm_modules;\n MaybeOwningThreadPool thread_pool = CreateMaybeOwningThreadPool(\n module_config.debug_options()\n .xla_gpu_force_compilation_parallelism(),\n options.thread_pool,\n 1);\n int num_modules = CountFunctions(*llvm_module);\n if (thread_pool.get() != nullptr && !use_cache) {\n num_modules = std::max(1, std::min(thread_pool->NumThreads(), num_modules));\n }\n if (compile_module_results.llvm_module_constants != nullptr) {\n llvm_modules.reserve(num_modules + 1);\n llvm_modules.push_back(\n {\"\", std::move(compile_module_results.llvm_module_constants)});\n } else {\n llvm_modules.reserve(num_modules);\n }\n int single_function_module_count = 0;\n llvm::SplitModule(\n *llvm_module, num_modules,\n [&](std::unique_ptr module) {\n for (llvm::GlobalVariable& gv : module->globals()) {\n if (gv.hasName() && gv.isConstant() && !gv.hasInitializer() &&\n const_initializer_map.count(gv.getName()) != 0) {\n gv.setInitializer(const_initializer_map[gv.getName()]);\n 
gv.setLinkage(llvm::GlobalValue::InternalLinkage);\n }\n }\n const std::string name = SingleFunctionName(*module);\n if (!name.empty()) {\n ++single_function_module_count;\n }\n llvm_modules.push_back({name, std::move(module)});\n },\n true, true);\n VLOG(2) << \"Single-function cacheable modules: \"\n << single_function_module_count << \" / \" << llvm_modules.size();\n struct NamedCompileResult {\n std::string name;\n absl::StatusOr result;\n };\n std::vector compile_results(llvm_modules.size());\n if (thread_pool.get() != nullptr) {\n tsl::BlockingCounter counter(llvm_modules.size());\n for (int i = 0; i < llvm_modules.size(); ++i) {\n thread_pool.get_mutable()->Schedule(\n [&compile_results, i, &llvm_modules, &counter, this, &module_config,\n &gpu_version, &debug_module, &options] {\n llvm::LLVMContext new_context;\n std::unique_ptr new_module =\n CopyToContext(*llvm_modules.at(i).module, new_context);\n compile_results.at(i) = {\n llvm_modules.at(i).name,\n CompileSingleModule(module_config, gpu_version, debug_module,\n new_module.get(),\n true, options,\n i)};\n counter.DecrementCount();\n });\n }\n counter.Wait();\n } else {\n for (int i = 0; i < llvm_modules.size(); ++i) {\n compile_results.at(i) = {\n llvm_modules.at(i).name,\n CompileSingleModule(module_config, gpu_version, debug_module,\n &*llvm_modules.at(i).module,\n true, options,\n i)};\n }\n }\n std::string ptx_snippets;\n std::vector> binaries_to_link;\n binaries_to_link.reserve(compile_results.size());\n std::vector binaries_to_cache;\n binaries_to_cache.reserve(single_function_module_count);\n for (const auto& [name, maybe_result] : compile_results) {\n TF_ASSIGN_OR_RETURN(auto result, maybe_result);\n if (result.binary.empty()) {\n continue;\n }\n ptx_snippets += result.asm_text;\n ptx_snippets += \"\\n\";\n binaries_to_link.push_back(result.binary);\n if (!name.empty()) {\n binaries_to_cache.push_back({name, result.binary});\n }\n }\n if (use_cache) {\n std::string resolved_path;\n if 
(!tsl::io::ResolveTestPrefixes(cache_path, resolved_path)) {\n return FailedPrecondition(\"File path can not be resolved: %s\",\n cache_path);\n }\n const CompilationCacheProto& current_cache =\n compile_module_results.kernel_compilation_cache;\n const bool cache_file_exists =\n tsl::Env::Default()->FileExists(resolved_path).ok();\n if (cache_file_exists) {\n int loaded_kernel_count = 0;\n for (const auto& [name, entry] : current_cache.entries()) {\n if (llvm_module->getFunction(name) != nullptr) {\n VLOG(5) << \"Using the just compiled kernel for \" << name;\n TF_RET_CHECK(entry.binary().empty())\n << name\n << \" is a just compiled kernel and is not expected to have a \"\n \"binary yet.\";\n continue;\n }\n const uint8_t* binary =\n reinterpret_cast(entry.binary().data());\n binaries_to_link.push_back(\n std::vector(binary, binary + entry.binary().size()));\n VLOG(5) << \"Using \" << name << \" from cache: \" << entry.binary().size();\n ++loaded_kernel_count;\n }\n VLOG(2) << \"Using \" << loaded_kernel_count << \" / \"\n << current_cache.entries_size() << \" cached kernels.\";\n }\n if (!binaries_to_cache.empty()) {\n TF_RETURN_IF_ERROR(\n UpdateDiskKernelCache(resolved_path, cache_file_exists,\n current_cache, binaries_to_cache));\n }\n }\n auto maybe_backend_result =\n LinkModules(gpu_version, stream_exec, std::move(binaries_to_link),\n module_config.debug_options());\n if (!maybe_backend_result.ok()) {\n LOG(ERROR) << \"The CUDA linking API did not work. Please use XLA_FLAGS=\"\n \"--xla_gpu_enable_llvm_module_compilation_parallelism=false \"\n \"to bypass it, but expect to get longer compilation time due \"\n \"to the lack of multi-threading. 
Original error: \"\n << maybe_backend_result.status();\n return maybe_backend_result.status();\n }\n VLOG(4) << \"Binary size after linking [B]: \" << maybe_backend_result->size();\n compile_module_results.kernel_compilation_cache.Clear();\n return BackendCompileResult{ptx_snippets, std::move(*maybe_backend_result)};\n}\nabsl::StatusOr\nGpuCompiler::CompileToBackendResult(\n HloModule* module, llvm::LLVMContext* llvm_context,\n se::StreamExecutor* executor, const CompileOptions& options,\n const se::DeviceDescription& gpu_device_info) {\n tsl::profiler::TraceMe traceme(\"GpuCompiler::CompileToBackendResult\");\n TF_RETURN_IF_ERROR(RunPreSchedulingPasses(module, executor));\n TF_ASSIGN_OR_RETURN(\n ScheduleMetadata schedule_metadata,\n ScheduleGpuModule(module, pointer_size_, gpu_device_info));\n TF_RETURN_IF_ERROR(RunPostSchedulingPipelines(\n module, schedule_metadata.scheduler_mem_limit, gpu_device_info));\n TF_ASSIGN_OR_RETURN(se::Platform * platform,\n se::PlatformManager::PlatformWithId(PlatformId()));\n bool can_use_link_modules = (executor != nullptr);\n if (can_use_link_modules) {\n TF_ASSIGN_OR_RETURN(can_use_link_modules,\n CanUseLinkModules(module->config()));\n }\n const bool split_modules =\n can_use_link_modules &&\n module->config()\n .debug_options()\n .xla_gpu_enable_llvm_module_compilation_parallelism();\n const bool use_cache =\n split_modules &&\n !module->config().debug_options().xla_gpu_kernel_cache_file().empty();\n TF_ASSIGN_OR_RETURN(\n CompileModuleResults compile_module_results,\n CompileModuleToLlvmIr(module, llvm_context, target_triple_, data_layout_,\n platform->Name(), platform->id(), gpu_device_info,\n GetCanShareBuffer(), BufferSizeBytesFunction(),\n use_cache));\n if (user_pre_optimization_hook_) {\n user_pre_optimization_hook_(*compile_module_results.llvm_module);\n if (compile_module_results.llvm_module_constants != nullptr) {\n user_pre_optimization_hook_(\n *compile_module_results.llvm_module_constants);\n }\n }\n 
llvm_ir::DumpIrIfEnabled(*module, *compile_module_results.llvm_module,\n false);\n if (compile_module_results.llvm_module_constants != nullptr) {\n llvm_ir::DumpIrIfEnabled(*module,\n *compile_module_results.llvm_module_constants,\n false, \"constants\");\n }\n BackendCompileResult backend_result;\n if (split_modules) {\n TF_ASSIGN_OR_RETURN(backend_result,\n CompileAndLink(module->config(), compile_module_results,\n gpu_device_info.gpu_compute_capability(),\n executor, options, module));\n } else {\n CHECK(compile_module_results.llvm_module_constants == nullptr);\n TF_ASSIGN_OR_RETURN(\n backend_result,\n CompileSingleModule(module->config(),\n gpu_device_info.gpu_compute_capability(), module,\n &*compile_module_results.llvm_module,\n false, options,\n std::nullopt));\n }\n RecordXlaDeviceBinarySize(backend_result.binary.size());\n if (DumpingEnabledForHloModule(*module)) {\n DumpToFileInDirOrStdout(\n *module, \"\", \"thunk_sequence.txt\",\n compile_module_results.executable->ToString(0));\n }\n return CompileResultWithMetadata{std::move(backend_result),\n std::move(compile_module_results)};\n}\nabsl::StatusOr> GpuCompiler::RunBackend(\n std::unique_ptr module, se::StreamExecutor* stream_exec,\n const CompileOptions& options) {\n tsl::profiler::ScopedAnnotation backend_annotation{[&] {\n return absl::StrFormat(\"XlaCompileBackend:#module=%s,program_id=%d#\",\n module->name(), module->unique_id());\n }};\n BinaryMap dnn_compiled_graphs;\n if (stream_exec) {\n TF_RETURN_IF_ERROR(RunCudnnCompilerPasses(module.get(), stream_exec,\n &dnn_compiled_graphs));\n }\n const DebugOptions& debug_opts = module->config().debug_options();\n TF_ASSIGN_OR_RETURN(TargetConfig gpu_target_config,\n GetTargetConfig(options, debug_opts, stream_exec));\n if (DumpingEnabledForHloModule(*module)) {\n std::string textproto;\n tsl::protobuf::TextFormat::PrintToString(gpu_target_config.ToProto(),\n &textproto);\n DumpToFileInDirOrStdout(*module, \"\", \"gpu_target_config.pbtxt\", 
textproto);\n }\n if (!options.is_autotuning_compilation) {\n VLOG(1) << \"Starting to compile HLO module \" << module->name();\n }\n XLA_SCOPED_LOGGING_TIMER_IF(\n absl::StrCat(\"GpuCompiler::RunBackend for \", module->name()),\n !options.is_autotuning_compilation);\n std::string slow_compilation_msg =\n absl::StrCat(\"Compiling module \", module->name());\n auto slow_compile_alarm = SlowCompilationAlarm(slow_compilation_msg);\n if (options.is_autotuning_compilation) {\n if (module->config().debug_options().xla_embed_ir_in_executable()) {\n LOG(WARNING) << \"Doing autotuning compilations with \"\n \"xla_embed_ir_in_executable wastes memory!\";\n }\n }\n llvm::LLVMContext llvm_context;\n const se::DeviceDescription& gpu_device_info =\n gpu_target_config.device_description;\n if (module->config().hlo_profiling_enabled() || VLOG_IS_ON(1)) {\n HloCostAnalysis::Options cost_analysis_options{ShapeSizeBytesFunction()};\n cost_analysis_options.set_bytes_per_second(\n gpu_device_info.memory_bandwidth());\n GpuHloCostAnalysis cost_analysis(cost_analysis_options, gpu_device_info);\n TF_RETURN_IF_ERROR(module->entry_computation()->Accept(&cost_analysis));\n if (!options.is_autotuning_compilation) {\n VLOG(1) << \"HLO memory read+written: \"\n << tsl::strings::HumanReadableNumBytes(\n cost_analysis.bytes_accessed());\n }\n if (module->config().hlo_profiling_enabled()) {\n LOG(ERROR) << \"--xla_hlo_profile for GPU is unsupported.\";\n }\n }\n TF_ASSIGN_OR_RETURN(\n CompileResultWithMetadata res,\n CompileToBackendResult(module.get(), &llvm_context, stream_exec, options,\n gpu_device_info));\n if (DumpingEnabledForHloModule(*module)) {\n DumpToFileInDirOrStdout(\n *module, \"\", \"thunk_sequence.txt\",\n res.compile_module_results.executable->ToString(0));\n }\n bool embed_ir_in_executable =\n module->config().debug_options().xla_embed_ir_in_executable();\n int64_t debug_buffer_assignment_show_max =\n module->config().debug_options().xla_debug_buffer_assignment_show_max();\n 
tsl::profiler::ScopedAnnotation annotation([&] {\n return absl::StrFormat(\"XlaCreateGpuExecutable:#module=%s#\",\n module->name());\n });\n TF_ASSIGN_OR_RETURN(\n auto gpu_executable,\n GpuExecutable::Create(GpuExecutable::Params{\n (options.is_autotuning_compilation &&\n !res.backend_result.binary.empty())\n ? std::string()\n : std::move(res.backend_result.asm_text),\n std::move(res.backend_result.binary),\n std::move(dnn_compiled_graphs),\n gpu_device_info.gpu_compute_capability(),\n std::move(res.compile_module_results.executable),\n std::move(res.compile_module_results.constants),\n std::move(res.compile_module_results.output_info),\n std::move(res.compile_module_results.module_name),\n std::move(res.compile_module_results.output_shape),\n (res.compile_module_results.use_original_allocations\n ? std::optional>()\n : std::move(res.compile_module_results.allocations)),\n std::move(res.compile_module_results.buffer_assignment),\n debug_buffer_assignment_show_max,\n options.is_autotuning_compilation\n ? 
std::unique_ptr()\n : std::move(module),\n !options.is_autotuning_compilation}));\n if (embed_ir_in_executable) {\n std::string ir_module_string_before_opt =\n llvm_ir::DumpToString(res.compile_module_results.llvm_module.get());\n gpu_executable->set_ir_module_string(ir_module_string_before_opt);\n DCHECK_NE(\"\", ir_module_string_before_opt);\n }\n IncrementCompiledProgramsCount();\n if (!options.is_autotuning_compilation && gpu_executable->has_module()) {\n auto hlo_proto = std::make_unique();\n *hlo_proto->mutable_buffer_assignment() =\n gpu_executable->buffer_assignment()->ToProto();\n gpu_executable->set_hlo_proto(std::move(hlo_proto));\n gpu_executable->set_debug_info(\n gpu_executable->buffer_assignment()->GetStats().ToString());\n }\n return static_cast>(std::move(gpu_executable));\n}\nabsl::StatusOr>>\nGpuCompiler::CompileAheadOfTime(std::unique_ptr module_group,\n const AotCompilationOptions& options) {\n CHECK_EQ(options.PlatformId(), PlatformId());\n std::vector> modules =\n module_group->ConsumeModules();\n std::vector> optimized_modules;\n optimized_modules.reserve(modules.size());\n for (std::unique_ptr& module : modules) {\n if (!module->has_schedule()) {\n tsl::profiler::ScopedAnnotation annotation{[&] {\n return absl::StrFormat(\"XlaCompile:#module=%s,program_id=%d#\",\n module->name(), module->unique_id());\n }};\n CompileOptions compile_options;\n compile_options.device_allocator = options.device_allocator();\n compile_options.target_config = options.target_config();\n TF_ASSIGN_OR_RETURN(\n std::unique_ptr optimized_module,\n RunHloPasses(std::move(module), options.executor(), compile_options));\n optimized_modules.push_back(std::move(optimized_module));\n } else {\n optimized_modules.push_back(std::move(module));\n }\n }\n modules = std::move(optimized_modules);\n std::vector> results;\n const std::optional& target_config =\n options.target_config();\n CHECK(target_config.has_value() || options.executor() != nullptr);\n const 
se::DeviceDescription& gpu_device_info =\n target_config.has_value() ? target_config->device_description\n : options.executor()->GetDeviceDescription();\n for (const std::unique_ptr& module : modules) {\n llvm::LLVMContext llvm_context;\n TF_ASSIGN_OR_RETURN(\n CompileResultWithMetadata res,\n CompileToBackendResult(module.get(), &llvm_context, options.executor(),\n {options.device_allocator()}, gpu_device_info));\n TF_ASSIGN_OR_RETURN(\n results.emplace_back(),\n GpuThunkAotCompilationResult::FromModule(\n module.get(), res.compile_module_results.buffer_assignment.get(),\n res.backend_result.asm_text, res.backend_result.binary,\n res.backend_result.dnn_compiled_graphs));\n }\n return std::move(results);\n}\nHloCostAnalysis::ShapeSizeFunction GpuCompiler::ShapeSizeBytesFunction() const {\n return [pointer_size = pointer_size_](const Shape& shape) {\n return GetSizeOfShape(shape, pointer_size);\n };\n}\nabsl::StatusOr> GpuCompiler::Export(\n Executable* executable) const {\n auto* gpu_executable = tensorflow::down_cast(executable);\n if (!gpu_executable) return Internal(\"GpuExecutable is null\");\n return GpuThunkAotCompilationResult::FromModule(\n &gpu_executable->module(), gpu_executable->buffer_assignment(),\n gpu_executable->text(), gpu_executable->binary(),\n gpu_executable->dnn_compiled_graphs());\n}\nabsl::Status GpuCompiler::RunPreSchedulingPasses(\n HloModule* module, se::StreamExecutor* stream_exec) {\n HloPassPipeline pipeline(\"pre-scheduling-passes\");\n pipeline.AddPass();\n return pipeline.Run(module).status();\n}\nHloCostAnalysis::Options CreateHloAnalysisOpts(\n const HloModule& module, const se::DeviceDescription& gpu_device_info,\n ShapeSizeFn shape_size_fn) {\n HloCostAnalysis::Options hlo_cost_analysis_options;\n hlo_cost_analysis_options.shape_size = shape_size_fn;\n std::optional\n offloading_config = std::nullopt;\n if (module.config().debug_options().xla_gpu_enable_host_memory_offloading()) {\n constexpr float kGiga = 1e+9;\n constexpr 
float kFma = 2;\n float flops_per_sec = gpu_device_info.core_count() *\n gpu_device_info.fpus_per_core() *\n gpu_device_info.clock_rate_ghz() * kGiga * kFma;\n int64_t host_memory_space_color =\n static_cast(se::MemoryType::kHost);\n hlo_cost_analysis_options.set_flops_per_second(flops_per_sec);\n hlo_cost_analysis_options.set_transcendentals_per_second(flops_per_sec);\n offloading_config =\n std::make_optional(\n host_memory_space_color,\n gpu_device_info.memory_bandwidth(),\n gpu_device_info.memory_bandwidth());\n }\n return hlo_cost_analysis_options;\n}\nHloRematerialization::Options CreateRematOpts(\n const HloModule& module, const se::DeviceDescription& gpu_device_info,\n HloCostAnalysis& hlo_cost_analysis, int64_t scheduler_mem_limit) {\n bool enable_offloading =\n module.config().debug_options().xla_gpu_enable_host_memory_offloading();\n std::optional\n offloading_config = std::nullopt;\n if (enable_offloading) {\n int64_t host_memory_space_color =\n static_cast(se::MemoryType::kHost);\n offloading_config =\n std::make_optional(\n host_memory_space_color,\n gpu_device_info.memory_bandwidth(),\n gpu_device_info.memory_bandwidth());\n }\n HloRematerialization::RematerializationModeConfig\n rematerialization_mode_config(true, true,\n enable_offloading);\n HloRematerialization::Options options(\n hlo_cost_analysis, rematerialization_mode_config,\n scheduler_mem_limit,\n 1, 1,\n 0, nullptr,\n offloading_config);\n return options;\n}\nabsl::Status GpuCompiler::RunPostSchedulingPipelines(\n HloModule* module, int64_t scheduler_mem_limit,\n const se::DeviceDescription& gpu_device_info) const {\n TF_RETURN_IF_ERROR(\n RunPostSchedulingCopyInsertion(module, GetCanShareBuffer()));\n HloPassPipeline main_pipeline(\"post-scheduling-passes\");\n HloPredicate is_nop =\n HloPredicateIsOp;\n {\n HloPassPipeline& pipeline =\n main_pipeline.AddPass(\"async-to-sync-converter\");\n if (module->config()\n .debug_options()\n .xla_gpu_enable_pipelined_collectives() ||\n 
module->config().debug_options().xla_gpu_enable_pipelined_p2p()) {\n pipeline.AddPass();\n }\n pipeline.AddPass(is_nop);\n }\n HloRematerialization::RematerializationSizes sizes;\n HloCostAnalysis::Options hlo_cost_analysis_opts =\n CreateHloAnalysisOpts(*module, gpu_device_info, ShapeSizeBytesFunction());\n HloCostAnalysis hlo_cost_analysis(hlo_cost_analysis_opts);\n HloRematerialization::Options remat_opts = CreateRematOpts(\n *module, gpu_device_info, hlo_cost_analysis, scheduler_mem_limit);\n {\n HloPassPipeline& pipeline =\n main_pipeline.AddPass(\"remat-pipeline\");\n pipeline.AddPass(remat_opts, sizes);\n pipeline.AddPass();\n pipeline.AddPass();\n }\n {\n HloPassPipeline& pipeline =\n main_pipeline.AddPass(\"fusion-wrapper\");\n pipeline.AddPass();\n }\n {\n HloPassPipeline& pipeline =\n main_pipeline.AddPass(\"command-buffer-scheduling\");\n pipeline.AddPass(gpu_device_info);\n pipeline.AddPass();\n }\n if (module->config().debug_options().xla_gpu_enable_pgle_accuracy_checker()) {\n AddHloVerifier(\n &main_pipeline,\n module->config().debug_options().xla_experimental_ignore_channel_id(),\n HloVerifierOpts{}.VerifyInstructionNameUnchanged());\n }\n return main_pipeline.Run(module).status();\n}\nabsl::Status GpuCompiler::LoadAutotuneResultsFromFile(\n const DebugOptions& debug_options) {\n if (absl::string_view file_path =\n debug_options.xla_gpu_load_autotune_results_from();\n !file_path.empty()) {\n static absl::once_flag once;\n absl::Status status = absl::OkStatus();\n absl::call_once(once, [&file_path, &status] {\n status = AutotunerUtil::LoadAutotuneResultsFromFile(file_path);\n });\n TF_RETURN_IF_ERROR(status);\n }\n return absl::OkStatus();\n}\nabsl::Status GpuCompiler::SerializeAutotuneResultsToFile(\n const DebugOptions& debug_options) {\n if (absl::string_view file_path =\n debug_options.xla_gpu_dump_autotune_results_to();\n !file_path.empty()) {\n TF_RETURN_IF_ERROR(\n AutotunerUtil::SerializeAutotuneResultsToFile(file_path));\n }\n return 
absl::OkStatus();\n}\nabsl::StatusOr>\nGpuCompiler::LoadAotCompilationResult(\n const std::string& serialized_aot_result) {\n return LoadAotCompilationResultStatic(serialized_aot_result);\n}\nabsl::StatusOr>\nGpuCompiler::LoadAotCompilationResultStatic(\n const std::string& serialized_aot_result) {\n return GpuThunkAotCompilationResult::FromString(serialized_aot_result);\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/gpu/gpu_compiler.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/match.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/strings/substitute.h\"\n#include \"xla/autotune_results.pb.h\"\n#include \"xla/error_spec.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_module_group.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/literal.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/primitive_util.h\"\n#include \"xla/service/compiler.h\"\n#include \"xla/service/executable.h\"\n#include \"xla/service/gpu/autotuning/autotuner_util.h\"\n#include \"xla/service/gpu/gpu_hlo_schedule.h\"\n#include \"xla/service/gpu/metrics.h\"\n#include \"xla/service/hlo_module_config.h\"\n#include \"xla/service/pattern_matcher.h\"\n#include \"xla/service/pattern_matcher_gmock.h\"\n#include \"xla/service/xla_debug_info_manager.h\"\n#include \"xla/stream_executor/device_description.h\"\n#include \"xla/stream_executor/platform.h\"\n#include \"xla/stream_executor/platform_manager.h\"\n#include \"xla/tests/filecheck.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tests/literal_test_util.h\"\n#include \"xla/tests/verified_hlo_module.h\"\n#include 
\"xla/tsl/lib/core/status_test_util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/casts.h\"\n#include \"tsl/platform/env.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/path.h\"\n#include \"tsl/platform/protobuf.h\"\n#include \"tsl/platform/statusor.h\"\n#include \"tsl/platform/test.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nnamespace m = ::xla::match;\nusing ::testing::IsEmpty;\nusing ::testing::Not;\nusing ::testing::TempDir;\nclass GpuCompilerTest : public HloTestBase {\n public:\n absl::Status Schedule(HloModule* module) {\n auto compiler = backend().compiler();\n const se::DeviceDescription& gpu_device_info =\n backend().default_stream_executor()->GetDeviceDescription();\n TF_RETURN_IF_ERROR(ScheduleGpuModule(module, 4, gpu_device_info).status());\n return tensorflow::down_cast(compiler)\n ->RunPostSchedulingPipelines(module, 4 * 1024 * 1024, gpu_device_info);\n }\n const stream_executor::GpuComputeCapability& GpuComputeComp() {\n return backend()\n .default_stream_executor()\n ->GetDeviceDescription()\n .gpu_compute_capability();\n }\n};\nTEST_F(GpuCompilerTest, CompiledProgramsCount) {\n const char* hlo_text = R\"(\nHloModule test\nENTRY main {\n p = f32[10]{0} parameter(0)\n ROOT neg = f32[10]{0} negate(p)\n}\n)\";\n auto module = ParseAndReturnVerifiedModule(hlo_text).value();\n ResetCompiledProgramsCountForTesting();\n std::unique_ptr executable =\n backend()\n .compiler()\n ->RunBackend(std::move(module), backend().default_stream_executor(),\n {nullptr,\n nullptr,\n {},\n false})\n .value();\n EXPECT_EQ(GetCompiledProgramsCount(), 1);\n}\nTEST_F(GpuCompilerTest, GenerateDebugInfoForNonAutotuningCompilations) {\n const char* hlo_text = R\"(\nHloModule test\nENTRY main {\n p = f32[10]{0} parameter(0)\n ROOT neg = f32[10]{0} negate(p)\n}\n)\";\n auto module = ParseAndReturnVerifiedModule(hlo_text).value();\n std::unique_ptr executable =\n backend()\n .compiler()\n ->RunBackend(std::move(module), 
backend().default_stream_executor(),\n {nullptr,\n nullptr,\n {},\n false})\n .value();\n EXPECT_TRUE(XlaDebugInfoManager::Get()->TracksModule(\n executable->module().unique_id()));\n}\nTEST_F(GpuCompilerTest, DoesNotGenerateDebugInfoForAutotuningCompilations) {\n const char* hlo_text = R\"(\nHloModule test\nENTRY main {\n p = f32[10]{0} parameter(0)\n ROOT neg = f32[10]{0} negate(p)\n}\n)\";\n auto module = ParseAndReturnVerifiedModule(hlo_text).value();\n int module_id = module->unique_id();\n std::unique_ptr executable =\n backend()\n .compiler()\n ->RunBackend(std::move(module), backend().default_stream_executor(),\n {nullptr,\n nullptr,\n {},\n true})\n .value();\n EXPECT_FALSE(XlaDebugInfoManager::Get()->TracksModule(module_id));\n}\nTEST_F(GpuCompilerTest, CopyInsertionFusion) {\n const char* hlo_text = R\"(\nHloModule cluster\nENTRY main {\n cst = f32[1]{0} constant({0})\n ROOT tuple_out = (f32[1]{0}, f32[1]{0}, f32[1]{0}, f32[1]{0}) tuple(cst, cst, cst, cst)\n}\n)\";\n EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{0, 0}));\n auto module = ParseAndReturnVerifiedModule(hlo_text).value();\n std::unique_ptr compiled_module =\n backend()\n .compiler()\n ->RunHloPasses(module->Clone(), backend().default_stream_executor(),\n nullptr)\n .value();\n VLOG(2) << compiled_module->ToString();\n size_t total_fusion_instrs = 0;\n for (const HloInstruction* instr :\n compiled_module->entry_computation()->instructions()) {\n if (instr->opcode() == HloOpcode::kFusion) {\n ++total_fusion_instrs;\n }\n }\n EXPECT_EQ(total_fusion_instrs, 1);\n const HloInstruction* entry_root =\n compiled_module->entry_computation()->root_instruction();\n EXPECT_THAT(\n entry_root,\n GmockMatch(m::Tuple(\n m::GetTupleElement(m::Fusion()), m::GetTupleElement(m::Fusion()),\n m::GetTupleElement(m::Fusion()), m::GetTupleElement(m::Fusion()))));\n}\nTEST_F(GpuCompilerTest, CanRunScheduledModules) {\n HloModuleConfig config;\n DebugOptions debug_options = GetDebugOptionsForTest();\n 
debug_options.set_xla_disable_all_hlo_passes(true);\n config.set_debug_options(debug_options);\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(R\"(\nHloModule m, is_scheduled=true\nw {\n p = s8[] parameter(0)\n ROOT n = s8[] negate(p)\n}\nENTRY e {\n p = s8[] parameter(0)\n ROOT _ = s8[] fusion(p), kind=kLoop, calls=w\n})\",\n config));\n EXPECT_TRUE(Run(std::move(module), true));\n}\nTEST_F(GpuCompilerTest, NonFusedInstructionsAreWrapped) {\n HloModuleConfig config;\n DebugOptions debug_options = GetDebugOptionsForTest();\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(R\"(\nHloModule m\nENTRY e {\n p = f32[2,4,4] parameter(0)\n ROOT _ = f32[2,4,4]{2,1,0} transpose(p), dimensions={0,2,1}\n})\",\n config));\n config.set_debug_options(debug_options);\n std::unique_ptr executable =\n backend()\n .compiler()\n ->RunBackend(std::move(module), backend().default_stream_executor(),\n {nullptr,\n nullptr,\n {},\n false})\n .value();\n HloModule& compiled_module = executable->module();\n const HloInstruction* entry_root =\n compiled_module.entry_computation()->root_instruction();\n EXPECT_THAT(entry_root, GmockMatch(m::Fusion()));\n}\nclass PersistedAutotuningTest : public HloTestBase {\n protected:\n static constexpr absl::string_view kHloText = R\"(\nHloModule t\nENTRY e {\n p0 = f16[1,16,17,3] parameter(0)\n p1 = s8[16,17,3] parameter(1)\n cp1 = f16[16,17,3] convert(p1)\n ROOT _ = f16[1,16,16] dot(p0, cp1),\n lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2}\n})\";\n std::string GetUniqueTempFilePath(absl::string_view suffix) {\n std::string filename = TempDir();\n CHECK(tsl::Env::Default()->CreateUniqueFileName(&filename,\n std::string(suffix)));\n return filename;\n }\n std::string ExpectToReadNonEmptyFile(absl::string_view file_path) {\n std::string str;\n tsl::Env* env = tsl::Env::Default();\n TF_EXPECT_OK(tsl::ReadFileToString(env, std::string(file_path), &str));\n EXPECT_THAT(str, 
Not(IsEmpty()));\n return str;\n }\n DebugOptions GetDebugOptionsForTest() override {\n DebugOptions options = HloTestBase::GetDebugOptionsForTest();\n options.set_xla_gpu_dump_autotune_results_to(\n xla_gpu_dump_autotune_results_to_);\n options.set_xla_gpu_load_autotune_results_from(\n xla_gpu_load_autotune_results_from_);\n return options;\n }\n std::string xla_gpu_dump_autotune_results_to_;\n std::string xla_gpu_load_autotune_results_from_;\n};\nTEST_F(PersistedAutotuningTest, WriteResultsOnEachCompilation) {\n constexpr absl::string_view kInvalidTextProto = \"Invalid!\";\n xla_gpu_dump_autotune_results_to_ = GetUniqueTempFilePath(\".txt\");\n TF_EXPECT_OK(GetOptimizedModule(kHloText).status());\n {\n std::string autotune_results_str =\n ExpectToReadNonEmptyFile(xla_gpu_dump_autotune_results_to_);\n AutotuneResults results;\n EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(autotune_results_str,\n &results));\n }\n tsl::Env* env = tsl::Env::Default();\n TF_EXPECT_OK(tsl::WriteStringToFile(env, xla_gpu_dump_autotune_results_to_,\n kInvalidTextProto));\n TF_EXPECT_OK(GetOptimizedModule(kHloText).status());\n {\n std::string autotune_results_str =\n ExpectToReadNonEmptyFile(xla_gpu_dump_autotune_results_to_);\n AutotuneResults results;\n EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(autotune_results_str,\n &results));\n }\n}\nint64_t CountCopies(const HloComputation& computation) {\n int64_t count = 0;\n for (const auto& instruction : computation.instructions()) {\n if (instruction->opcode() == HloOpcode::kCopy) {\n count++;\n }\n }\n return count;\n}\nint64_t CountCopies(const HloModule& module) {\n int64_t count = 0;\n for (const auto& computation : module.computations()) {\n count += CountCopies(*computation);\n }\n return count;\n}\nTEST_F(GpuCompilerTest, RemovesUnnecessaryCopyAfterScheduling) {\n const absl::string_view hlo_string = R\"(\nHloModule all_gather_overlapping\ncondition {\n input_tuple = (f32[1,128], f32[2,128], pred[]) 
parameter(0)\n ROOT cond = pred[] get-tuple-element(input_tuple), index=2\n}\nbody {\n input_tuple = (f32[1,128], f32[2,128], pred[]) parameter(0)\n param_0 = f32[1,128] get-tuple-element(input_tuple), index=0\n param_1 = f32[2,128] get-tuple-element(input_tuple), index=1\n cond = pred[] get-tuple-element(input_tuple), index=2\n c0 = f32[] constant(0)\n splat_c0 = f32[1,128] broadcast(c0), dimensions={}\n add = f32[1,128] add(splat_c0, param_0)\n all-gather-start = (f32[1,128], f32[2,128]) all-gather-start(add), channel_id=1337, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true\n c1_s32 = s32[] constant(1)\n c0_s32 = s32[] constant(0)\n dynamic-slice = f32[1,128] dynamic-slice(param_1, c1_s32, c0_s32), dynamic_slice_sizes={1,128}\n all-gather-done = f32[2,128] all-gather-done(all-gather-start)\n ROOT output_tuple = (f32[1,128], f32[2,128], pred[]) tuple(dynamic-slice, all-gather-done, cond)\n}\nENTRY main {\n param_0 = f32[1,128] parameter(0)\n param_1 = f32[2,128] parameter(1)\n param_2 = pred[] parameter(2)\n tuple = (f32[1,128], f32[2,128], pred[]) tuple(param_0, param_1, param_2)\n ROOT while = (f32[1,128], f32[2,128], pred[]) while(tuple), condition=condition, body=body\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n GetOptimizedModule(hlo_string));\n EXPECT_EQ(CountCopies(*module), 7);\n const HloInstruction* root = module->entry_computation()->root_instruction();\n const HloInstruction* while_op = root->operand(0)->operand(0);\n EXPECT_EQ(while_op->while_body()->root_instruction()->operand(1)->opcode(),\n HloOpcode::kCopy);\n TF_ASSERT_OK(Schedule(module.get()));\n EXPECT_EQ(CountCopies(*module), 4);\n module->entry_computation()->root_instruction();\n while_op = root->operand(0)->operand(0);\n EXPECT_EQ(while_op->while_body()->root_instruction()->operand(1)->opcode(),\n HloOpcode::kAllGatherDone);\n}\nTEST_F(GpuCompilerTest,\n GemmFusionIsNoOpWhenGemmFusionAutotunerFallsBackToCublas) {\n auto cc = backend()\n 
.default_stream_executor()\n ->GetDeviceDescription()\n .cuda_compute_capability();\n if (!cc.IsAtLeastAmpere()) {\n GTEST_SKIP() << \"Autotuning results have only been generated for Ampere \"\n << \"and Hopper GPUs\";\n }\n const absl::string_view hlo_string = R\"(\nHloModule test\nENTRY main {\n param_0 = bf16[3,32,1024,4,1024]{4,3,2,1,0} parameter(0)\n param_1 = bf16[4,3,32,1024]{3,2,1,0} parameter(1)\n param_2 = s32[] parameter(2)\n constant_0 = s32[] constant(0)\n dynamic-slice_0 = bf16[1,3,32,1024]{3,2,1,0} dynamic-slice(param_1, param_2, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,3,32,1024}\n reshape_0 = bf16[3,32,1024]{2,1,0} reshape(dynamic-slice_0)\n broadcast_0 = bf16[3,32,1024,4,1024]{2,1,4,3,0} broadcast(reshape_0), dimensions={0,1,2}\n add_0 = bf16[3,32,1024,4,1024]{4,3,2,1,0} add(param_0, broadcast_0)\n transpose_0 = bf16[3,4,1024,32,1024]{2,1,4,3,0} transpose(add_0), dimensions={0,3,4,1,2}\n slice_0 = bf16[1,4,1024,32,1024]{4,3,2,1,0} slice(transpose_0), slice={[0:1], [0:4], [0:1024], [0:32], [0:1024]}\n reshape_1 = bf16[4,1024,32,1024]{3,2,1,0} reshape(slice_0)\n copy_0 = bf16[4,1024,32,1024]{3,2,1,0} copy(reshape_1)\n constant_1 = bf16[] constant(0.08838)\n broadcast_1 = bf16[4,1024,32,1024]{3,2,1,0} broadcast(constant_1), dimensions={}\n multiply_0 = bf16[4,1024,32,1024]{3,2,1,0} multiply(copy_0, broadcast_1)\n slice_1 = bf16[1,4,1024,32,1024]{4,3,2,1,0} slice(transpose_0), slice={[1:2], [0:4], [0:1024], [0:32], [0:1024]}\n reshape_2 = bf16[4,1024,32,1024]{3,2,1,0} reshape(slice_1)\n copy_1 = bf16[4,1024,32,1024]{3,2,1,0} copy(reshape_2)\n ROOT dot_0 = bf16[4,32,1024,1024]{3,2,1,0} dot(multiply_0, copy_1), lhs_batch_dims={0,2}, lhs_contracting_dims={3}, rhs_batch_dims={0,2}, rhs_contracting_dims={3}\n}\n)\";\n HloModuleConfig config;\n DebugOptions triton_enabled_debug_options = GetDebugOptionsForTest();\n triton_enabled_debug_options.set_xla_gpu_enable_dynamic_slice_fusion(false);\n triton_enabled_debug_options\n 
.set_xla_gpu_require_complete_aot_autotune_results(true);\n config.set_debug_options(triton_enabled_debug_options);\n config.set_replica_count(1);\n config.set_num_partitions(1);\n std::string path =\n tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), \"service\", \"gpu\",\n \"gpu_compiler_test_autotune_db.textproto\");\n TF_EXPECT_OK(AutotunerUtil::LoadAutotuneResultsFromFile(path));\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo_string, config));\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr triton_enabled_module,\n GetOptimizedModule(std::move(module)));\n AutotunerUtil::ClearAutotuneResults();\n DebugOptions triton_disabled_debug_options = GetDebugOptionsForTest();\n triton_disabled_debug_options.set_xla_gpu_enable_dynamic_slice_fusion(false);\n triton_disabled_debug_options.set_xla_gpu_enable_triton_gemm(false);\n config.set_debug_options(triton_disabled_debug_options);\n TF_ASSERT_OK_AND_ASSIGN(module,\n ParseAndReturnVerifiedModule(hlo_string, config));\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr triton_disabled_module,\n GetOptimizedModule(std::move(module)));\n const HloInstruction* root =\n triton_enabled_module->entry_computation()->root_instruction();\n const HloInstruction* custom_op = root->operand(0)->operand(0);\n EXPECT_TRUE(custom_op->IsCustomCall(\"__cublas$gemm\"));\n EXPECT_EQ(triton_enabled_module->computation_count(),\n triton_disabled_module->computation_count());\n}\nclass FloatNormalizationTest : public GpuCompilerTest,\n public ::testing::WithParamInterface<\n std::pair> {};\nINSTANTIATE_TEST_SUITE_P(\n Fp8s, FloatNormalizationTest,\n ::testing::Values(\n std::make_pair(PrimitiveType::F8E4M3FN, PrimitiveType::F8E4M3FN),\n std::make_pair(PrimitiveType::F8E5M2, PrimitiveType::F8E4M3FN),\n std::make_pair(PrimitiveType::F8E4M3FN, PrimitiveType::F8E5M2),\n std::make_pair(PrimitiveType::F8E5M2, PrimitiveType::F8E5M2)));\nTEST_P(FloatNormalizationTest, Fp8Normalization) {\n const PrimitiveType lhs_type = 
GetParam().first;\n const PrimitiveType rhs_type = GetParam().second;\n const std::string lhs_name =\n primitive_util::LowercasePrimitiveTypeName(lhs_type);\n const std::string rhs_name =\n primitive_util::LowercasePrimitiveTypeName(rhs_type);\n const std::string module_str = absl::Substitute(R\"(\nHloModule sch\nENTRY main {\n parameter = $0[1600,1600]{1,0} parameter(0)\n parameter.1 = $1[1600,1600]{1,0} parameter(1)\n neg = $1[1600,1600]{1,0} negate(parameter.1)\n dot = f16[1600,1600]{1,0} dot(parameter,neg), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n constant = f16[] constant(0)\n broadcast = f16[1600,1600]{1,0} broadcast(constant), dimensions={}\n ROOT maximum = f16[1600,1600]{1,0} maximum(dot,broadcast)\n})\",\n lhs_name, rhs_name);\n auto optimize_module = [&](bool enable_triton, bool enable_blas,\n bool enable_blas_fallback)\n -> absl::StatusOr> {\n HloModuleConfig config;\n DebugOptions debug_options = GetDebugOptionsForTest();\n debug_options.set_xla_gpu_cublas_fallback(enable_blas_fallback);\n debug_options.set_xla_gpu_enable_triton_gemm(enable_triton);\n if (!enable_blas) {\n debug_options.add_xla_disable_hlo_passes(\"cublas-gemm-rewriter\");\n }\n config.set_debug_options(debug_options);\n config.set_num_partitions(1);\n TF_ASSIGN_OR_RETURN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, config));\n return GetOptimizedModule(std::move(module));\n };\n auto cc = backend()\n .default_stream_executor()\n ->GetDeviceDescription()\n .cuda_compute_capability();\n const std::string triton_keep_types = absl::Substitute(\n R\"(CHECK: fusion($0{{[^)]*}}, $1{{[^)]*}}){{.*}}\"kind\":\"__triton_gemm\")\",\n lhs_name, rhs_name);\n const std::string cublaslt_keep_types = absl::Substitute(\n R\"(CHECK: custom-call($0{{[^)]*}}, $1{{[^)]*}}){{.*}}custom_call_target=\"__cublas$$lt$$matmul$$f8\")\",\n lhs_name, rhs_name);\n const std::string cublas_convert_to_f16 =\n R\"(CHECK: custom-call(f16{{[^)]*}}, 
f16{{[^)]*}}){{.*}}custom_call_target=\"__cublas$gemm\")\";\n const std::string fallback_convert_to_f16 =\n R\"(CHECK: dot(f16{{[^)]*}}, f16{{[^)]*}}))\";\n {\n TF_ASSERT_OK_AND_ASSIGN(auto optimized_module_no_fallback,\n optimize_module(true,\n true,\n false));\n const std::string triton_expected_check =\n (cc.IsAtLeastHopper() ||\n (cc.IsAtLeastAmpere() && lhs_type == F8E5M2 && rhs_type == F8E5M2))\n ? triton_keep_types\n : cublas_convert_to_f16;\n TF_ASSERT_OK_AND_ASSIGN(\n bool filecheck_matched,\n RunFileCheck(optimized_module_no_fallback->ToString(),\n triton_expected_check));\n EXPECT_TRUE(filecheck_matched);\n }\n {\n TF_ASSERT_OK_AND_ASSIGN(auto optimized_module_no_triton,\n optimize_module(false,\n true,\n true));\n const std::string blas_expected_check =\n (cc.IsAtLeastHopper() && !(lhs_type == F8E5M2 && rhs_type == F8E5M2))\n ? cublaslt_keep_types\n : cublas_convert_to_f16;\n TF_ASSERT_OK_AND_ASSIGN(bool filecheck_matched,\n RunFileCheck(optimized_module_no_triton->ToString(),\n blas_expected_check));\n EXPECT_TRUE(filecheck_matched);\n }\n {\n TF_ASSERT_OK_AND_ASSIGN(auto optimized_module_nothing,\n optimize_module(false,\n false,\n false));\n TF_ASSERT_OK_AND_ASSIGN(bool filecheck_matched,\n RunFileCheck(optimized_module_nothing->ToString(),\n fallback_convert_to_f16));\n EXPECT_TRUE(filecheck_matched);\n }\n}\nTEST_F(GpuCompilerTest, CollectivePermuteDecompositionAndPipelining) {\n const char* kModuleStr = R\"(\nHloModule cp\ncond {\n param = (u32[], f32[1, 1024, 1024]) parameter(0)\n count = get-tuple-element(%param), index=0\n ub = u32[] constant(11)\n ROOT result = pred[] compare(count, ub), direction=LT\n }\nbody {\n param = (u32[], f32[1, 1024, 1024]) parameter(0)\n count = get-tuple-element(%param), index=0\n send-data = get-tuple-element(%param), index=1\n recv-data = f32[1, 1024, 1024] collective-permute(send-data),\n source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}, channel_id=1\n c1 = u32[] constant(1)\n new_count = u32[] add(count, c1)\n 
replica = u32[] replica-id()\n c10 = u32[] constant(10)\n sum = u32[] add(replica, c10)\n sum2 = u32[] add(sum, count)\n conv = f32[] convert(sum2)\n p = f32[1, 1024, 1024] broadcast(conv), dimensions={}\n b = f32[1, 1024, 1024] add(p, recv-data)\n c = f32[1, 1024, 1024] multiply(b, b)\n d = f32[1, 1024, 1024] tan(c)\n s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0},\n lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}\n ROOT result = (u32[], f32[1, 1024, 1024]) tuple(new_count, s)\n}\nENTRY test_computation {\n c0 = u32[] constant(0)\n f0 = f32[] constant(0.0)\n init = f32[1, 1024, 1024] broadcast(f0), dimensions={}\n while_init = (u32[], f32[1, 1024, 1024]) tuple(c0, init)\n while_result = (u32[], f32[1, 1024, 1024]) while(while_init), body=body, condition=cond\n ROOT result = f32[1, 1024, 1024] get-tuple-element(while_result), index=1\n}\n)\";\n const char* kExpected = R\"(\nCHECK: recv-done\nCHECK-SAME: channel_id=[[CHANNEL_ID:[0-9]+]]\nCHECK-SAME: frontend_attributes={_xla_send_recv_pipeline=\"0\"}\nCHECK: send-done\nCHECK-SAME: channel_id=[[CHANNEL_ID]]\nCHECK-SAME: frontend_attributes={_xla_send_recv_pipeline=\"0\"}\nCHECK: %[[CUSTOM_CALL:.*]] = custom-call\nCHECK: %[[AFTER_ALL:.*]] = after-all\nCHECK: %[[RESULT_RECV:.*]] = recv(%[[AFTER_ALL]])\nCHECK-SAME: channel_id=[[CHANNEL_ID]]\nCHECK-SAME: frontend_attributes={_xla_send_recv_pipeline=\"0\",\nCHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}},\nCHECK-SAME: control-predecessors={%[[CUSTOM_CALL]]}\nCHECK: %[[RESULT_SEND:.*]] = send(%[[SOME_SEND_ARG:.*]], %[[AFTER_ALL]])\nCHECK-SAME: channel_id=1\nCHECK-SAME: frontend_attributes={_xla_send_recv_pipeline=\"0\",\nCHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}},\nCHECK-SAME: control-predecessors={%[[RESULT_RECV]]}\nCHECK: ROOT\nCHECK-SAME: %[[RESULT_RECV]]\nCHECK: ENTRY\nCHECK: %[[ENTRY_AFTER_ALL:.*]] = after-all\nCHECK: %[[ENTRY_RECV:.*]] = 
recv(%[[ENTRY_AFTER_ALL]])\nCHECK-SAME: channel_id=[[CHANNEL_ID]]\nCHECK-SAME: frontend_attributes={_xla_send_recv_pipeline=\"0\",\nCHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}}\nCHECK: %[[ENTRY_SEND:.*]] = send(%[[SOME_SEND_ARG:.*]], %[[ENTRY_AFTER_ALL]])\nCHECK-SAME: channel_id=1\nCHECK-SAME: frontend_attributes={_xla_send_recv_pipeline=\"0\",\nCHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}},\nCHECK-SAME: control-predecessors={%[[ENTRY_RECV]]}\nCHECK: %[[WHILE_INIT:.*]] = tuple\nCHECK-SAME: %[[ENTRY_SEND]]\nCHECK: while(%[[WHILE_INIT]])\nCHECK: recv-done\nCHECK-SAME: channel_id=[[CHANNEL_ID]]\nCHECK-SAME: frontend_attributes={_xla_send_recv_pipeline=\"0\"}\nCHECK: send-done\nCHECK-SAME: channel_id=[[CHANNEL_ID]]\nCHECK-SAME: frontend_attributes={_xla_send_recv_pipeline=\"0\"}\n)\";\n HloModuleConfig config;\n DebugOptions debug_options = GetDebugOptionsForTest();\n debug_options.set_xla_gpu_enable_latency_hiding_scheduler(true);\n debug_options.set_xla_gpu_collective_permute_decomposer_threshold(1);\n debug_options.set_xla_gpu_enable_pipelined_p2p(true);\n debug_options.set_xla_gpu_enable_triton_gemm(false);\n config.set_debug_options(debug_options);\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kModuleStr, config));\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr optimized_module,\n GetOptimizedModule(std::move(module)));\n TF_ASSERT_OK(Schedule(optimized_module.get()));\n HloPrintOptions options;\n options.set_print_operand_shape(false);\n options.set_print_result_shape(false);\n TF_ASSERT_OK_AND_ASSIGN(\n bool filecheck_matched,\n RunFileCheck(optimized_module->ToString(options), kExpected));\n EXPECT_TRUE(filecheck_matched);\n}\nclass KernelCacheTest : public HloTestBase {\n public:\n void SetUp() override {\n CHECK(tsl::Env::Default()->LocalTempFilename(&cache_file_name_));\n HloModuleConfig config;\n config.set_debug_options(GetDebugOptionsForTest());\n 
TF_ASSERT_OK_AND_ASSIGN(bool can_use_link_modules,\n dynamic_cast(backend().compiler())\n ->CanUseLinkModules(config));\n if (!can_use_link_modules) {\n GTEST_SKIP() << \"Caching compiled kernels requires support of linking.\";\n }\n }\n DebugOptions GetDebugOptionsForTest() override {\n DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();\n debug_options.set_xla_gpu_kernel_cache_file(cache_file_name_);\n debug_options.set_xla_gpu_enable_llvm_module_compilation_parallelism(true);\n return debug_options;\n }\n bool CacheFileExists() {\n if (!tsl::Env::Default()->FileExists(cache_file_name_).ok()) {\n return false;\n }\n return true;\n }\n int CacheEntryCount() {\n if (!CacheFileExists()) {\n return 0;\n }\n std::string serialized;\n TF_EXPECT_OK(tsl::ReadFileToString(tsl::Env::Default(), cache_file_name_,\n &serialized));\n CompilationCacheProto proto;\n EXPECT_TRUE(proto.ParseFromString(std::string(serialized)));\n return proto.entries_size();\n }\n std::string cache_file_name_;\n static constexpr absl::string_view kHloText = R\"(\n ENTRY e {\n p = s8[] parameter(0)\n c = s8[] constant(8)\n ROOT _ = s8[] add(p, c)\n })\";\n};\nTEST_F(KernelCacheTest, CacheIsGenerated) {\n EXPECT_FALSE(CacheFileExists());\n EXPECT_TRUE(Run(kHloText, false));\n EXPECT_EQ(CacheEntryCount(), 1);\n EXPECT_TRUE(Run(kHloText, false));\n EXPECT_EQ(CacheEntryCount(), 1);\n}\nTEST_F(KernelCacheTest, NoCacheIsGeneratedWithoutCompiledKernels) {\n EXPECT_FALSE(CacheFileExists());\n EXPECT_TRUE(Run(R\"(\n ENTRY e {\n a = f32[5,5] parameter(0)\n ROOT _ = f32[5,5] custom-call(a, a), custom_call_target=\"__cublas$gemm\",\n backend_config=\"{ \\\"gemm_backend_config\\\": 
{\\\"alpha_real\\\":1,\\\"beta\\\":0,\\\"dot_dimension_numbers\\\":{\\\"lhs_contracting_dimensions\\\":[\\\"1\\\"],\\\"rhs_contracting_dimensions\\\":[\\\"0\\\"],\\\"lhs_batch_dimensions\\\":[],\\\"rhs_batch_dimensions\\\":[]},\\\"alpha_imag\\\":0,\\\"precision_config\\\":{\\\"operand_precision\\\":[\\\"DEFAULT\\\",\\\"DEFAULT\\\"]},\\\"epilogue\\\":\\\"DEFAULT\\\"}}\"\n })\",\n false));\n EXPECT_FALSE(CacheFileExists());\n}\nTEST_F(KernelCacheTest, CacheGrowsWithNewKernels) {\n EXPECT_FALSE(CacheFileExists());\n EXPECT_TRUE(Run(kHloText, false));\n EXPECT_EQ(CacheEntryCount(), 1);\n EXPECT_TRUE(Run(R\"(\n ENTRY e {\n p = s8[] parameter(0)\n ROOT _ = s8[] multiply(p, p)\n })\",\n false));\n EXPECT_EQ(CacheEntryCount(), 2);\n}\nTEST_F(KernelCacheTest, AllKernelsAreCachedBecauseSplitModuleUsesRoundRobin) {\n EXPECT_FALSE(CacheFileExists());\n EXPECT_TRUE(Run(R\"(\n ENTRY e {\n p = s8[] parameter(0)\n n = s8[] negate(p)\n a = s8[] add(n, n)\n s = s8[] subtract(p, a)\n ROOT _ = s8[] multiply(s, p)\n })\",\n false));\n EXPECT_EQ(CacheEntryCount(), 4);\n}\nTEST_F(KernelCacheTest, CachingWorksWithLoadedExecutables) {\n const std::string kHloAdd1 = R\"(\nadd1 {\n p = s32[] parameter(0)\n c = s32[] constant(1)\n ROOT a = s32[] add(p, c)\n}\nENTRY e {\n p = s32[] parameter(0)\n ROOT r = s32[] fusion(p), kind=kLoop, calls=add1\n})\";\n const std::string kHloAdd2 = R\"(\nadd2 {\n p = s32[] parameter(0)\n c = s32[] constant(2)\n ROOT a = s32[] add(p, c)\n}\nENTRY e {\n p = s32[] parameter(0)\n ROOT r = s32[] fusion(p), kind=kLoop, calls=add2\n})\";\n TF_ASSERT_OK_AND_ASSIGN(se::Platform * platform,\n se::PlatformManager::PlatformWithName(\"cuda\"));\n TF_ASSERT_OK_AND_ASSIGN(se::StreamExecutor * stream_exec,\n platform->ExecutorForDevice(0));\n Compiler* compiler = backend().compiler();\n AotCompilationOptions aot_options(compiler->PlatformId());\n aot_options.set_executor(stream_exec);\n auto test = [this, &compiler, &aot_options](absl::string_view hlo, int input,\n int 
expected_result) {\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(hlo));\n auto module_group = std::make_unique(std::move(module));\n TF_ASSERT_OK_AND_ASSIGN(\n std::vector> aot_results,\n compiler->CompileAheadOfTime(std::move(module_group), aot_options));\n TF_ASSERT_OK_AND_ASSIGN(std::string serialized_aot_result,\n aot_results[0]->SerializeAsString());\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr aot_result,\n compiler->LoadAotCompilationResult(serialized_aot_result));\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr executable,\n aot_result->LoadExecutable(compiler, aot_options.executor()));\n const xla::Literal literal_input =\n xla::LiteralUtil::CreateR0(input);\n const xla::Literal literal_expected_result =\n xla::LiteralUtil::CreateR0(expected_result);\n TF_ASSERT_OK_AND_ASSIGN(Literal result,\n GetHloRunner().value()->ExecuteWithExecutable(\n executable.get(), {&literal_input}));\n EXPECT_TRUE(LiteralTestUtil::Equal(result, literal_expected_result));\n };\n test(kHloAdd1, 1, 2);\n test(kHloAdd2, 1, 3);\n test(kHloAdd2, 1, 3);\n}\nclass KernelCacheTestSingleThreaded : public KernelCacheTest {\n public:\n DebugOptions GetDebugOptionsForTest() override {\n DebugOptions debug_options = KernelCacheTest::GetDebugOptionsForTest();\n debug_options.set_xla_gpu_force_compilation_parallelism(1);\n return debug_options;\n }\n};\nTEST_F(KernelCacheTestSingleThreaded, CacheIsGenerated) {\n EXPECT_FALSE(CacheFileExists());\n EXPECT_TRUE(Run(kHloText, false));\n EXPECT_EQ(CacheEntryCount(), 1);\n EXPECT_TRUE(Run(kHloText, false));\n EXPECT_EQ(CacheEntryCount(), 1);\n}\nclass NoKernelCacheTest : public KernelCacheTest {\n public:\n DebugOptions GetDebugOptionsForTest() override {\n DebugOptions debug_options = KernelCacheTest::GetDebugOptionsForTest();\n debug_options.set_xla_gpu_enable_llvm_module_compilation_parallelism(false);\n return debug_options;\n }\n};\nTEST_F(NoKernelCacheTest, NoCacheWithoutCompilationParallelism) {\n 
EXPECT_TRUE(Run(kHloText, false));\n EXPECT_FALSE(CacheFileExists());\n}\nTEST_F(GpuCompilerTest, TestFlag_xla_gpu_unsafe_pipelined_loop_annotator) {\n const char* hlo = R\"(\n HloModule test, entry_computation_layout={()->(s32[], s32[])}\n %Body (param: (s32[], s32[])) -> (s32[], s32[]) {\n %param = (s32[], s32[]) parameter(0)\n %i = s32[] get-tuple-element((s32[], s32[]) %param), index=1\n %one = s32[] constant(1)\n %i_plus_one = s32[] add(s32[] %i, s32[] %one)\n %permute = s32[] collective-permute(%i_plus_one), channel_id=1, source_target_pairs={{0,1},{1,2},{2,3},{3,0}}\n ROOT %tuple = (s32[], s32[]) tuple(s32[] %permute, s32[] %i_plus_one)\n }\n %Cond (param.1: (s32[], s32[])) -> pred[] {\n %param.1 = (s32[], s32[]) parameter(0)\n %i.1 = s32[] get-tuple-element((s32[], s32[]) %param.1), index=1\n %trip_count = s32[] constant(10)\n ROOT %done = pred[] compare(s32[] %i.1, s32[] %trip_count), direction=LT\n }\n ENTRY %test () -> (s32[], s32[]) {\n %i_start = s32[] constant(0)\n %p_start = s32[] constant(0)\n %initial_tuple = (s32[], s32[]) tuple(s32[] %i_start, s32[] %p_start)\n ROOT %while = (s32[], s32[]) while((s32[], s32[]) %initial_tuple), condition=%Cond, body=%Body, frontend_attributes={is_pipelined_while_loop=\"true\"}\n })\";\n const char* kExpected = R\"(\n )\";\n DebugOptions debug_options;\n HloModuleConfig config;\n debug_options.set_xla_gpu_unsafe_pipelined_loop_annotator(true);\n config.set_debug_options(debug_options);\n config.set_num_partitions(4);\n config.set_use_spmd_partitioning(true);\n TF_ASSERT_OK_AND_ASSIGN(auto unoptimized_module,\n ParseAndReturnVerifiedModule(hlo, config));\n TF_ASSERT_OK_AND_ASSIGN(auto optimized_module,\n GetOptimizedModule(std::move(unoptimized_module)));\n HloPrintOptions options;\n options.set_print_operand_shape(false);\n options.set_print_result_shape(false);\n TF_ASSERT_OK_AND_ASSIGN(\n bool filecheck_matched,\n RunFileCheck(optimized_module->ToString(options), kExpected));\n 
EXPECT_TRUE(filecheck_matched);\n}\nusing GpuCompilerPassTest = GpuCompilerTest;\nTEST_F(GpuCompilerPassTest,\n GpuCompilerRunsTritonGemmRewriterByDefaultFromAmpere) {\n if (std::holds_alternative(GpuComputeComp())) {\n GTEST_SKIP() << \"TritonGemmRewriter disabled for ROCm until autotuner \"\n << \"is included.\";\n }\n auto cc = backend()\n .default_stream_executor()\n ->GetDeviceDescription()\n .cuda_compute_capability();\n bool is_rocm = std::holds_alternative(\n backend()\n .default_stream_executor()\n ->GetDeviceDescription()\n .gpu_compute_capability());\n bool expect_triton_gemm_rewriter_has_run = cc.IsAtLeastAmpere() || is_rocm;\n constexpr absl::string_view constant_module = R\"(\nHloModule noop\nENTRY main {\n ROOT constant = f32[] constant(0)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(constant_module));\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr optimized_module,\n GetOptimizedModule(std::move(module)));\n const HloModuleMetadataProto& module_metadata =\n optimized_module->metadata()->proto();\n bool triton_gemm_rewriter_has_run = false;\n for (const HloPassMetadata& pass_metadata : module_metadata.pass_metadata()) {\n triton_gemm_rewriter_has_run |=\n pass_metadata.pass_name() == \"triton-gemm-rewriter\";\n }\n EXPECT_EQ(triton_gemm_rewriter_has_run, expect_triton_gemm_rewriter_has_run);\n}\nTEST_F(GpuCompilerPassTest,\n GpuCompilerRunsCustomKernelFusionByDefaultFromVolta) {\n auto cc = backend()\n .default_stream_executor()\n ->GetDeviceDescription()\n .cuda_compute_capability();\n bool expect_custom_kernel_fusion_rewriter_has_run =\n cc.major == se::CudaComputeCapability::VOLTA;\n constexpr absl::string_view constant_module = R\"(\nHloModule noop\nENTRY main {\n ROOT constant = f32[] constant(0)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(constant_module));\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr optimized_module,\n GetOptimizedModule(std::move(module)));\n 
const HloModuleMetadataProto& module_metadata =\n optimized_module->metadata()->proto();\n bool custom_kernel_fusion_rewriter_has_run = false;\n for (const HloPassMetadata& pass_metadata : module_metadata.pass_metadata()) {\n custom_kernel_fusion_rewriter_has_run |=\n pass_metadata.pass_name() == \"custom-kernel-fusion-rewriter\";\n }\n EXPECT_EQ(custom_kernel_fusion_rewriter_has_run,\n expect_custom_kernel_fusion_rewriter_has_run);\n}\nstruct PassRunIndex {\n int first_run = std::numeric_limits::max();\n int last_run = std::numeric_limits::min();\n};\nvoid VerifyPassOrder(\n const absl::flat_hash_map& passes,\n absl::string_view before, absl::string_view after) {\n ASSERT_TRUE(passes.contains(before))\n << \"Expected pass did not run: \" << before;\n ASSERT_TRUE(passes.contains(after)) << \"Expected pass did not run: \" << after;\n EXPECT_LT(passes.at(before).last_run, passes.at(after).first_run)\n << \"Pass \" << before << \" ran after \" << after;\n}\nabsl::flat_hash_map GatherPassOrderInformation(\n const HloModule& module) {\n absl::flat_hash_map passes;\n int run_index = 0;\n for (const HloPassMetadata& pass_metadata :\n module.metadata().proto().pass_metadata()) {\n auto& pass = passes[pass_metadata.pass_name()];\n pass.first_run = std::min(pass.first_run, run_index);\n pass.last_run = std::max(pass.last_run, run_index);\n ++run_index;\n }\n return passes;\n}\nTEST_F(GpuCompilerPassTest, PassesAreRunInCorrectOrder) {\n constexpr absl::string_view constant_module = R\"(\nENTRY main {\n ROOT constant = f32[] constant(0)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(constant_module));\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr optimized_module,\n GetOptimizedModule(std::move(module)));\n absl::flat_hash_map passes =\n GatherPassOrderInformation(*optimized_module);\n VerifyPassOrder(passes, \"layout-assignment\",\n \"priority-fusion\");\n VerifyPassOrder(passes, \"layout-assignment\",\n \"layout_normalization\");\n 
VerifyPassOrder(passes, \"host-offload-legalize\",\n \"layout_normalization\");\n}\nTEST_F(GpuCompilerPassTest, FusionBlockLevelRewriterRunsAfterAllFusionPasses) {\n auto cc = backend()\n .default_stream_executor()\n ->GetDeviceDescription()\n .cuda_compute_capability();\n if (!cc.IsAtLeastAmpere()) {\n GTEST_SKIP() << \"FusionBlockLevelRewriter requires Ampere+ to run.\";\n }\n constexpr absl::string_view constant_module = R\"(\nENTRY main {\n ROOT constant = f32[] constant(0)\n})\";\n HloModuleConfig config;\n DebugOptions debug_options = GetDebugOptionsForTest();\n debug_options.set_xla_gpu_experimental_enable_fusion_block_level_rewriter(\n true);\n config.set_debug_options(debug_options);\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(constant_module, config));\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr optimized_module,\n GetOptimizedModule(std::move(module)));\n absl::flat_hash_map passes =\n GatherPassOrderInformation(*optimized_module);\n absl::string_view kFusionBlockLevelRewriterName =\n \"fusion-block-level-rewriter\";\n for (const auto& [pass_name, _] : passes) {\n if (pass_name != kFusionBlockLevelRewriterName &&\n absl::StrContains(pass_name, \"fusion\")) {\n VerifyPassOrder(passes, pass_name,\n kFusionBlockLevelRewriterName);\n VLOG(2) << \"Verified pass order: \" << pass_name << \" -> \"\n << kFusionBlockLevelRewriterName;\n }\n }\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_compiler.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_compiler_test.cc"},"Commit 
Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":11,"numItemsPerPage":100,"numTotalItems":3487,"offset":1100,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NjY0NDU2NCwic3ViIjoiL2RhdGFzZXRzL0NQUC1VVC1CRU5DSC9jcHBfdW5pdF90ZXN0c19iZW5jaG1hcmtfZGF0YSIsImV4cCI6MTc1NjY0ODE2NCwiaXNzIjoiaHR0cHM6Ly9odWdnaW5nZmFjZS5jbyJ9.J_RtrHniYkqugjKEzkPSBdzoRUcBJckLSJDhc5Dtu-yLIndZopsYIy_fqbswyiOvn7P2HAP09ef6p1E7L32QCg","displayUrls":true},"discussionsStats":{"closed":0,"open":1,"total":1},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
ID
stringlengths
36
36
Language
stringclasses
1 value
Repository Name
stringclasses
13 values
File Name
stringlengths
2
48
File Path in Repository
stringlengths
11
111
File Path for Unit Test
stringlengths
13
116
Code
stringlengths
0
278k
Unit Test - (Ground Truth)
stringlengths
78
663k
Code Url
stringlengths
91
198
Test Code Url
stringlengths
93
203
Commit Hash
stringclasses
13 values
16498d21-1d10-4b45-ae7a-9b43a041a5b6
cpp
tensorflow/tensorflow
memory_space_propagation
third_party/xla/xla/service/memory_space_propagation.cc
third_party/xla/xla/service/memory_space_propagation_test.cc
#include "xla/service/memory_space_propagation.h" #include <cstdint> #include "xla/shape.h" #include "xla/shape_util.h" namespace xla { absl::StatusOr<bool> MemorySpacePropagation::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool modified = false; TF_ASSIGN_OR_RETURN(auto dataflow_analysis, HloDataflowAnalysis::Run(*module, false, true)); dataflow_analysis_ = std::move(dataflow_analysis); for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { for (HloInstruction* instruction : computation->instructions()) { if (instruction->opcode() == HloOpcode::kFusion) { for (int operand_idx = 0; operand_idx < instruction->fused_parameters().size(); ++operand_idx) { ShapeUtil::ForEachLeafShape( instruction->operand(operand_idx)->shape(), [&](const Shape& sub_shape, const ShapeIndex& index) { int64_t memory_space = sub_shape.layout().memory_space(); modified |= Propagate(index, instruction->fused_parameter(operand_idx), memory_space); }); } ShapeUtil::ForEachLeafShape( instruction->shape(), [&](const Shape& sub_shape, const ShapeIndex& index) { int64_t memory_space = sub_shape.layout().memory_space(); modified |= Propagate(index, instruction->fused_expression_root(), memory_space); }); } } } return modified; } bool MemorySpacePropagation::Propagate(ShapeIndexView index, const HloInstruction* callee_instruction, int64_t memory_space) const { bool modified = false; const HloValue& value = dataflow_analysis_->GetUniqueValueAt( callee_instruction, ShapeIndex(index)); for (const HloPosition& position : value.positions()) { HloInstruction* instruction = position.instruction; Shape* shape = ShapeUtil::GetMutableSubshape(instruction->mutable_shape(), position.index); if (shape->layout().memory_space() == memory_space) { continue; } shape->mutable_layout()->set_memory_space(memory_space); modified = true; if (instruction->opcode() == HloOpcode::kFusion) { Propagate(position.index, 
instruction->fused_expression_root(), memory_space); } const HloInstruction* parent_fusion = instruction->parent()->FusionInstruction(); if (instruction == instruction->parent()->root_instruction() && parent_fusion->parent()->IsFusionComputation()) { Propagate(position.index, parent_fusion, memory_space); } if (instruction->opcode() == HloOpcode::kParameter && parent_fusion->parent()->IsFusionComputation()) { const HloInstruction* fusion_operand = parent_fusion->operand(instruction->parameter_number()); Propagate(position.index, fusion_operand, memory_space); } } for (const HloUse& use : value.GetUses()) { if (use.instruction->opcode() == HloOpcode::kFusion) { modified |= Propagate( use.operand_index, use.instruction->fused_parameter(use.operand_number), memory_space); } } return modified; } }
#include "xla/service/memory_space_propagation.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"

namespace xla {
namespace {

// Test fixture: runs the HLO verifier with layout-sensitive checks disabled
// (both HloVerifier constructor flags are false) so modules whose memory
// spaces were rewritten by the pass can still be validated.
class MemorySpacePropagationTest : public HloTestBase {
 public:
  MemorySpacePropagationTest() : HloTestBase(), verifier_(false, false) { }

  // Runs the verifier over `module` and returns its status.
  absl::Status Verify(HloModule* module) {
    return verifier_.Run(module).status();
  }

 private:
  HloVerifier verifier_;
};

// No S(...) memory-space annotations anywhere: the pass must be a no-op and
// the module must hash identically to a freshly parsed reference.
TEST_F(MemorySpacePropagationTest, NoMemorySpace) {
  absl::string_view hlo_string = R"( HloModule NoMemorySpace %fused_computation { %param_1.3 = s32[1]{0:T(128)} parameter(1) %constant.2 = s32[]{:T(128)} constant(-2147483648) %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5 %param_2.3 = s32[5]{0:T(128)} parameter(2) %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0 %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3) %param_0.1 = s32[6]{0:T(128)} parameter(0) ROOT %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1) } ENTRY %entry { %param0 = s32[6]{0:T(128)} parameter(0) %param1 = s32[1]{0:T(128)} parameter(1) %param2 = s32[5]{0:T(128)} parameter(2) %arg0 = s32[6]{0:T(128)} copy(%param0) %arg1 = s32[1]{0:T(128)} copy(%param1) %arg2 = s32[5]{0:T(128)} copy(%param2) %fusion = s32[6]{0:T(128)} fusion(s32[6]{0:T(128)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)} %arg2), kind=kLoop, calls=%fused_computation ROOT %root = s32[6]{0:T(128)} copy(%fusion) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  MemorySpacePropagation memory_space_propagation;
  // Pass reports no change...
  EXPECT_FALSE(memory_space_propagation.Run(module.get()).value());
  // ...and the module is structurally identical to the original text.
  TF_ASSERT_OK_AND_ASSIGN(auto ref, ParseAndReturnVerifiedModule(hlo_string));
  EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}

// Fusion with a non-tuple (array) output: S(1) on operands 0 and 2 and on the
// fusion result must propagate to the fused parameters and fused root.
TEST_F(MemorySpacePropagationTest, NonTupleOutput) {
  absl::string_view hlo_string = R"( HloModule NonTupleOutput %fused_computation { %param_1.3 = s32[1]{0:T(128)} parameter(1) %constant.2 = s32[]{:T(128)} constant(-2147483648) %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5 %param_2.3 = s32[5]{0:T(128)} parameter(2) %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0 %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3) %param_0.1 = s32[6]{0:T(128)} parameter(0) ROOT %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1) } ENTRY %entry { %param0 = s32[6]{0:T(128)} parameter(0) %param1 = s32[1]{0:T(128)} parameter(1) %param2 = s32[5]{0:T(128)} parameter(2) %arg0 = s32[6]{0:T(128)S(1)} copy(%param0) %arg1 = s32[1]{0:T(128)} copy(%param1) %arg2 = s32[5]{0:T(128)S(1)} copy(%param2) %fusion = s32[6]{0:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation ROOT %root = s32[6]{0:T(128)} copy(%fusion) } )";
  absl::string_view expected_hlo_string = R"( HloModule NonTupleOutput %fused_computation { %param_1.3 = s32[1]{0:T(128)} parameter(1) %constant.2 = s32[]{:T(128)} constant(-2147483648) %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5 %param_2.3 = s32[5]{0:T(128)S(1)} parameter(2) %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0 %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3) %param_0.1 = s32[6]{0:T(128)S(1)} parameter(0) ROOT %add.0 = s32[6]{0:T(128)S(1)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1) } ENTRY %entry { %param0 = s32[6]{0:T(128)} parameter(0) %param1 = s32[1]{0:T(128)} parameter(1) %param2 = s32[5]{0:T(128)} parameter(2) %arg0 = s32[6]{0:T(128)S(1)} copy(%param0) %arg1 = s32[1]{0:T(128)} copy(%param1) %arg2 = s32[5]{0:T(128)S(1)} copy(%param2) %fusion = s32[6]{0:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation ROOT %root = s32[6]{0:T(128)} copy(%fusion) } )";
  // Input is parsed unverified: memory spaces inside the fusion do not yet
  // match the fusion's boundary, which the strict verifier would reject.
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  MemorySpacePropagation memory_space_propagation;
  EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());
  TF_EXPECT_OK(Verify(module.get()));
  TF_ASSERT_OK_AND_ASSIGN(auto ref,
                          ParseAndReturnVerifiedModule(expected_hlo_string));
  EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}

// Fusion with a tuple output: the S(1) annotation on tuple element 0 must
// propagate into the fused computation per leaf, leaving element 1 untouched.
TEST_F(MemorySpacePropagationTest, TupleOutput) {
  absl::string_view hlo_string = R"( HloModule TupleOutput %fused_computation { %param_1.3 = s32[1]{0:T(128)} parameter(1) %constant.2 = s32[]{:T(128)} constant(-2147483648) %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5 %param_2.3 = s32[5]{0:T(128)} parameter(2) %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0 %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3) %param_0.1 = s32[6]{0:T(128)} parameter(0) %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1) %multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1) ROOT %tuple = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) tuple(%add.0, %multiply.0) } ENTRY %entry { %param0 = s32[6]{0:T(128)} parameter(0) %param1 = s32[1]{0:T(128)} parameter(1) %param2 = s32[5]{0:T(128)} parameter(2) %arg0 = s32[6]{0:T(128)S(1)} copy(%param0) %arg1 = s32[1]{0:T(128)} copy(%param1) %arg2 = s32[5]{0:T(128)S(1)} copy(%param2) %fusion = (s32[6]{0:T(128)S(1)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation %gte0 = s32[6]{0:T(128)S(1)} get-tuple-element(%fusion), index=0 %gte1 = s32[6]{0:T(128)} get-tuple-element(%fusion), index=1 ROOT %root = s32[6]{0:T(128)} add(%gte0, %gte1) } )";
  absl::string_view expected_hlo_string = R"( HloModule TupleOutput %fused_computation { %param_1.3 = s32[1]{0:T(128)} parameter(1) %constant.2 = s32[]{:T(128)} constant(-2147483648) %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5 %param_2.3 = s32[5]{0:T(128)S(1)} parameter(2) %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0 %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3) %param_0.1 = s32[6]{0:T(128)S(1)} parameter(0) %add.0 = s32[6]{0:T(128)S(1)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1) %multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1) ROOT %tuple = (s32[6]{0:T(128)S(1)}, s32[6]{0:T(128)}) tuple(%add.0, %multiply.0) } ENTRY %entry { %param0 = s32[6]{0:T(128)} parameter(0) %param1 = s32[1]{0:T(128)} parameter(1) %param2 = s32[5]{0:T(128)} parameter(2) %arg0 = s32[6]{0:T(128)S(1)} copy(%param0) %arg1 = s32[1]{0:T(128)} copy(%param1) %arg2 = s32[5]{0:T(128)S(1)} copy(%param2) %fusion = (s32[6]{0:T(128)S(1)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation %gte0 = s32[6]{0:T(128)S(1)} get-tuple-element(%fusion), index=0 %gte1 = s32[6]{0:T(128)} get-tuple-element(%fusion), index=1 ROOT %root = s32[6]{0:T(128)} add(%gte0, %gte1) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  MemorySpacePropagation memory_space_propagation;
  EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());
  TF_EXPECT_OK(Verify(module.get()));
  TF_ASSERT_OK_AND_ASSIGN(auto ref,
                          ParseAndReturnVerifiedModule(expected_hlo_string));
  EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}

// A fusion nested inside a fusion on the operand side: the S(1) space on the
// outer fusion's operand must reach the inner bitcast-fusion's parameter.
TEST_F(MemorySpacePropagationTest, NestedInputFusion) {
  absl::string_view hlo_string = R"( HloModule NestedFusion %bitcast_fusion { %bf_param = s32[3,2]{0,1:T(128)} parameter(0) ROOT %bitcast = s32[6]{0:T(128)} bitcast(%bf_param) } %fused_computation { %param_1.3 = s32[1]{0:T(128)} parameter(1) %constant.2 = s32[]{:T(128)} constant(-2147483648) %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5 %param_2.3 = s32[5]{0:T(128)} parameter(2) %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0 %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3) %param_0.1 = s32[3,2]{0,1:T(128)} parameter(0) %fusion.1 = s32[6]{0:T(128)} fusion(%param_0.1), kind=kLoop, calls=bitcast_fusion ROOT %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %fusion.1) } ENTRY %entry { %param0 = s32[3,2]{0,1:T(128)} parameter(0) %param1 = s32[1]{0:T(128)} parameter(1) %param2 = s32[5]{0:T(128)} parameter(2) %arg0 = s32[3,2]{0,1:T(128)S(1)} copy(%param0) %arg1 = s32[1]{0:T(128)} copy(%param1) %arg2 = s32[5]{0:T(128)S(1)} copy(%param2) %fusion = s32[6]{0:T(128)S(1)} fusion(s32[3,2]{0,1:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation ROOT %root = s32[6]{0:T(128)} copy(%fusion) } )";
  absl::string_view expected_hlo_string = R"( HloModule NestedFusion %bitcast_fusion { %bf_param = s32[3,2]{0,1:T(128)S(1)} parameter(0) ROOT %bitcast = s32[6]{0:T(128)} bitcast(%bf_param) } %fused_computation { %param_1.3 = s32[1]{0:T(128)} parameter(1) %constant.2 = s32[]{:T(128)} constant(-2147483648) %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5 %param_2.3 = s32[5]{0:T(128)S(1)} parameter(2) %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0 %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3) %param_0.1 = s32[3,2]{0,1:T(128)S(1)} parameter(0) %fusion.1 = s32[6]{0:T(128)} fusion(%param_0.1), kind=kLoop, calls=bitcast_fusion ROOT %add.0 = s32[6]{0:T(128)S(1)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %fusion.1) } ENTRY %entry { %param0 = s32[3,2]{0,1:T(128)} parameter(0) %param1 = s32[1]{0:T(128)} parameter(1) %param2 = s32[5]{0:T(128)} parameter(2) %arg0 = s32[3,2]{0,1:T(128)S(1)} copy(%param0) %arg1 = s32[1]{0:T(128)} copy(%param1) %arg2 = s32[5]{0:T(128)S(1)} copy(%param2) %fusion = s32[6]{0:T(128)S(1)} fusion(s32[3,2]{0,1:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation ROOT %root = s32[6]{0:T(128)} copy(%fusion) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  MemorySpacePropagation memory_space_propagation;
  EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());
  TF_EXPECT_OK(Verify(module.get()));
  TF_ASSERT_OK_AND_ASSIGN(auto ref,
                          ParseAndReturnVerifiedModule(expected_hlo_string));
  EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}

// A fusion nested inside a fusion on the output side: the S(1) space on the
// outer fusion's result must reach the inner bitcast-fusion's root.
TEST_F(MemorySpacePropagationTest, NestedOutputFusion) {
  absl::string_view hlo_string = R"( HloModule NestedFusion %bitcast_fusion { %bf_param = s32[6]{0:T(128)} parameter(0) ROOT %bitcast = s32[3,2]{0,1:T(128)} bitcast(%bf_param) } %fused_computation { %param_1.3 = s32[1]{0:T(128)} parameter(1) %constant.2 = s32[]{:T(128)} constant(-2147483648) %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5 %param_2.3 = s32[5]{0:T(128)} parameter(2) %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0 %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3) %param_0.1 = s32[6]{0:T(128)} parameter(0) %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1) ROOT %fusion.1 = s32[3,2]{0,1:T(128)} fusion(%add.0), kind=kLoop, calls=bitcast_fusion } ENTRY %entry { %param0 = s32[6]{0:T(128)} parameter(0) %param1 = s32[1]{0:T(128)} parameter(1) %param2 = s32[5]{0:T(128)} parameter(2) %arg0 = s32[6]{0:T(128)S(1)} copy(%param0) %arg1 = s32[1]{0:T(128)} copy(%param1) %arg2 = s32[5]{0:T(128)S(1)} copy(%param2) %fusion = s32[3,2]{0,1:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation ROOT %root = s32[3,2]{0,1:T(128)} copy(%fusion) } )";
  absl::string_view expected_hlo_string = R"( HloModule NestedFusion %bitcast_fusion { %bf_param = s32[6]{0:T(128)} parameter(0) ROOT %bitcast = s32[3,2]{0,1:T(128)S(1)} bitcast(%bf_param) } %fused_computation { %param_1.3 = s32[1]{0:T(128)} parameter(1) %constant.2 = s32[]{:T(128)} constant(-2147483648) %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5 %param_2.3 = s32[5]{0:T(128)S(1)} parameter(2) %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0 %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3) %param_0.1 = s32[6]{0:T(128)S(1)} parameter(0) %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)S(1)} %param_0.1) ROOT %fusion.1 = s32[3,2]{0,1:T(128)S(1)} fusion(%add.0), kind=kLoop, calls=bitcast_fusion } ENTRY %entry { %param0 = s32[6]{0:T(128)} parameter(0) %param1 = s32[1]{0:T(128)} parameter(1) %param2 = s32[5]{0:T(128)} parameter(2) %arg0 = s32[6]{0:T(128)S(1)} copy(%param0) %arg1 = s32[1]{0:T(128)} copy(%param1) %arg2 = s32[5]{0:T(128)S(1)} copy(%param2) %fusion = s32[3,2]{0,1:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation ROOT %root = s32[3,2]{0,1:T(128)} copy(%fusion) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  MemorySpacePropagation memory_space_propagation;
  EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());
  TF_EXPECT_OK(Verify(module.get()));
  TF_ASSERT_OK_AND_ASSIGN(auto ref,
                          ParseAndReturnVerifiedModule(expected_hlo_string));
  EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}

// A bitcast inside the fusion: since dataflow treats bitcasts as defining
// values, the bitcast's operand gets S(1) while the bitcast itself keeps the
// default space.
TEST_F(MemorySpacePropagationTest, BitcastInFusion) {
  absl::string_view hlo_string = R"( HloModule TupleOutput %fused_computation { %param_1.3 = s32[1]{0:T(128)} parameter(1) %constant.2 = s32[]{:T(128)} constant(-2147483648) %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5 %param_2.3 = s32[5]{0:T(128)} parameter(2) %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0 %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3) %param_0.1 = s32[6]{0:T(128)} parameter(0) %bitcast.0 = s32[6]{0:T(128)} bitcast(s32[6]{0:T(128)} %param_0.1) %multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1) ROOT %tuple = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) tuple(%bitcast.0, %multiply.0) } ENTRY %entry { %param0 = s32[6]{0:T(128)} parameter(0) %param1 = s32[1]{0:T(128)} parameter(1) %param2 = s32[5]{0:T(128)} parameter(2) %arg0 = s32[6]{0:T(128)S(1)} copy(%param0) %arg1 = s32[1]{0:T(128)} copy(%param1) %arg2 = s32[5]{0:T(128)S(1)} copy(%param2) ROOT %fusion = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation } )";
  absl::string_view expected_hlo_string = R"( HloModule TupleOutput %fused_computation { %param_1.3 = s32[1]{0:T(128)} parameter(1) %constant.2 = s32[]{:T(128)} constant(-2147483648) %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5 %param_2.3 = s32[5]{0:T(128)S(1)} parameter(2) %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)S(1)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0 %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3) %param_0.1 = s32[6]{0:T(128)S(1)} parameter(0) %bitcast.0 = s32[6]{0:T(128)} bitcast(s32[6]{0:T(128)S(1)} %param_0.1) %multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)S(1)} %param_0.1) ROOT %tuple = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) tuple(%bitcast.0, %multiply.0) } ENTRY %entry { %param0 = s32[6]{0:T(128)} parameter(0) %param1 = s32[1]{0:T(128)} parameter(1) %param2 = s32[5]{0:T(128)} parameter(2) %arg0 = s32[6]{0:T(128)S(1)} copy(%param0) %arg1 = s32[1]{0:T(128)} copy(%param1) %arg2 = s32[5]{0:T(128)S(1)} copy(%param2) ROOT %fusion = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  MemorySpacePropagation memory_space_propagation;
  EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());
  TF_EXPECT_OK(Verify(module.get()));
  TF_ASSERT_OK_AND_ASSIGN(auto ref,
                          ParseAndReturnVerifiedModule(expected_hlo_string));
  EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}

}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_propagation.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_propagation_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
c7a66d7d-dadc-40c4-8d3c-c32222c567aa
cpp
tensorflow/tensorflow
xla_debug_info_manager
third_party/xla/xla/service/xla_debug_info_manager.cc
third_party/xla/xla/service/xla_debug_info_manager_test.cc
#include "xla/service/xla_debug_info_manager.h"

#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "absl/log/check.h"
#include "absl/synchronization/mutex.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_proto_util.h"

namespace xla {

// Registers a module (and its buffer assignment) under its unique id.
// CHECK-fails on a null module or on a duplicate registration — callers are
// expected to register each module id exactly once.
void XlaDebugInfoManager::RegisterModule(
    std::shared_ptr<const HloModule> hlo_module,
    BufferAssignmentProto buffer_assignment) {
  CHECK(hlo_module != nullptr);
  absl::MutexLock lock(&mutex_);
  auto result = modules_.try_emplace(hlo_module->unique_id());
  CHECK(result.second);  // no duplicate ids
  XlaModuleEntry& m = result.first->second;
  m.hlo_module = std::move(hlo_module);
  m.buffer_assignment = std::move(buffer_assignment);
  m.active = true;
}

// Unregisters a previously registered module. If a trace is in progress the
// entry is only marked inactive (so StopTracing can still serialize it);
// otherwise it is removed immediately. CHECK-fails for an unknown id.
void XlaDebugInfoManager::UnregisterModule(ModuleIdentifier module_id) {
  absl::MutexLock lock(&mutex_);
  auto it = modules_.find(module_id);
  CHECK(it != modules_.end());
  if (!tracing_active_) {
    modules_.erase(it);
  } else {
    // Keep the entry alive until the active trace is stopped.
    XlaModuleEntry& m = it->second;
    m.active = false;
  }
}

void XlaDebugInfoManager::StartTracing() {
  absl::MutexLock lock(&mutex_);
  tracing_active_ = true;
}

// Ends the current trace (no-op if none is active) and, if requested,
// serializes every tracked module into `module_debug_info`. Entries that were
// unregistered during the trace are serialized one last time and then dropped.
void XlaDebugInfoManager::StopTracing(
    std::vector<std::unique_ptr<HloProto>>* module_debug_info) {
  std::vector<XlaModuleEntry> modules_to_serialize;
  {
    // Snapshot the registry under the lock; the (potentially expensive) proto
    // serialization below happens outside the critical section.
    absl::MutexLock lock(&mutex_);
    if (!tracing_active_) return;
    tracing_active_ = false;
    modules_to_serialize.reserve(modules_.size());
    for (auto it = modules_.begin(); it != modules_.end();) {
      auto& m = it->second;
      // Advance before a possible erase so the iterator stays valid.
      auto cur_it = it++;
      if (!m.active) {
        // Deferred removal: the module was unregistered mid-trace, so move it
        // out for one final serialization and delete the entry.
        modules_to_serialize.emplace_back(std::move(m));
        modules_.erase(cur_it);
      } else {
        // Still registered: serialize a copy, keep the entry.
        modules_to_serialize.emplace_back(m);
      }
    }
  }
  if (module_debug_info) {
    module_debug_info->clear();
    for (const auto& m : modules_to_serialize) {
      auto hlo_proto = std::make_unique<HloProto>(MakeHloProto(*m.hlo_module));
      *hlo_proto->mutable_buffer_assignment() = m.buffer_assignment;
      module_debug_info->emplace_back(std::move(hlo_proto));
    }
  }
}

// Returns whether `module_id` is currently tracked (registered, or
// unregistered during a still-active trace).
bool XlaDebugInfoManager::TracksModule(ModuleIdentifier module_id) const {
  absl::MutexLock lock(&mutex_);
  return modules_.find(module_id) != modules_.end();
}

}  // namespace xla
#include "xla/service/xla_debug_info_manager.h"

#include <memory>
#include <string>
#include <utility>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/synchronization/mutex.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/tests/hlo_test_base.h"

namespace xla {

// Test peer that forwards to a private XlaDebugInfoManager instance and
// exposes its internal module table for assertions. It reaches into mutex_
// and modules_ directly, so it presumably is declared a friend of
// XlaDebugInfoManager — the header is not visible here; TODO confirm.
class XlaDebugInfoManagerTestPeer {
 public:
  void RegisterModule(std::shared_ptr<const HloModule> hlo_module,
                      BufferAssignmentProto buffer_assignment) {
    return xla_debug_info_manager_.RegisterModule(
        hlo_module, std::move(buffer_assignment));
  }

  void UnregisterModule(ModuleIdentifier module_id) {
    return xla_debug_info_manager_.UnregisterModule(module_id);
  }

  void StartTracing() { return xla_debug_info_manager_.StartTracing(); }

  // Stops tracing and returns the ids of the serialized modules.
  absl::flat_hash_set<ModuleIdentifier> StopTracing() {
    std::vector<std::unique_ptr<HloProto>> module_debug_info;
    xla_debug_info_manager_.StopTracing(&module_debug_info);
    absl::flat_hash_set<ModuleIdentifier> module_ids;
    for (const auto& hlo_proto : module_debug_info) {
      module_ids.insert(hlo_proto->hlo_module().id());
    }
    return module_ids;
  }

  // Snapshots the ids currently tracked by the manager (under its own lock).
  absl::flat_hash_set<ModuleIdentifier> GetModuleIds() {
    absl::flat_hash_set<ModuleIdentifier> module_ids;
    absl::MutexLock lock(&xla_debug_info_manager_.mutex_);
    for (const auto& it : xla_debug_info_manager_.modules_) {
      module_ids.insert(it.first);
    }
    return module_ids;
  }

 private:
  XlaDebugInfoManager xla_debug_info_manager_;
};

namespace {

using ::testing::IsEmpty;
using ::testing::UnorderedElementsAre;

class XlaDebugInfoManagerTest : public HloTestBase {
 protected:
  // Keeps a module alive outside the manager, mirroring real callers that
  // hold their own reference.
  struct DebugMetadata {
    ModuleIdentifier unique_id;
    std::shared_ptr<HloModule> module;
  };

  // Creates and registers a fresh module; returns its unique id.
  ModuleIdentifier RegisterProgram(const std::string& module_name) {
    DebugMetadata debug_info;
    HloModuleConfig config;
    debug_info.module = std::make_shared<HloModule>(module_name, config);
    ModuleIdentifier unique_id = debug_info.module->unique_id();
    debug_info.unique_id = unique_id;
    xla_debug_info_manager_.RegisterModule(debug_info.module,
                                           BufferAssignmentProto());
    external_references_.push_back(std::move(debug_info));
    return unique_id;
  }

  // Unregisters by id and drops our external reference to the module.
  void UnregisterProgram(ModuleIdentifier unique_id) {
    for (int i = 0; i < external_references_.size(); i++) {
      if (external_references_[i].unique_id == unique_id) {
        xla_debug_info_manager_.UnregisterModule(unique_id);
        external_references_.erase(external_references_.begin() + i);
        break;
      }
    }
  }

  absl::flat_hash_set<ModuleIdentifier> GetModuleIds() {
    return xla_debug_info_manager_.GetModuleIds();
  }

  void StartTrace() { xla_debug_info_manager_.StartTracing(); }

  absl::flat_hash_set<ModuleIdentifier> StopTrace() {
    return xla_debug_info_manager_.StopTracing();
  }

  // Owning references so registered modules outlive the manager's weak view.
  std::vector<DebugMetadata> external_references_;

  XlaDebugInfoManagerTestPeer xla_debug_info_manager_;
};

// Register/unregister without tracing: entries appear and disappear eagerly.
TEST_F(XlaDebugInfoManagerTest, NoTraceBasic) {
  auto program0 = RegisterProgram("program0");
  EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0));
  auto program1 = RegisterProgram("program1");
  EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0, program1));
  UnregisterProgram(program0);
  EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program1));
  UnregisterProgram(program1);
  EXPECT_TRUE(GetModuleIds().empty());
}

// Two modules may share a name; ids stay distinct and are tracked separately.
TEST_F(XlaDebugInfoManagerTest, NoTraceDuplicateIds) {
  auto program0A = RegisterProgram("program0");
  auto program0B = RegisterProgram("program0");
  auto program1 = RegisterProgram("program1");
  EXPECT_THAT(GetModuleIds(),
              UnorderedElementsAre(program0A, program0B, program1));
  UnregisterProgram(program1);
  EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0A, program0B));
  UnregisterProgram(program0A);
  EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0B));
  UnregisterProgram(program0B);
  EXPECT_THAT(GetModuleIds(), IsEmpty());
}

// Modules registered before or during a trace are all reported by StopTrace,
// and repeated traces report the same still-registered set.
TEST_F(XlaDebugInfoManagerTest, ActiveTrace) {
  auto program0A = RegisterProgram("program0");
  auto program0B = RegisterProgram("program0");
  auto program1 = RegisterProgram("program1");
  StartTrace();
  auto program2 = RegisterProgram("program2");
  EXPECT_THAT(StopTrace(),
              UnorderedElementsAre(program0A, program0B, program1, program2));
  StartTrace();
  EXPECT_THAT(StopTrace(),
              UnorderedElementsAre(program0A, program0B, program1, program2));
  UnregisterProgram(program2);
  EXPECT_THAT(GetModuleIds(),
              UnorderedElementsAre(program0A, program0B, program1));
  UnregisterProgram(program0A);
  EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0B, program1));
  UnregisterProgram(program0B);
  EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program1));
  UnregisterProgram(program1);
  EXPECT_THAT(GetModuleIds(), IsEmpty());
}

// Modules unregistered while a trace is active are still serialized by
// StopTrace, then dropped from the registry afterwards.
TEST_F(XlaDebugInfoManagerTest, UnregisterDuringTrace) {
  auto program0A = RegisterProgram("program0");
  auto program0B = RegisterProgram("program0");
  auto program1 = RegisterProgram("program1");
  StartTrace();
  UnregisterProgram(program1);
  UnregisterProgram(program0B);
  EXPECT_THAT(StopTrace(),
              UnorderedElementsAre(program0A, program0B, program1));
  EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0A));
  UnregisterProgram(program0A);
}

}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/xla_debug_info_manager.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/xla_debug_info_manager_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
04ba64e2-bf21-4b89-ab30-0f9608f75f9e
cpp
tensorflow/tensorflow
convert_operand_folding
third_party/xla/xla/service/convert_operand_folding.cc
third_party/xla/xla/service/convert_operand_folding_test.cc
#include "xla/service/convert_operand_folding.h"

#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"

namespace xla {
namespace {

// Returns true if `hlo` is a value-preserving upcast convert, possibly seen
// through a chain of data-movement ops (dynamic-slice, gather, reshape,
// slice, transpose) that only rearrange elements.
bool IsUpcastConvert(const HloInstruction* hlo) {
  if (!hlo->shape().IsArray()) {
    return false;
  }
  switch (hlo->opcode()) {
    // Pure data movement: look through to the producing operand.
    case HloOpcode::kDynamicSlice:
    case HloOpcode::kGather:
    case HloOpcode::kReshape:
    case HloOpcode::kSlice:
    case HloOpcode::kTranspose: {
      return IsUpcastConvert(hlo->operand(0));
    }
    case HloOpcode::kReduce: {
      // Only a degenerate reduce (element count unchanged, i.e. reducing
      // size-1 dimensions) is data movement; a real reduction combines
      // elements and cannot be looked through.
      if (ShapeUtil::ElementsIn(hlo->shape()) ==
          ShapeUtil::ElementsIn(hlo->operand(0)->shape())) {
        return IsUpcastConvert(hlo->operand(0));
      }
      return false;
    }
    case HloOpcode::kConvert:
      // An upcast iff every value of the source type is exactly
      // representable in the destination type.
      return primitive_util::CastPreservesValues(
          hlo->operand(0)->shape().element_type(), hlo->shape().element_type());
    default:
      return false;
  }
}

// Rebuilds the chain recognized by IsUpcastConvert with the convert removed,
// returning an instruction producing the same elements in the narrower
// (pre-convert) type. Mutates the graph by cloning/adding instructions.
// Returns nullptr for opcodes outside the recognized set.
HloInstruction* EffectiveOperand(HloInstruction* hlo) {
  switch (hlo->opcode()) {
    case HloOpcode::kBroadcast:
    case HloOpcode::kDynamicSlice:
    case HloOpcode::kGather:
    case HloOpcode::kReshape:
    case HloOpcode::kSlice:
    case HloOpcode::kTranspose: {
      // Clone the data-movement op, retype it to the narrow element type, and
      // rewire it onto the recursively-stripped operand. The shape mismatch
      // during rewiring is expected, hence ReplaceOperandWithDifferentShape;
      // its status is deliberately ignored.
      HloInstruction* operand = EffectiveOperand(hlo->mutable_operand(0));
      HloInstruction* clone = hlo->AddInstruction(hlo->Clone());
      *(clone->mutable_shape()) = ShapeUtil::ChangeElementType(
          clone->shape(), operand->shape().element_type());
      clone->ReplaceOperandWithDifferentShape(0, operand).IgnoreError();
      return clone;
    }
    case HloOpcode::kReduce: {
      // Degenerate reduce (see IsUpcastConvert): replace it with a reshape of
      // the stripped operand in the narrow type.
      HloInstruction* operand = EffectiveOperand(hlo->mutable_operand(0));
      return hlo->AddInstruction(HloInstruction::CreateReshape(
          ShapeUtil::ChangeElementType(hlo->shape(),
                                       operand->shape().element_type()),
          operand));
    }
    case HloOpcode::kConvert:
      // Drop the convert: the dot/convolution consumes the narrow value
      // directly.
      return hlo->mutable_operand(0);
    default:
      return nullptr;
  }
}

}  // namespace

// Matches dot/convolution instructions with at least one operand that is a
// (possibly indirect) upcast convert eligible for folding.
bool ConvertOperandFolding::InstructionMatchesPattern(
    HloInstruction* instruction) {
  if (instruction->opcode() != HloOpcode::kDot &&
      instruction->opcode() != HloOpcode::kConvolution) {
    return false;
  }
  for (auto* operand : instruction->operands()) {
    if (IsUpcastConvert(operand)) {
      return true;
    }
  }
  return false;
}

// Folds each eligible operand's upcast convert into the dot/convolution by
// rewiring the operand to its narrow-typed equivalent. Returns nullptr:
// the instruction is modified in place rather than replaced. (This follows
// the OpExpanderPass contract — presumably the base class treats nullptr as
// "keep the original instruction"; confirm against op_expander_pass.h.)
absl::StatusOr<HloInstruction*> ConvertOperandFolding::ExpandInstruction(
    HloInstruction* instruction) {
  for (int i = 0; i < instruction->operand_count(); ++i) {
    auto* operand = instruction->mutable_operand(i);
    if (IsUpcastConvert(operand)) {
      TF_RETURN_IF_ERROR(instruction->ReplaceOperandWithDifferentShape(
          i, EffectiveOperand(operand)));
    }
  }
  return nullptr;
}

}  // namespace xla
// Tests for the ConvertOperandFolding pass: value-preserving (upcast)
// converts feeding dot operands should be folded so the dot consumes the
// narrow inputs directly, while value-losing (downcast) converts are kept.
#include "xla/service/convert_operand_folding.h"

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace {

namespace op = ::xla::testing::opcode_matchers;

using ConvertOperandFoldingTest = HloTestBase;

// Integral widening converts on both dot operands are folded away; the dot
// ends up consuming the parameters directly.
TEST_F(ConvertOperandFoldingTest, IntegralUpcastConvertFolded) {
  absl::string_view module_string = R"( HloModule module ENTRY main { p0 = s8[2,3]{1,0} parameter(0) p1 = s16[3,2]{0,1} parameter(1) c0 = s16[2,3]{1,0} convert(p0) c1 = s16[3,2]{0,1} convert(p1) ROOT dot = s16[2,2]{1,0} dot(c0, c1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  TF_ASSERT_OK_AND_ASSIGN(bool folded,
                          ConvertOperandFolding().Run(module.get()));
  EXPECT_TRUE(folded);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Dot(op::Parameter(0), op::Parameter(1)),
                    op::Shape("s16[2,2]{1,0}")));
}

// Floating-point widening converts (f16->f32, bf16->f32) are folded.
TEST_F(ConvertOperandFoldingTest, FloatingUpcastConvertFolded) {
  absl::string_view module_string = R"( HloModule module ENTRY main { p0 = f16[2,3]{1,0} parameter(0) p1 = bf16[3,2]{0,1} parameter(1) c0 = f32[2,3]{1,0} convert(p0) c1 = f32[3,2]{0,1} convert(p1) ROOT dot = f32[2,2]{1,0} dot(c0, c1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  TF_ASSERT_OK_AND_ASSIGN(bool folded,
                          ConvertOperandFolding().Run(module.get()));
  EXPECT_TRUE(folded);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Dot(op::Parameter(0), op::Parameter(1)),
                    op::Shape("f32[2,2]{1,0}")));
}

// Integer-to-float converts that lose no values (s8->f16, s16->f32) are
// also folded.
TEST_F(ConvertOperandFoldingTest, IntegralToFloatingConvertFolded) {
  absl::string_view module_string = R"( HloModule module ENTRY main { p0 = s8[2,3]{1,0} parameter(0) p1 = s16[3,2]{0,1} parameter(1) c0 = f16[2,3]{1,0} convert(p0) c1 = f32[3,2]{0,1} convert(p1) ROOT dot = f32[2,2]{1,0} dot(c0, c1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  TF_ASSERT_OK_AND_ASSIGN(bool folded,
                          ConvertOperandFolding().Run(module.get()));
  EXPECT_TRUE(folded);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(op::Dot(op::Parameter(0), op::Parameter(1)),
                    op::Shape("f32[2,2]{1,0}")));
}

// Narrowing converts (s32->s16, s16->s8) lose values, so nothing is folded
// and the converts remain in place.
TEST_F(ConvertOperandFoldingTest, DowncastConvertNotFolded) {
  absl::string_view module_string = R"( HloModule module ENTRY main { p0 = s32[2,3]{1,0} parameter(0) p1 = s16[3,2]{0,1} parameter(1) c0 = s16[2,3]{1,0} convert(p0) c1 = s8[3,2]{0,1} convert(p1) ROOT dot = s16[2,2]{1,0} dot(c0, c1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  TF_ASSERT_OK_AND_ASSIGN(bool folded,
                          ConvertOperandFolding().Run(module.get()));
  EXPECT_FALSE(folded);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      AllOf(
          op::Dot(
              AllOf(op::Convert(op::Parameter(0)), op::Shape("s16[2,3]{1,0}")),
              AllOf(op::Convert(op::Parameter(1)), op::Shape("s8[3,2]{0,1}"))),
          op::Shape("s16[2,2]{1,0}")));
}

// Mixed case: the upcast operand is folded while the downcast operand keeps
// its convert.
TEST_F(ConvertOperandFoldingTest, OneOperandFolded) {
  absl::string_view module_string = R"( HloModule module ENTRY main { p0 = s8[2,3]{1,0} parameter(0) p1 = s16[3,2]{0,1} parameter(1) c0 = s16[2,3]{1,0} convert(p0) c1 = s8[3,2]{0,1} convert(p1) ROOT dot = s16[2,2]{1,0} dot(c0, c1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  TF_ASSERT_OK_AND_ASSIGN(bool folded,
                          ConvertOperandFolding().Run(module.get()));
  EXPECT_TRUE(folded);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      AllOf(op::Dot(op::Parameter(0),
                    AllOf(op::Convert(op::Parameter(1)),
                          op::Shape("s8[3,2]{0,1}"))),
            op::Shape("s16[2,2]{1,0}")));
}

// The convert is folded through a chain of data-movement ops (reshape,
// transpose, slice) and an element-count-preserving reduce; the reduce is
// rewritten as a reshape on the narrow input.
TEST_F(ConvertOperandFoldingTest, FoldedWithFormatting) {
  absl::string_view module_string = R"( HloModule module sum { a = s16[] parameter(0) b = s16[] parameter(1) ROOT r = add(a,b) } ENTRY main { p0 = s8[3,10] parameter(0) c0 = s16[3,10] convert(p0) r0 = s16[3,2,5] reshape(c0) t0 = s16[2,5,3] transpose(r0), dimensions={1,2,0} s0 = s16[2,1,3] slice(t0), slice={[0:2], [2:3], [0:3]} rs0 = s16[2,3] reshape(s0) p1 = s8[3,1,2] parameter(1) c1 = s16[3,1,2] convert(p1) r1 = s16[1,3,2] transpose(c1), dimensions={1,0,2} z = s16[] constant(0) rr1 = s16[3,2] reduce(r1,z), dimensions={0}, to_apply=sum ROOT dot = s16[2,2] dot(rs0, rr1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  TF_ASSERT_OK_AND_ASSIGN(bool folded,
                          ConvertOperandFolding().Run(module.get()));
  EXPECT_TRUE(folded);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Dot(
          op::Reshape(op::Slice(op::Transpose(op::Reshape(op::Parameter(0))))),
          op::Reshape(op::Transpose(op::Parameter(1)))));
}

// The convert is folded through gather and dynamic-slice as well.
TEST_F(ConvertOperandFoldingTest, FoldedWithDSAndGather) {
  absl::string_view module_string = R"( HloModule module ENTRY main { p0 = s8[100,3] parameter(0) c0 = s16[100,3] convert(p0) ids = s32[20] parameter(2) g = s16[20,3] gather(c0, ids), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,3} t = s16[3,20] transpose(g), dimensions={1,0} p1 = s8[25,3] parameter(1) c1 = s16[25,3] convert(p1) z = s32[] constant(0) s = s32[] parameter(3) ds = s16[20,3] dynamic-slice(c1, s, z), dynamic_slice_sizes={20,3} ROOT dot = s16[3,3] dot(t, ds), lhs_contracting_dims={1}, rhs_contracting_dims={0} })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  TF_ASSERT_OK_AND_ASSIGN(bool folded,
                          ConvertOperandFolding().Run(module.get()));
  EXPECT_TRUE(folded);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Dot(op::Transpose(op::Gather(op::Parameter(0), op::Parameter(2))),
              op::DynamicSlice(op::Parameter(1), op::Parameter(3),
                               op::Constant())));
}

}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convert_operand_folding.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convert_operand_folding_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
31d6bc83-735f-44e1-8e06-a62edd6d24b3
cpp
tensorflow/tensorflow
convert_memory_placement_to_internal_annotations
third_party/xla/xla/service/convert_memory_placement_to_internal_annotations.cc
third_party/xla/xla/service/convert_memory_placement_to_internal_annotations_test.cc
#include "xla/service/convert_memory_placement_to_internal_annotations.h"

#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/side_effect_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"

namespace xla {

// Rewrites "annotate_device_placement" custom calls carrying an
// _xla_buffer_placement frontend attribute into XLA's internal
// MoveToHost/MoveToDevice annotation custom calls, removing the originals.
// Returns true iff the module was modified.
absl::StatusOr<bool> ConvertMemoryPlacementToInternalAnnotations::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool changed = false;
  for (HloComputation* c : module->MakeNonfusionComputations()) {
    for (HloInstruction* instruction : c->MakeInstructionPostOrder()) {
      // Only device-placement annotation custom calls are of interest.
      if (instruction->IsCustomCall(
              host_memory_offload_annotations::kDevicePlacement)) {
        const auto& frontend_attributes = instruction->frontend_attributes();
        const auto it =
            frontend_attributes.map().find(kXlaBufferPlacementAttr);
        if (it == frontend_attributes.map().end()) {
          // No placement attribute: leave the custom call untouched.
          continue;
        }
        // "pinned_host"/"unpinned_host" both map to the to-host (offload)
        // case; "device" maps to the to-device (bring-back) case.
        const bool is_to_host_case =
            (it->second ==
                 host_memory_offload_annotations::kMemoryTargetPinnedHost ||
             it->second ==
                 host_memory_offload_annotations::kMemoryTargetUnpinnedHost);
        const bool is_to_device_case =
            (it->second ==
             host_memory_offload_annotations::kMemoryTargetDevice);
        if (!is_to_host_case && !is_to_device_case) {
          // Unknown placement target: skip rather than fail.
          continue;
        }
        if (is_to_host_case) {
          VLOG(1) << "Process forward case: " << instruction->ToString();
          // The to-host rewrite assumes a single operand; anything else is a
          // malformed annotation.
          if (instruction->operand_count() != 1) {
            return Internal(
                "Custom calls with target %s must have exactly one operand. "
                "%s "
                "has %d.",
                host_memory_offload_annotations::kDevicePlacement,
                instruction->name(), instruction->operand_count());
          }
          HloInstruction* input = instruction->mutable_operand(0);
          HloInstruction* move_to_host_custom_call =
              c->AddInstruction(HloInstruction::CreateCustomCall(
                  input->shape(), {input},
                  host_memory_offload_annotations::
                      kMoveToHostCustomCallTarget));
          // Preserve any sharding from the original annotation.
          if (instruction->has_sharding()) {
            move_to_host_custom_call->set_sharding(instruction->sharding());
          }
          TF_RETURN_IF_ERROR(
              instruction->ReplaceAllUsesWith(move_to_host_custom_call));
          TF_RETURN_IF_ERROR(
              c->RemoveInstructionAndUnusedOperands(instruction));
          changed = true;
        } else if (is_to_device_case) {
          VLOG(1) << "Process backward case: " << instruction->ToString();
          HloInstruction* custom_call_operand = instruction->mutable_operand(0);
          HloInstruction* new_result =
              c->AddInstruction(HloInstruction::CreateCustomCall(
                  custom_call_operand->shape(), {custom_call_operand},
                  host_memory_offload_annotations::
                      kMoveToDeviceCustomCallTarget));
          TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(new_result));
          TF_RETURN_IF_ERROR(
              c->RemoveInstructionAndUnusedOperands(instruction));
          changed = true;
        }
      }
    }
  }
  return changed;
}

}  // namespace xla
#include "xla/service/convert_memory_placement_to_internal_annotations.h" #include <cstdint> #include <memory> #include <optional> #include <string> #include <string_view> #include <vector> #include <gtest/gtest.h> #include "absl/status/statusor.h" #include "xla/service/host_memory_offload_annotations.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/verified_hlo_module.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/statusor.h" namespace xla { namespace { class ConvertMemoryPlacementToInternalAnnotationsTest : public HloTestBase { public: ConvertMemoryPlacementToInternalAnnotationsTest() = default; }; TEST_F(ConvertMemoryPlacementToInternalAnnotationsTest, ConvertPinnedHostTest) { const char* hlo_string = R"( HloModule jit_f, entry_computation_layout={(f32[16]{0})->f32[16]{0}} region_0.9 { arg_tuple.10 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0) get-tuple-element.11 = s32[] get-tuple-element(arg_tuple.10), index=0 constant.15 = s32[] constant(1) add.33 = s32[] add(get-tuple-element.11, constant.15) get-tuple-element.12 = f32[16]{0} get-tuple-element(arg_tuple.10), index=1 sine.18 = f32[16]{0} sine(get-tuple-element.12) sine.19 = f32[16]{0} sine(sine.18) sine.20 = f32[16]{0} sine(sine.19) get-tuple-element.13 = f32[16,16]{1,0} get-tuple-element(arg_tuple.10), index=2 custom-call.21 = f32[16]{0} custom-call(sine.19), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="pinned_host"} reshape.23 = f32[1,16]{1,0} reshape(custom-call.21) constant.17 = s32[] constant(0) compare.24 = pred[] compare(get-tuple-element.11, constant.17), direction=LT constant.16 = s32[] constant(16) add.25 = s32[] add(get-tuple-element.11, constant.16) select.26 = s32[] select(compare.24, add.25, get-tuple-element.11) dynamic-update-slice.27 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.13, reshape.23, select.26, constant.17) get-tuple-element.14 = f32[16,16]{1,0} 
get-tuple-element(arg_tuple.10), index=3 custom-call.22 = f32[16]{0} custom-call(sine.20), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="pinned_host"} reshape.28 = f32[1,16]{1,0} reshape(custom-call.22) compare.29 = pred[] compare(get-tuple-element.11, constant.17), direction=LT add.30 = s32[] add(get-tuple-element.11, constant.16) select.31 = s32[] select(compare.29, add.30, get-tuple-element.11) dynamic-update-slice.32 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.14, reshape.28, select.31, constant.17) ROOT tuple.34 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(add.33, sine.20, dynamic-update-slice.27, dynamic-update-slice.32) } region_1.35 { arg_tuple.36 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0) get-tuple-element.38 = f32[16]{0} get-tuple-element(arg_tuple.36), index=1 get-tuple-element.39 = f32[16,16]{1,0} get-tuple-element(arg_tuple.36), index=2 get-tuple-element.40 = f32[16,16]{1,0} get-tuple-element(arg_tuple.36), index=3 get-tuple-element.37 = s32[] get-tuple-element(arg_tuple.36), index=0 constant.41 = s32[] constant(16) ROOT compare.42 = pred[] compare(get-tuple-element.37, constant.41), direction=LT } core_closed_call.43 { constant.47 = s32[] constant(0) Arg_0.44 = f32[16]{0} parameter(0) constant.45 = f32[] constant(0) broadcast.46 = f32[16,16]{1,0} broadcast(constant.45), dimensions={} tuple.48 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(constant.47, Arg_0.44, broadcast.46, broadcast.46) while.49 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) while(tuple.48), condition=region_1.35, body=region_0.9 get-tuple-element.50 = s32[] get-tuple-element(while.49), index=0 get-tuple-element.51 = f32[16]{0} get-tuple-element(while.49), index=1 get-tuple-element.52 = f32[16,16]{1,0} get-tuple-element(while.49), index=2 get-tuple-element.53 = f32[16,16]{1,0} get-tuple-element(while.49), index=3 ROOT tuple.54 = (f32[16,16]{1,0}, 
f32[16,16]{1,0}) tuple(get-tuple-element.52, get-tuple-element.53) } region_2.65 { arg_tuple.66 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0) get-tuple-element.67 = s32[] get-tuple-element(arg_tuple.66), index=0 constant.74 = s32[] constant(1) add.108 = s32[] add(get-tuple-element.67, constant.74) get-tuple-element.73 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=6 constant.76 = s32[] constant(0) compare.82 = pred[] compare(get-tuple-element.67, constant.76), direction=LT constant.75 = s32[] constant(16) add.83 = s32[] add(get-tuple-element.67, constant.75) select.84 = s32[] select(compare.82, add.83, get-tuple-element.67) dynamic-slice.85 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.73, select.84, constant.76), dynamic_slice_sizes={1,16} reshape.86 = f32[16]{0} reshape(dynamic-slice.85) custom-call.87 = f32[16]{0} custom-call(reshape.86), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="device"} get-tuple-element.69 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=2 get-tuple-element.68 = f32[16]{0} get-tuple-element(arg_tuple.66), index=1 cosine.88 = f32[16]{0} cosine(get-tuple-element.68) reshape.93 = f32[1,16]{1,0} reshape(cosine.88) compare.94 = pred[] compare(get-tuple-element.67, constant.76), direction=LT add.95 = s32[] add(get-tuple-element.67, constant.75) select.96 = s32[] select(compare.94, add.95, get-tuple-element.67) dynamic-update-slice.97 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.69, reshape.93, select.96, constant.76) get-tuple-element.70 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=3 sine.89 = f32[16]{0} sine(get-tuple-element.68) cosine.90 = f32[16]{0} cosine(sine.89) reshape.98 = f32[1,16]{1,0} reshape(cosine.90) compare.99 = pred[] compare(get-tuple-element.67, constant.76), direction=LT add.100 = s32[] add(get-tuple-element.67, constant.75) select.101 = s32[] select(compare.99, 
add.100, get-tuple-element.67) dynamic-update-slice.102 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.70, reshape.98, select.101, constant.76) get-tuple-element.71 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=4 get-tuple-element.72 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=5 compare.77 = pred[] compare(get-tuple-element.67, constant.76), direction=LT add.78 = s32[] add(get-tuple-element.67, constant.75) select.79 = s32[] select(compare.77, add.78, get-tuple-element.67) dynamic-slice.80 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.72, select.79, constant.76), dynamic_slice_sizes={1,16} reshape.81 = f32[16]{0} reshape(dynamic-slice.80) custom-call.91 = f32[16]{0} custom-call(reshape.81), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="device"} cosine.92 = f32[16]{0} cosine(custom-call.91) reshape.103 = f32[1,16]{1,0} reshape(cosine.92) compare.104 = pred[] compare(get-tuple-element.67, constant.76), direction=LT add.105 = s32[] add(get-tuple-element.67, constant.75) select.106 = s32[] select(compare.104, add.105, get-tuple-element.67) dynamic-update-slice.107 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.71, reshape.103, select.106, constant.76) ROOT tuple.109 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(add.108, custom-call.87, dynamic-update-slice.97, dynamic-update-slice.102, dynamic-update-slice.107, get-tuple-element.72, get-tuple-element.73) } region_3.110 { arg_tuple.111 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0) get-tuple-element.113 = f32[16]{0} get-tuple-element(arg_tuple.111), index=1 get-tuple-element.114 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=2 get-tuple-element.115 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=3 get-tuple-element.116 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), 
index=4 get-tuple-element.117 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=5 get-tuple-element.118 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=6 get-tuple-element.112 = s32[] get-tuple-element(arg_tuple.111), index=0 constant.119 = s32[] constant(16) ROOT compare.120 = pred[] compare(get-tuple-element.112, constant.119), direction=LT } region_4.130 { arg_tuple.131 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0) get-tuple-element.132 = s32[] get-tuple-element(arg_tuple.131), index=0 constant.140 = s32[] constant(1) add.164 = s32[] add(get-tuple-element.132, constant.140) get-tuple-element.133 = f32[16]{0} get-tuple-element(arg_tuple.131), index=1 get-tuple-element.134 = f32[] get-tuple-element(arg_tuple.131), index=2 broadcast.159 = f32[16]{0} broadcast(get-tuple-element.134), dimensions={} add.160 = f32[16]{0} add(get-tuple-element.133, broadcast.159) get-tuple-element.137 = f32[16,16]{1,0} get-tuple-element(arg_tuple.131), index=5 constant.141 = s32[] constant(16) subtract.142 = s32[] subtract(constant.141, get-tuple-element.132) subtract.143 = s32[] subtract(subtract.142, constant.140) constant.139 = s32[] constant(0) compare.154 = pred[] compare(subtract.143, constant.139), direction=LT add.155 = s32[] add(subtract.143, constant.141) select.156 = s32[] select(compare.154, add.155, subtract.143) dynamic-slice.157 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.137, select.156, constant.139), dynamic_slice_sizes={1,16} reshape.158 = f32[16]{0} reshape(dynamic-slice.157) multiply.161 = f32[16]{0} multiply(add.160, reshape.158) get-tuple-element.136 = f32[16,16]{1,0} get-tuple-element(arg_tuple.131), index=4 compare.149 = pred[] compare(subtract.143, constant.139), direction=LT add.150 = s32[] add(subtract.143, constant.141) select.151 = s32[] select(compare.149, add.150, subtract.143) dynamic-slice.152 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.136, select.151, constant.139), 
dynamic_slice_sizes={1,16} reshape.153 = f32[16]{0} reshape(dynamic-slice.152) multiply.162 = f32[16]{0} multiply(multiply.161, reshape.153) get-tuple-element.135 = f32[16,16]{1,0} get-tuple-element(arg_tuple.131), index=3 compare.144 = pred[] compare(subtract.143, constant.139), direction=LT add.145 = s32[] add(subtract.143, constant.141) select.146 = s32[] select(compare.144, add.145, subtract.143) dynamic-slice.147 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.135, select.146, constant.139), dynamic_slice_sizes={1,16} reshape.148 = f32[16]{0} reshape(dynamic-slice.147) multiply.163 = f32[16]{0} multiply(multiply.162, reshape.148) constant.138 = f32[] constant(0) ROOT tuple.165 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(add.164, multiply.163, constant.138, get-tuple-element.135, get-tuple-element.136, get-tuple-element.137) } region_5.166 { arg_tuple.167 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0) get-tuple-element.169 = f32[16]{0} get-tuple-element(arg_tuple.167), index=1 get-tuple-element.170 = f32[] get-tuple-element(arg_tuple.167), index=2 get-tuple-element.171 = f32[16,16]{1,0} get-tuple-element(arg_tuple.167), index=3 get-tuple-element.172 = f32[16,16]{1,0} get-tuple-element(arg_tuple.167), index=4 get-tuple-element.173 = f32[16,16]{1,0} get-tuple-element(arg_tuple.167), index=5 get-tuple-element.168 = s32[] get-tuple-element(arg_tuple.167), index=0 constant.174 = s32[] constant(16) ROOT compare.175 = pred[] compare(get-tuple-element.168, constant.174), direction=LT } ENTRY main.183 { constant.6 = s32[] constant(0) Arg_0.1 = f32[16]{0} parameter(0), sharding={devices=[2]<=[2]} call.55 = (f32[16,16]{1,0}, f32[16,16]{1,0}) call(Arg_0.1), to_apply=core_closed_call.43 get-tuple-element.56 = f32[16,16]{1,0} get-tuple-element(call.55), index=0 get-tuple-element.57 = f32[16,16]{1,0} get-tuple-element(call.55), index=1 constant.7 = f32[] constant(1) tuple.58 = 
(f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16]{0}, f32[]) tuple(get-tuple-element.56, get-tuple-element.57, Arg_0.1, constant.7) opt-barrier.59 = (f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16]{0}, f32[]) opt-barrier(tuple.58) get-tuple-element.62 = f32[16]{0} get-tuple-element(opt-barrier.59), index=2 constant.4 = f32[] constant(0) broadcast.5 = f32[16,16]{1,0} broadcast(constant.4), dimensions={} get-tuple-element.60 = f32[16,16]{1,0} get-tuple-element(opt-barrier.59), index=0 get-tuple-element.61 = f32[16,16]{1,0} get-tuple-element(opt-barrier.59), index=1 tuple.64 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(constant.6, get-tuple-element.62, broadcast.5, broadcast.5, broadcast.5, get-tuple-element.60, get-tuple-element.61) while.121 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) while(tuple.64), condition=region_3.110, body=region_2.65 get-tuple-element.122 = s32[] get-tuple-element(while.121), index=0 get-tuple-element.123 = f32[16]{0} get-tuple-element(while.121), index=1 get-tuple-element.127 = f32[16,16]{1,0} get-tuple-element(while.121), index=5 get-tuple-element.128 = f32[16,16]{1,0} get-tuple-element(while.121), index=6 constant.2 = f32[] constant(0) broadcast.3 = f32[16]{0} broadcast(constant.2), dimensions={} get-tuple-element.63 = f32[] get-tuple-element(opt-barrier.59), index=3 get-tuple-element.124 = f32[16,16]{1,0} get-tuple-element(while.121), index=2 get-tuple-element.125 = f32[16,16]{1,0} get-tuple-element(while.121), index=3 get-tuple-element.126 = f32[16,16]{1,0} get-tuple-element(while.121), index=4 tuple.129 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(constant.6, broadcast.3, get-tuple-element.63, get-tuple-element.124, get-tuple-element.125, get-tuple-element.126) while.176 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) while(tuple.129), 
condition=region_5.166, body=region_4.130 get-tuple-element.177 = s32[] get-tuple-element(while.176), index=0 ROOT get-tuple-element.178 = f32[16]{0} get-tuple-element(while.176), index=1 get-tuple-element.179 = f32[] get-tuple-element(while.176), index=2 get-tuple-element.180 = f32[16,16]{1,0} get-tuple-element(while.176), index=3 get-tuple-element.181 = f32[16,16]{1,0} get-tuple-element(while.176), index=4 get-tuple-element.182 = f32[16,16]{1,0} get-tuple-element(while.176), index=5 } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); bool changed = ConvertMemoryPlacementToInternalAnnotations().Run(module.get()).value(); EXPECT_TRUE(changed); XLA_VLOG_LINES(1, module->ToString()); int64_t custom_calls_count = 0; for (auto* c : module->computations()) { for (auto* instr : c->instructions()) { if (instr->IsCustomCall( host_memory_offload_annotations::kMoveToHostCustomCallTarget) || instr->IsCustomCall( host_memory_offload_annotations::kMoveToDeviceCustomCallTarget)) { ++custom_calls_count; } } } EXPECT_EQ(custom_calls_count, 4); } TEST_F(ConvertMemoryPlacementToInternalAnnotationsTest, ConvertUnpinnedHostTest) { const char* hlo_string = R"( HloModule jit_f, entry_computation_layout={(f32[16]{0})->f32[16]{0}} region_0.9 { arg_tuple.10 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0) get-tuple-element.11 = s32[] get-tuple-element(arg_tuple.10), index=0 constant.15 = s32[] constant(1) add.33 = s32[] add(get-tuple-element.11, constant.15) get-tuple-element.12 = f32[16]{0} get-tuple-element(arg_tuple.10), index=1 sine.18 = f32[16]{0} sine(get-tuple-element.12) sine.19 = f32[16]{0} sine(sine.18) sine.20 = f32[16]{0} sine(sine.19) get-tuple-element.13 = f32[16,16]{1,0} get-tuple-element(arg_tuple.10), index=2 custom-call.21 = f32[16]{0} custom-call(sine.19), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="unpinned_host"} reshape.23 = f32[1,16]{1,0} reshape(custom-call.21) 
constant.17 = s32[] constant(0) compare.24 = pred[] compare(get-tuple-element.11, constant.17), direction=LT constant.16 = s32[] constant(16) add.25 = s32[] add(get-tuple-element.11, constant.16) select.26 = s32[] select(compare.24, add.25, get-tuple-element.11) dynamic-update-slice.27 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.13, reshape.23, select.26, constant.17) get-tuple-element.14 = f32[16,16]{1,0} get-tuple-element(arg_tuple.10), index=3 custom-call.22 = f32[16]{0} custom-call(sine.20), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="unpinned_host"} reshape.28 = f32[1,16]{1,0} reshape(custom-call.22) compare.29 = pred[] compare(get-tuple-element.11, constant.17), direction=LT add.30 = s32[] add(get-tuple-element.11, constant.16) select.31 = s32[] select(compare.29, add.30, get-tuple-element.11) dynamic-update-slice.32 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.14, reshape.28, select.31, constant.17) ROOT tuple.34 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(add.33, sine.20, dynamic-update-slice.27, dynamic-update-slice.32) } region_1.35 { arg_tuple.36 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0) get-tuple-element.38 = f32[16]{0} get-tuple-element(arg_tuple.36), index=1 get-tuple-element.39 = f32[16,16]{1,0} get-tuple-element(arg_tuple.36), index=2 get-tuple-element.40 = f32[16,16]{1,0} get-tuple-element(arg_tuple.36), index=3 get-tuple-element.37 = s32[] get-tuple-element(arg_tuple.36), index=0 constant.41 = s32[] constant(16) ROOT compare.42 = pred[] compare(get-tuple-element.37, constant.41), direction=LT } core_closed_call.43 { constant.47 = s32[] constant(0) Arg_0.44 = f32[16]{0} parameter(0) constant.45 = f32[] constant(0) broadcast.46 = f32[16,16]{1,0} broadcast(constant.45), dimensions={} tuple.48 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(constant.47, Arg_0.44, broadcast.46, broadcast.46) while.49 = (s32[], f32[16]{0}, 
f32[16,16]{1,0}, f32[16,16]{1,0}) while(tuple.48), condition=region_1.35, body=region_0.9 get-tuple-element.50 = s32[] get-tuple-element(while.49), index=0 get-tuple-element.51 = f32[16]{0} get-tuple-element(while.49), index=1 get-tuple-element.52 = f32[16,16]{1,0} get-tuple-element(while.49), index=2 get-tuple-element.53 = f32[16,16]{1,0} get-tuple-element(while.49), index=3 ROOT tuple.54 = (f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(get-tuple-element.52, get-tuple-element.53) } region_2.65 { arg_tuple.66 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0) get-tuple-element.67 = s32[] get-tuple-element(arg_tuple.66), index=0 constant.74 = s32[] constant(1) add.108 = s32[] add(get-tuple-element.67, constant.74) get-tuple-element.73 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=6 constant.76 = s32[] constant(0) compare.82 = pred[] compare(get-tuple-element.67, constant.76), direction=LT constant.75 = s32[] constant(16) add.83 = s32[] add(get-tuple-element.67, constant.75) select.84 = s32[] select(compare.82, add.83, get-tuple-element.67) dynamic-slice.85 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.73, select.84, constant.76), dynamic_slice_sizes={1,16} reshape.86 = f32[16]{0} reshape(dynamic-slice.85) custom-call.87 = f32[16]{0} custom-call(reshape.86), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="device"} get-tuple-element.69 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=2 get-tuple-element.68 = f32[16]{0} get-tuple-element(arg_tuple.66), index=1 cosine.88 = f32[16]{0} cosine(get-tuple-element.68) reshape.93 = f32[1,16]{1,0} reshape(cosine.88) compare.94 = pred[] compare(get-tuple-element.67, constant.76), direction=LT add.95 = s32[] add(get-tuple-element.67, constant.75) select.96 = s32[] select(compare.94, add.95, get-tuple-element.67) dynamic-update-slice.97 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.69, 
reshape.93, select.96, constant.76) get-tuple-element.70 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=3 sine.89 = f32[16]{0} sine(get-tuple-element.68) cosine.90 = f32[16]{0} cosine(sine.89) reshape.98 = f32[1,16]{1,0} reshape(cosine.90) compare.99 = pred[] compare(get-tuple-element.67, constant.76), direction=LT add.100 = s32[] add(get-tuple-element.67, constant.75) select.101 = s32[] select(compare.99, add.100, get-tuple-element.67) dynamic-update-slice.102 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.70, reshape.98, select.101, constant.76) get-tuple-element.71 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=4 get-tuple-element.72 = f32[16,16]{1,0} get-tuple-element(arg_tuple.66), index=5 compare.77 = pred[] compare(get-tuple-element.67, constant.76), direction=LT add.78 = s32[] add(get-tuple-element.67, constant.75) select.79 = s32[] select(compare.77, add.78, get-tuple-element.67) dynamic-slice.80 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.72, select.79, constant.76), dynamic_slice_sizes={1,16} reshape.81 = f32[16]{0} reshape(dynamic-slice.80) custom-call.91 = f32[16]{0} custom-call(reshape.81), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="device"} cosine.92 = f32[16]{0} cosine(custom-call.91) reshape.103 = f32[1,16]{1,0} reshape(cosine.92) compare.104 = pred[] compare(get-tuple-element.67, constant.76), direction=LT add.105 = s32[] add(get-tuple-element.67, constant.75) select.106 = s32[] select(compare.104, add.105, get-tuple-element.67) dynamic-update-slice.107 = f32[16,16]{1,0} dynamic-update-slice(get-tuple-element.71, reshape.103, select.106, constant.76) ROOT tuple.109 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(add.108, custom-call.87, dynamic-update-slice.97, dynamic-update-slice.102, dynamic-update-slice.107, get-tuple-element.72, get-tuple-element.73) } region_3.110 { arg_tuple.111 = (s32[], 
f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0) get-tuple-element.113 = f32[16]{0} get-tuple-element(arg_tuple.111), index=1 get-tuple-element.114 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=2 get-tuple-element.115 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=3 get-tuple-element.116 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=4 get-tuple-element.117 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=5 get-tuple-element.118 = f32[16,16]{1,0} get-tuple-element(arg_tuple.111), index=6 get-tuple-element.112 = s32[] get-tuple-element(arg_tuple.111), index=0 constant.119 = s32[] constant(16) ROOT compare.120 = pred[] compare(get-tuple-element.112, constant.119), direction=LT } region_4.130 { arg_tuple.131 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0) get-tuple-element.132 = s32[] get-tuple-element(arg_tuple.131), index=0 constant.140 = s32[] constant(1) add.164 = s32[] add(get-tuple-element.132, constant.140) get-tuple-element.133 = f32[16]{0} get-tuple-element(arg_tuple.131), index=1 get-tuple-element.134 = f32[] get-tuple-element(arg_tuple.131), index=2 broadcast.159 = f32[16]{0} broadcast(get-tuple-element.134), dimensions={} add.160 = f32[16]{0} add(get-tuple-element.133, broadcast.159) get-tuple-element.137 = f32[16,16]{1,0} get-tuple-element(arg_tuple.131), index=5 constant.141 = s32[] constant(16) subtract.142 = s32[] subtract(constant.141, get-tuple-element.132) subtract.143 = s32[] subtract(subtract.142, constant.140) constant.139 = s32[] constant(0) compare.154 = pred[] compare(subtract.143, constant.139), direction=LT add.155 = s32[] add(subtract.143, constant.141) select.156 = s32[] select(compare.154, add.155, subtract.143) dynamic-slice.157 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.137, select.156, constant.139), dynamic_slice_sizes={1,16} reshape.158 = f32[16]{0} reshape(dynamic-slice.157) 
multiply.161 = f32[16]{0} multiply(add.160, reshape.158) get-tuple-element.136 = f32[16,16]{1,0} get-tuple-element(arg_tuple.131), index=4 compare.149 = pred[] compare(subtract.143, constant.139), direction=LT add.150 = s32[] add(subtract.143, constant.141) select.151 = s32[] select(compare.149, add.150, subtract.143) dynamic-slice.152 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.136, select.151, constant.139), dynamic_slice_sizes={1,16} reshape.153 = f32[16]{0} reshape(dynamic-slice.152) multiply.162 = f32[16]{0} multiply(multiply.161, reshape.153) get-tuple-element.135 = f32[16,16]{1,0} get-tuple-element(arg_tuple.131), index=3 compare.144 = pred[] compare(subtract.143, constant.139), direction=LT add.145 = s32[] add(subtract.143, constant.141) select.146 = s32[] select(compare.144, add.145, subtract.143) dynamic-slice.147 = f32[1,16]{1,0} dynamic-slice(get-tuple-element.135, select.146, constant.139), dynamic_slice_sizes={1,16} reshape.148 = f32[16]{0} reshape(dynamic-slice.147) multiply.163 = f32[16]{0} multiply(multiply.162, reshape.148) constant.138 = f32[] constant(0) ROOT tuple.165 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(add.164, multiply.163, constant.138, get-tuple-element.135, get-tuple-element.136, get-tuple-element.137) } region_5.166 { arg_tuple.167 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) parameter(0) get-tuple-element.169 = f32[16]{0} get-tuple-element(arg_tuple.167), index=1 get-tuple-element.170 = f32[] get-tuple-element(arg_tuple.167), index=2 get-tuple-element.171 = f32[16,16]{1,0} get-tuple-element(arg_tuple.167), index=3 get-tuple-element.172 = f32[16,16]{1,0} get-tuple-element(arg_tuple.167), index=4 get-tuple-element.173 = f32[16,16]{1,0} get-tuple-element(arg_tuple.167), index=5 get-tuple-element.168 = s32[] get-tuple-element(arg_tuple.167), index=0 constant.174 = s32[] constant(16) ROOT compare.175 = pred[] compare(get-tuple-element.168, constant.174), 
direction=LT } ENTRY main.183 { constant.6 = s32[] constant(0) Arg_0.1 = f32[16]{0} parameter(0), sharding={devices=[2]<=[2]} call.55 = (f32[16,16]{1,0}, f32[16,16]{1,0}) call(Arg_0.1), to_apply=core_closed_call.43 get-tuple-element.56 = f32[16,16]{1,0} get-tuple-element(call.55), index=0 get-tuple-element.57 = f32[16,16]{1,0} get-tuple-element(call.55), index=1 constant.7 = f32[] constant(1) tuple.58 = (f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16]{0}, f32[]) tuple(get-tuple-element.56, get-tuple-element.57, Arg_0.1, constant.7) opt-barrier.59 = (f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16]{0}, f32[]) opt-barrier(tuple.58) get-tuple-element.62 = f32[16]{0} get-tuple-element(opt-barrier.59), index=2 constant.4 = f32[] constant(0) broadcast.5 = f32[16,16]{1,0} broadcast(constant.4), dimensions={} get-tuple-element.60 = f32[16,16]{1,0} get-tuple-element(opt-barrier.59), index=0 get-tuple-element.61 = f32[16,16]{1,0} get-tuple-element(opt-barrier.59), index=1 tuple.64 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(constant.6, get-tuple-element.62, broadcast.5, broadcast.5, broadcast.5, get-tuple-element.60, get-tuple-element.61) while.121 = (s32[], f32[16]{0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) while(tuple.64), condition=region_3.110, body=region_2.65 get-tuple-element.122 = s32[] get-tuple-element(while.121), index=0 get-tuple-element.123 = f32[16]{0} get-tuple-element(while.121), index=1 get-tuple-element.127 = f32[16,16]{1,0} get-tuple-element(while.121), index=5 get-tuple-element.128 = f32[16,16]{1,0} get-tuple-element(while.121), index=6 constant.2 = f32[] constant(0) broadcast.3 = f32[16]{0} broadcast(constant.2), dimensions={} get-tuple-element.63 = f32[] get-tuple-element(opt-barrier.59), index=3 get-tuple-element.124 = f32[16,16]{1,0} get-tuple-element(while.121), index=2 get-tuple-element.125 = f32[16,16]{1,0} get-tuple-element(while.121), index=3 
get-tuple-element.126 = f32[16,16]{1,0} get-tuple-element(while.121), index=4 tuple.129 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) tuple(constant.6, broadcast.3, get-tuple-element.63, get-tuple-element.124, get-tuple-element.125, get-tuple-element.126) while.176 = (s32[], f32[16]{0}, f32[], f32[16,16]{1,0}, f32[16,16]{1,0}, f32[16,16]{1,0}) while(tuple.129), condition=region_5.166, body=region_4.130 get-tuple-element.177 = s32[] get-tuple-element(while.176), index=0 ROOT get-tuple-element.178 = f32[16]{0} get-tuple-element(while.176), index=1 get-tuple-element.179 = f32[] get-tuple-element(while.176), index=2 get-tuple-element.180 = f32[16,16]{1,0} get-tuple-element(while.176), index=3 get-tuple-element.181 = f32[16,16]{1,0} get-tuple-element(while.176), index=4 get-tuple-element.182 = f32[16,16]{1,0} get-tuple-element(while.176), index=5 } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); bool changed = ConvertMemoryPlacementToInternalAnnotations().Run(module.get()).value(); EXPECT_TRUE(changed); XLA_VLOG_LINES(1, module->ToString()); int64_t custom_calls_count = 0; for (auto* c : module->computations()) { for (auto* instr : c->instructions()) { if (instr->IsCustomCall( host_memory_offload_annotations::kMoveToHostCustomCallTarget) || instr->IsCustomCall( host_memory_offload_annotations::kMoveToDeviceCustomCallTarget)) { ++custom_calls_count; } } } EXPECT_EQ(custom_calls_count, 4); } TEST_F(ConvertMemoryPlacementToInternalAnnotationsTest, ConvertOutputPinnedHostTest) { constexpr std::string_view hlo_string = R"( HloModule m, entry_computation_layout={(f32[2,2]{1,0:T(2,128)},f32[2,2]{1,0:T(2,128)})->f32[2,2]{1,0:T(2,128)S(5)}} ENTRY m { x = f32[2,2] parameter(0) y = f32[2,2] parameter(1) crs = f32[2,2] add(x, y) ROOT transfer = f32[2,2] custom-call(crs), custom_call_target="annotate_device_placement", frontend_attributes={_xla_buffer_placement="pinned_host"} })"; 
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(hlo_string)); bool changed = ConvertMemoryPlacementToInternalAnnotations().Run(module.get()).value(); EXPECT_TRUE(changed); XLA_VLOG_LINES(1, module->ToString()); int64_t move_to_host_count = 0; for (auto* c : module->computations()) { for (auto* instr : c->instructions()) { move_to_host_count += instr->IsCustomCall( host_memory_offload_annotations::kMoveToHostCustomCallTarget); } } EXPECT_EQ(move_to_host_count, 1); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convert_memory_placement_to_internal_annotations.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convert_memory_placement_to_internal_annotations_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ce41f9f8-1f4d-4324-9dbd-5d7e74c1ec3e
cpp
tensorflow/tensorflow
gpu_compilation_environment
third_party/xla/xla/service/gpu_compilation_environment.cc
third_party/xla/xla/service/gpu_compilation_environment_test.cc
#include "xla/service/gpu_compilation_environment.h"

#include <cstdint>
#include <memory>
#include <string>
#include <vector>

#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_join.h"
#include "xla/parse_flags_from_env.h"
#include "xla/service/compilation_environments.h"
#include "xla/tsl/util/command_line_flags.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"

namespace xla {

// Registers one tsl::Flag per GpuCompilationEnvironment field; parsed values
// are written straight into `gpu_comp_env` through its proto setters.
void InitializeFlagsForGpuCompEnv(std::vector<tsl::Flag>* flag_list,
                                  GpuCompilationEnvironment* gpu_comp_env) {
  // Adapts a proto int64 setter into the bool(int64_t) callback shape that
  // tsl::Flag expects (returning true means "value accepted").
  auto make_int64_setter =
      [gpu_comp_env](
          void (GpuCompilationEnvironment::*member_setter)(int64_t)) {
        return [gpu_comp_env, member_setter](int64_t value) {
          (gpu_comp_env->*member_setter)(value);
          return true;
        };
      };
  flag_list->push_back(tsl::Flag(
      "dummy_flag",
      make_int64_setter(&GpuCompilationEnvironment::set_dummy_flag),
      gpu_comp_env->dummy_flag(), "Dummy flag to demonstrate the flow"));
}

// Parses command-line style flag strings into a GpuCompilationEnvironment.
// Recognized flags are consumed from `flags`; when `strict` is set, any
// leftover (unrecognized) flag is an InvalidArgument error.
absl::StatusOr<GpuCompilationEnvironment> CreateGpuCompEnvFromFlagStrings(
    std::vector<std::string>& flags, bool strict) {
  GpuCompilationEnvironment gpu_comp_env;
  std::vector<tsl::Flag> flag_objects;
  InitializeFlagsForGpuCompEnv(&flag_objects, &gpu_comp_env);
  const bool parsed_ok = tsl::Flags::Parse(flags, flag_objects);
  if (!parsed_ok || (strict && !flags.empty())) {
    return InvalidArgument("Could not parse flags: %s",
                           absl::StrJoin(flags, ", "));
  }
  return gpu_comp_env;
}

// Builds a GpuCompilationEnvironment from the XLA_FLAGS environment variable,
// ignoring any flag in it that does not belong to this proto.
absl::StatusOr<GpuCompilationEnvironment> CreateGpuCompEnvFromEnvVar() {
  GpuCompilationEnvironment env;
  std::vector<tsl::Flag> flag_objects;
  InitializeFlagsForGpuCompEnv(&flag_objects, &env);
  ParseFlagsFromEnvAndIgnoreUnknown("XLA_FLAGS", flag_objects);
  return env;
}

// Returns an environment with every field explicitly set to its default.
GpuCompilationEnvironment CreateGpuCompEnvWithDefaultValues() {
  GpuCompilationEnvironment env;
  env.set_dummy_flag(1);
  return env;
}

// Fills unset fields of `env` first from XLA_FLAGS, then from defaults.
// A field set both in `env` and in XLA_FLAGS is reported as a conflict.
absl::Status InitializeMissingFieldsFromXLAFlags(
    GpuCompilationEnvironment& env) {
  TF_ASSIGN_OR_RETURN(GpuCompilationEnvironment from_env,
                      CreateGpuCompEnvFromEnvVar());
  auto default_env = CreateGpuCompEnvWithDefaultValues();
  auto reflection = env.GetReflection();
  auto reflection_from_env = from_env.GetReflection();
  auto descriptor = GpuCompilationEnvironment::descriptor();
  std::vector<const tsl::protobuf::FieldDescriptor*> missing_fields;
  for (int i = 0; i < descriptor->field_count(); ++i) {
    const tsl::protobuf::FieldDescriptor* field = descriptor->field(i);
    const bool set_in_env = reflection->HasField(env, field);
    const bool set_in_xla_flags =
        reflection_from_env->HasField(from_env, field);
    if (set_in_env && set_in_xla_flags) {
      return InvalidArgument(
          "Flag %s is set in both XLA_FLAGS env var and "
          "GpuCompilationEnvironment.",
          field->name());
    }
    if (!set_in_env && !set_in_xla_flags) {
      missing_fields.push_back(field);
    }
  }
  // XLA_FLAGS values land in fields that `env` left unset...
  env.MergeFrom(from_env);
  if (!missing_fields.empty()) {
    // ...and whatever is still unset picks up the default value.
    reflection->SwapFields(&env, &default_env, missing_fields);
  }
  return absl::OkStatus();
}

namespace {

// CompilationEnvironments factory hook: materializes a default-constructed
// proto when the caller supplied none.
absl::StatusOr<std::unique_ptr<tsl::protobuf::Message>>
ProcessNewGpuCompilationEnvironment(
    std::unique_ptr<tsl::protobuf::Message> env) {
  if (!env) {
    env = std::make_unique<GpuCompilationEnvironment>();
  }
  return env;
}

}  // namespace

}  // namespace xla

// Registers the factory above at static-initialization time.
static bool InitModule() {
  xla::CompilationEnvironments::RegisterProcessNewEnvFn(
      xla::GpuCompilationEnvironment::descriptor(),
      xla::ProcessNewGpuCompilationEnvironment);
  return true;
}
static bool module_initialized = InitModule();
#include "xla/service/gpu_compilation_environment.h"

#include <string>
#include <vector>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/parse_flags_from_env.h"
#include "xla/service/compilation_environments.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace {

using ::tsl::testing::StatusIs;

// Points the XLA_FLAGS environment variable at `xla_flags`, resetting the
// cached flag state first so the next parse re-reads the variable.
void set_xla_flags_env_var(const std::string& xla_flags) {
  int* pargc;
  std::vector<char*>* pargv;
  ResetFlagsFromEnvForTesting("XLA_FLAGS", &pargc, &pargv);
  tsl::setenv("XLA_FLAGS", xla_flags.c_str(), true);
}

// A recognized flag string is parsed into the proto and consumed.
TEST(CreateGpuCompEnvFromFlagStringsTest, ValidFlags) {
  std::vector<std::string> flags = {"--dummy_flag=2"};
  TF_ASSERT_OK_AND_ASSIGN(GpuCompilationEnvironment env,
                          CreateGpuCompEnvFromFlagStrings(flags, true));
  ASSERT_EQ(env.dummy_flag(), 2);
  ASSERT_TRUE(flags.empty());
}

// An empty flag list parses cleanly even in strict mode.
TEST(CreateGpuCompEnvFromFlagStringsTest, EmptyFlags) {
  std::vector<std::string> flags;
  TF_ASSERT_OK_AND_ASSIGN(GpuCompilationEnvironment env,
                          CreateGpuCompEnvFromFlagStrings(flags, true));
}

// An unknown flag is an error in strict mode but is left in place (and
// tolerated) in non-strict mode.
TEST(CreateGpuCompEnvFromFlagStringsTest, InvalidFlagName) {
  std::vector<std::string> flags = {"--xla_gpu_invalid_flag=2"};
  EXPECT_THAT(CreateGpuCompEnvFromFlagStrings(flags, true),
              StatusIs(tsl::error::INVALID_ARGUMENT));
  TF_ASSERT_OK_AND_ASSIGN(GpuCompilationEnvironment env,
                          CreateGpuCompEnvFromFlagStrings(flags, false));
  ASSERT_EQ(flags.size(), 1);
}

// Flags set via XLA_FLAGS are picked up from the environment variable.
TEST(CreateGpuCompEnvFromEnvVarTest, ValidFlags) {
  set_xla_flags_env_var("--dummy_flag=4");
  TF_ASSERT_OK_AND_ASSIGN(GpuCompilationEnvironment env,
                          CreateGpuCompEnvFromEnvVar());
  ASSERT_EQ(env.dummy_flag(), 4);
}

// Neither source sets the field: the documented default (1) applies.
TEST(InitializeMissingFieldsFromXLAFlagsTest, BothProtoAndEnvVarUnset) {
  set_xla_flags_env_var("");
  GpuCompilationEnvironment env;
  TF_ASSERT_OK(InitializeMissingFieldsFromXLAFlags(env));
  EXPECT_EQ(env.dummy_flag(), 1);
}

// Only the proto sets the field: the proto value survives.
TEST(InitializeMissingFieldsFromXLAFlagsTest, ProtoSetButEnvVarUnset) {
  set_xla_flags_env_var("");
  GpuCompilationEnvironment env;
  env.set_dummy_flag(2);
  TF_ASSERT_OK(InitializeMissingFieldsFromXLAFlags(env));
  EXPECT_EQ(env.dummy_flag(), 2);
}

// Only XLA_FLAGS sets the field: the env-var value is merged in.
TEST(InitializeMissingFieldsFromXLAFlagsTest, ProtoUnsetButEnvVarSet) {
  set_xla_flags_env_var("--dummy_flag=4");
  GpuCompilationEnvironment env;
  TF_ASSERT_OK(InitializeMissingFieldsFromXLAFlags(env));
  EXPECT_EQ(env.dummy_flag(), 4);
}

// XLA_FLAGS sets a field the proto left unset: no conflict.
TEST(InitializeMissingFieldsFromXLAFlagsTest,
     BothProtoAndEnvVarSetButNoConflict) {
  set_xla_flags_env_var("--dummy_flag=4");
  CompilationEnvironments envs;
  GpuCompilationEnvironment env;
  TF_ASSERT_OK(InitializeMissingFieldsFromXLAFlags(env));
  EXPECT_EQ(env.dummy_flag(), 4);
}

// Both sources set the same field: reported as INVALID_ARGUMENT.
TEST(InitializeMissingFieldsFromXLAFlagsTest,
     BothProtoAndEnvVarSetWithConflict) {
  set_xla_flags_env_var("--dummy_flag=4");
  CompilationEnvironments envs;
  GpuCompilationEnvironment env;
  env.set_dummy_flag(2);
  EXPECT_THAT(InitializeMissingFieldsFromXLAFlags(env),
              StatusIs(tsl::error::INVALID_ARGUMENT));
}

}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu_compilation_environment.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu_compilation_environment_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
371d52b9-46aa-4f5f-a647-47eba0f32fe7
cpp
tensorflow/tensorflow
instruction_fusion
third_party/xla/xla/service/gpu/transforms/instruction_fusion.cc
third_party/xla/xla/service/gpu/transforms/instruction_fusion_test.cc
#include "xla/service/gpu/transforms/instruction_fusion.h"

#include <cstdint>
#include <memory>
#include <utility>
#include <vector>

#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/meta/type_traits.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/fusion_node_indexing_evaluation.h"
#include "xla/service/fusion_queue.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"

namespace xla {
namespace gpu {
namespace {

// True iff the shape's element type is F32 or F16.
bool ElementIsF32OrF16(const Shape& shape) {
  const PrimitiveType element_type = shape.element_type();
  return element_type == F32 || element_type == F16;
}

// A fusion queue that never yields an instruction; handed out for
// computations outside the fusible set so no fusion happens there.
class EmptyFusionQueue : public FusionQueue {
 public:
  std::pair<HloInstruction*, std::vector<int64_t>>
  DequeueNextInstructionAndOperandsToFuseInOrder() override {
    return {nullptr, {}};
  }
  void RemoveInstruction(HloInstruction* instruction) override {}
  const std::vector<bool>* FusionConfiguration() override { return nullptr; }
};

}  // namespace

absl::StatusOr<bool> GpuInstructionFusion::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  // Drop per-node indexing caches from any previous run, then record which
  // computations are eligible for fusion before delegating to the base pass.
  fusion_node_evaluations_.clear();
  auto computations = GetFusibleComputations(*module, execution_threads);
  fusible_computations_ = {computations.begin(), computations.end()};
  return InstructionFusion::Run(module, execution_threads);
}

bool GpuInstructionFusion::IsExpensive(const HloInstruction& instruction) {
  // Divide/sqrt/rsqrt/exp at f16/f32 precision are treated as cheap here;
  // every other case defers to the generic cost model.
  const HloOpcode op = instruction.opcode();
  const bool cheap_elementwise_op =
      op == HloOpcode::kDivide || op == HloOpcode::kSqrt ||
      op == HloOpcode::kRsqrt || op == HloOpcode::kExp;
  if (cheap_elementwise_op && ElementIsF32OrF16(instruction.shape())) {
    return false;
  }
  return InstructionFusion::IsExpensive(instruction);
}

// Cheap, early rejection tests run before the budget/duplication analysis in
// ShouldFuse. The check order is significant: the first failing test supplies
// the reported reason.
FusionDecision GpuInstructionFusion::ShouldFuseInexpensiveChecks(
    HloInstruction* consumer, int64_t operand_index) {
  HloInstruction* producer = consumer->mutable_operand(operand_index);
  if (producer->opcode() == HloOpcode::kFusion) {
    return FusionDecision::Forbid("the producer is a fusion");
  }
  if (consumer->IsCustomFusion()) {
    return FusionDecision::Forbid("the consumer is a custom fusion");
  }
  if (is_expensive(*producer) &&
      ReusesOperandElements(consumer, operand_index)) {
    return FusionDecision::Forbid(
        "the producer is expensive, and the consumer reuses inputs");
  }
  if (IsInputFusibleReduction(*consumer) &&
      IsPhysicallyTransposing(*producer)) {
    return FusionDecision::Forbid(
        "fusing the producer would break read coalescing");
  }
  RETURN_IF_NOT_FUSIBLE(IsProducerConsumerFusible(*producer, *consumer));
  if (CreatesHeavyComputation(*producer, *consumer)) {
    return FusionDecision::Forbid(
        "the fusion would create a heavy computation");
  }
  return InstructionFusion::ShouldFuse(consumer, operand_index);
}

FusionDecision GpuInstructionFusion::ShouldFuse(HloInstruction* consumer,
                                                int64_t operand_index) {
  RETURN_IF_NOT_FUSIBLE(ShouldFuseInexpensiveChecks(consumer, operand_index));
  auto producer = consumer->operand(operand_index);
  RETURN_IF_NOT_FUSIBLE(
      FusionFitsInBudget(*consumer, *producer, device_info_, true));
  // Only an existing fusion consumer needs the code-duplication analysis.
  if (consumer->opcode() != HloOpcode::kFusion) {
    return FusionDecision::Allow();
  }
  // Lazily build (and cache) the indexing evaluation for this fusion node.
  auto it = fusion_node_evaluations_.find(consumer);
  if (it == fusion_node_evaluations_.end()) {
    it = fusion_node_evaluations_
             .emplace(consumer, FusionNodeIndexingEvaluation(consumer))
             .first;
  }
  if (it->second.CodeDuplicationTooHigh(producer)) {
    return FusionDecision::Forbid(
        "the fusion would result in an overly large code duplication");
  }
  return FusionDecision::Allow();
}

HloInstruction::FusionKind GpuInstructionFusion::ChooseKind(
    const HloInstruction* producer, const HloInstruction* consumer) {
  return ChooseFusionKind(*producer, *consumer);
}

HloInstruction* GpuInstructionFusion::FuseInstruction(
    HloInstruction* fusion_instruction, HloInstruction* producer) {
  // Look up the cached evaluation for this fusion node, creating it on
  // demand if ShouldFuse has not populated it yet.
  auto it = fusion_node_evaluations_.find(fusion_instruction);
  if (it == fusion_node_evaluations_.end()) {
    it = fusion_node_evaluations_
             .emplace(fusion_instruction,
                      FusionNodeIndexingEvaluation(fusion_instruction))
             .first;
  }
  // Capture the producer's indexing users before the fusion mutates the
  // graph, then refresh the cache for the fused-in clone.
  auto indexing_users = it->second.RemoveFusionOperand(producer);
  HloInstruction* new_producer =
      InstructionFusion::FuseInstruction(fusion_instruction, producer);
  it->second.UpdateEvaluationCache(new_producer, indexing_users);
  return new_producer;
}

std::unique_ptr<FusionQueue> GpuInstructionFusion::GetFusionQueue(
    HloComputation* computation) {
  // Non-fusible computations get the empty queue, which disables fusion.
  if (fusible_computations_.contains(computation)) {
    return InstructionFusion::GetFusionQueue(computation);
  }
  return std::make_unique<EmptyFusionQueue>();
}

}  // namespace gpu
}  // namespace xla
#include "xla/service/gpu/transforms/instruction_fusion.h" #include <cstdint> #include <memory> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/literal_util.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/gpu_fusible.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/test_utils.h" #include "xla/tests/verified_hlo_module.h" #include "xla/util.h" #include "tsl/platform/statusor.h" namespace m = ::xla::match; namespace xla { namespace gpu { class InstructionFusionTest : public HloTestBase { public: GpuInstructionFusion duplicating_instruction_fusion_{ true, TestGpuDeviceInfo::RTXA6000DeviceInfo()}; }; TEST_F(InstructionFusionTest, NoFusionIntoCustomFusionConsumer) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( HloModule m c { p0 = bf16[3000,53]{1,0} parameter(0) p1 = bf16[22,53]{1,0} parameter(1) d = bf16[3000,22]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={1} r = bf16[1,1,3000,22]{3,2,1,0} reshape(d) ROOT c = bf16[1,1,3000,22]{2,1,3,0} copy(r) } ENTRY e { p1 = bf16[3000,53]{1,0} parameter(1) p0 = bf16[22,53]{1,0} parameter(0) cp0 = bf16[22,53]{1,0} convert(p0) ROOT f = bf16[1,1,3000,22]{2,1,3,0} fusion(p1, cp0), kind=kCustom, calls=c })")); EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value()); } TEST_F(InstructionFusionTest, CostlyProducerAndOperandElementReusingConsumerNotFused) { HloComputation::Builder builder(TestName()); HloInstruction* const0 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0(5.0f))); HloInstruction* log1 = builder.AddInstruction(HloInstruction::CreateUnary( ShapeUtil::MakeShape(F32, {}), HloOpcode::kLog, const0)); HloInstruction* broadcast2 = 
builder.AddInstruction(HloInstruction::CreateBroadcast( ShapeUtil::MakeShape(F32, {1}), log1, {})); auto module = CreateNewVerifiedModule(); auto computation = module->AddEntryComputation(builder.Build()); EXPECT_EQ(broadcast2, computation->root_instruction()); EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value()); EXPECT_EQ(broadcast2, computation->root_instruction()); } TEST_F(InstructionFusionTest, NonCostlyProducerAndOperandElementReusingConsumerFused) { HloComputation::Builder builder(TestName()); HloInstruction* const0 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0(5))); HloInstruction* negate1 = builder.AddInstruction(HloInstruction::CreateUnary( ShapeUtil::MakeShape(S32, {}), HloOpcode::kNegate, const0)); HloInstruction* broadcast2 = builder.AddInstruction(HloInstruction::CreateBroadcast( ShapeUtil::MakeShape(S32, {1}), negate1, {})); auto module = CreateNewVerifiedModule(); auto computation = module->AddEntryComputation(builder.Build()); EXPECT_EQ(broadcast2, computation->root_instruction()); EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value()); EXPECT_THAT(computation->root_instruction(), GmockMatch(m::Fusion())); } TEST_F(InstructionFusionTest, CostlyProducerAndNonOperandElementReusingConsumerFused_Reshape) { HloComputation::Builder builder(TestName()); HloInstruction* const0 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0(5.0f))); HloInstruction* exp1 = builder.AddInstruction(HloInstruction::CreateUnary( ShapeUtil::MakeShape(F32, {}), HloOpcode::kExp, const0)); HloInstruction* reshape2 = builder.AddInstruction( HloInstruction::CreateReshape(ShapeUtil::MakeShape(F32, {}), exp1)); auto module = CreateNewVerifiedModule(); auto computation = module->AddEntryComputation(builder.Build()); EXPECT_EQ(reshape2, computation->root_instruction()); EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value()); EXPECT_THAT(computation->root_instruction(), 
GmockMatch(m::Fusion())); } TEST_F(InstructionFusionTest, CostlyProducerAndNonOperandElementReusingConsumerFused_Transpose) { HloComputation::Builder builder(TestName()); Shape operand_shape = ShapeUtil::MakeShape(F32, {64, 32}); HloInstruction* param = builder.AddInstruction( HloInstruction::CreateParameter(0, operand_shape, "param0")); HloInstruction* exp1 = builder.AddInstruction( HloInstruction::CreateUnary(operand_shape, HloOpcode::kExp, param)); HloInstruction* transpose2 = builder.AddInstruction(HloInstruction::CreateTranspose( ShapeUtil::MakeShape(F32, {32, 64}), exp1, {1, 0})); auto module = CreateNewVerifiedModule(); auto computation = module->AddEntryComputation(builder.Build()); EXPECT_EQ(transpose2, computation->root_instruction()); EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value()); EXPECT_THAT(computation->root_instruction(), GmockMatch(m::Fusion())); } TEST_F(InstructionFusionTest, PotentialBitcastReshapeOfDotFused) { HloComputation::Builder builder(TestName()); auto param0 = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(F32, {1, 1}), "0")); auto dot1 = builder.AddInstruction( CreateCanonicalDot(ShapeUtil::MakeShape(F32, {1, 1}), param0, param0)); auto reshape2 = builder.AddInstruction(HloInstruction::CreateReshape( ShapeUtil::MakeShape(F32, {1, 1, 1}), dot1)); auto log = builder.AddInstruction(HloInstruction::CreateUnary( reshape2->shape(), xla::HloOpcode::kLog, reshape2)); auto module = CreateNewVerifiedModule(); auto computation = module->AddEntryComputation(builder.Build()); EXPECT_EQ(log, computation->root_instruction()); EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value()); } TEST_F(InstructionFusionTest, PotentialBitcastTransposeOfDotUnfused) { HloComputation::Builder builder(TestName()); auto param0 = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(S32, {1, 1}), "0")); auto dot1 = builder.AddInstruction( 
CreateCanonicalDot(ShapeUtil::MakeShape(S32, {1, 1}), param0, param0)); auto transpose2 = builder.AddInstruction(HloInstruction::CreateTranspose( ShapeUtil::MakeShape(S32, {1, 1}), dot1, {0, 1})); auto module = CreateNewVerifiedModule(); auto computation = module->AddEntryComputation(builder.Build()); EXPECT_EQ(transpose2, computation->root_instruction()); EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value()); } TEST_F(InstructionFusionTest, BroadcastIntoReduce) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module add { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } ENTRY BroadcastIntoReduce { constant = f32[] constant(1) broadcast = f32[16,16,16,16]{3,2,1,0} broadcast(constant), dimensions={} constant.1 = f32[] constant(0) ROOT reduce = f32[] reduce(broadcast, constant.1), dimensions={0,1,2,3}, to_apply=add })") .value(); EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value()); HloInstruction* root = module->entry_computation()->root_instruction(); ASSERT_THAT(root, GmockMatch(m::Fusion())); EXPECT_THAT( root->fused_expression_root(), GmockMatch(m::Reduce(m::Broadcast(m::Constant()), m::Constant()))); } TEST_F(InstructionFusionTest, DoNotFuseLayoutChangingOpWithReduce) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module add { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } ENTRY entry { p0 = f32[16,16,16,16]{3,2,1,0} parameter(0) copy = f32[16,16,16,16]{0,1,2,3} copy(p0) constant.1 = f32[] constant(0) ROOT reduce = f32[16] reduce(copy, constant.1), dimensions={0,1,2}, to_apply=add })") .value(); EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value()); } TEST_F(InstructionFusionTest, DoNotFuseLayoutChangingOpWithReduceFusion) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module add { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } fused_reduce { p0.1 = 
f32[16,16,16,16]{0,1,2,3} parameter(0) mul = f32[16,16,16,16]{0,1,2,3} multiply(p0.1, p0.1) c0.1 = f32[] constant(0) ROOT root = f32[] reduce(mul, c0.1), dimensions={0,1,2,3}, to_apply=add } ENTRY entry { p0 = f32[16,16,16,16]{3,2,1,0} parameter(0) copy = f32[16,16,16,16]{0,1,2,3} copy(p0) fusion = f32[] fusion(copy), kind=kInput, calls=fused_reduce ROOT root = (f32[]) tuple(fusion) })") .value(); EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value()); } TEST_F(InstructionFusionTest, DoNotRepeatLargeReduceWindow) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module add { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } ENTRY entry { p0 = s32[512,512,2] parameter(0) p1 = f32[1,1,512,512] parameter(1) constant_1 = f32[] constant(1) reduce-window.1 = reduce-window(p1, constant_1), window={size=1x1x9x9}, to_apply=add ROOT ret = gather(reduce-window.1, p0), offset_dims={0,1,2,3}, collapsed_slice_dims={}, start_index_map={1,2}, index_vector_dim=2, slice_sizes={1,1,1,1} })") .value(); EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value()); } TEST_F(InstructionFusionTest, FuseLayoutChangingOpWithElementwise) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module ENTRY entry { p0 = f32[16,16,16,16]{3,2,1,0} parameter(0) copy = f32[16,16,16,16]{0,1,2,3} copy(p0) ROOT add = f32[16,16,16,16]{0,1,2,3} add(copy, copy) })") .value(); EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value()); HloInstruction* root = module->entry_computation()->root_instruction(); ASSERT_THAT(root, GmockMatch(m::Fusion())); EXPECT_THAT(root->fused_expression_root(), GmockMatch(m::Add(m::Copy(), m::Copy()))); } TEST_F(InstructionFusionTest, BitcastIntoAdd) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module ENTRY BroadcastIntoAdd { p0 = f32[4,1,1]{2,1,0} parameter(0) p1 = f32[4,1]{1,0} parameter(1) bitcast = f32[4,1]{1,0} bitcast(p0) ROOT add = f32[4,1] add(bitcast, 
p1) })") .value(); EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value()); HloInstruction* root = module->entry_computation()->root_instruction(); ASSERT_THAT(root, GmockMatch(m::Fusion())); EXPECT_THAT(root->fused_expression_root(), GmockMatch(m::Add(m::Bitcast(m::Parameter()), m::Parameter()))); } TEST_F(InstructionFusionTest, AddIntoBitcast) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module ENTRY BroadcastIntoAdd { p0 = f32[4,1]{1,0} parameter(0) p1 = f32[4,1]{1,0} parameter(1) add = f32[4,1] add(p0, p1) ROOT bitcast = f32[4,1,1] bitcast(add) })") .value(); EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value()); } TEST_F(InstructionFusionTest, ConvertIntoBitcastBothConsumedByTuple) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test ENTRY main { param_0 = f32[2048,16000]{1,0} parameter(0) convert = bf16[2048,16000]{1,0} convert(param_0) bitcast = bf16[16000,1,2048]{2,1,0} bitcast(convert) ROOT tuple.143 = (bf16[16000,1,2048]{2,1,0}, bf16[2048,16000]{1,0}) tuple(bitcast, convert) })") .value(); EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value()); } TEST_F(InstructionFusionTest, DontFuseGTE) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module ENTRY DontFuseGTE { p0 = (f32[10], f32[10]) parameter(0) gte0 = f32[10] get-tuple-element(p0), index=0 gte1 = f32[10] get-tuple-element(p0), index=1 ROOT add = f32[10] add(gte0, gte1) })") .value(); EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value()); } TEST_F(InstructionFusionTest, FloatingPointDivIsCheap) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module Add { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } ENTRY TestComputation { zero = f32[] constant(0) p0 = f32[100] parameter(0) p1 = f32[100] parameter(1) recip = f32[100] divide(p1, p0) sum1 = f32[] reduce(recip, zero), dimensions={0}, to_apply=Add sum2 = f32[] reduce(recip, zero), 
dimensions={0}, to_apply=Add ROOT root = (f32[], f32[]) tuple(sum1, sum2) })") .value(); EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value()); HloInstruction* root = module->entry_computation()->root_instruction(); EXPECT_THAT(root, GmockMatch(m::Tuple(m::Fusion(), m::Fusion()))) << module->ToString(); } TEST_F(InstructionFusionTest, IntegerDivIsNotCheap) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module Add { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY TestComputation { zero = s32[] constant(0) p0 = s32[100] parameter(0) p1 = s32[100] parameter(1) recip = s32[100] divide(p1, p0) sum1 = s32[] reduce(recip, zero), dimensions={0}, to_apply=Add sum2 = s32[] reduce(recip, zero), dimensions={0}, to_apply=Add ROOT mul = (s32[], s32[]) tuple(sum1, sum2) })") .value(); EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value()) << module->ToString(); } TEST_F(InstructionFusionTest, DotOutputFusionImpossible) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module ENTRY NoOutputFusion { alpha = f32[] constant(3) broadcast = f32[4,4]{1,0} broadcast(alpha), dimensions={} p0 = f32[4,3]{1,0} parameter(0) p1 = f32[3,4]{1,0} parameter(1) dot = f32[4,4]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} d = f32[4,4]{1,0} multiply(dot, dot) ROOT mul = f32[4,4] multiply(d, broadcast) })") .value(); EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value()); HloInstruction* root = module->entry_computation()->root_instruction(); ASSERT_THAT(root, GmockMatch(m::Fusion())); EXPECT_EQ(root->fusion_kind(), HloInstruction::FusionKind::kLoop); EXPECT_THAT( root->fused_expression_root(), GmockMatch(m::Multiply(m::Multiply(m::Parameter(), m::Parameter()), m::Broadcast(m::Constant())))); } static int Count(const HloModule& module, HloOpcode op) { int count = 0; for (const auto* computation : module.computations()) { for (const auto* instruction : 
computation->instructions()) { if (instruction->opcode() == op) { ++count; } } } return count; } TEST_F(InstructionFusionTest, MultiOutputFusion) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module ENTRY OutputFusion { p0 = f32[4,3]{1,0} parameter(0) p1 = f32[4,3]{1,0} parameter(1) p2 = f32[4,3]{1,0} parameter(2) sub = f32[4,3]{1,0} subtract(p0, p2) add = f32[4,3]{1,0} add(sub, p1) ROOT tuple = (f32[4,3]{1,0}, f32[4,3]{1,0}) tuple(sub, add) })") .value(); ASSERT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value()); } TEST_F(InstructionFusionTest, FuseScalarConstant) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module ENTRY FuseScalarConstant { p0 = f32[] parameter(0) c0 = f32[] constant(1) add1 = f32[] add(p0, c0) b0 = f32[2]{0} broadcast(add1), dimensions={} c1 = f32[2]{0} constant({1, 2}) ROOT add2 = f32[2]{0} add(b0, c1) })") .value(); EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value()); HloInstruction* root = module->entry_computation()->root_instruction(); ASSERT_THAT(root, GmockMatch(m::Fusion())); EXPECT_THAT( root->fused_expression_root(), GmockMatch(m::Add(m::Broadcast(m::Add(m::Parameter(), m::Constant())), m::Parameter()))); } TEST_F(InstructionFusionTest, AvoidsLargeFusion) { constexpr int64_t kNumParams = 200; ASSERT_GT(kNumParams, MaxOperandsAndOutputsPerFusion()); HloComputation::Builder b(TestName()); Shape shape = ShapeUtil::MakeShape(F32, {10, 100}); auto param0 = b.AddInstruction(HloInstruction::CreateParameter(0, shape, "p")); auto sum = param0; for (int64_t i = 1; i < kNumParams; ++i) { auto param = b.AddInstruction(HloInstruction::CreateParameter(i, shape, "p")); sum = b.AddInstruction( HloInstruction::CreateBinary(shape, HloOpcode::kAdd, sum, param)); } auto module = CreateNewVerifiedModule(); auto computation = module->AddEntryComputation(b.Build()); EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value()); SCOPED_TRACE(module->ToString()); for (const 
HloInstruction* instr : computation->instructions()) { EXPECT_LE(instr->operand_count(), MaxOperandsAndOutputsPerFusion()) << instr->ToString(); } } TEST_F(InstructionFusionTest, FuseIntoScatter) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module add { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY FuseIntoScatter { p0 = s32[3,3] parameter(0) p1 = s32[2] parameter(1) indices = s32[2] add(p1, p1) p2 = s32[2,3] parameter(2) updates = s32[2,3] add(p2, p2) scatter = s32[3,3] scatter(p0, indices, updates), to_apply=add, update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1 ROOT add = s32[3,3] add(scatter, scatter) })") .value(); EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value()); HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* fusion = nullptr; ASSERT_THAT(root, GmockMatch(m::Add(m::Fusion(&fusion), m::Fusion()))); EXPECT_EQ(fusion->fusion_kind(), HloInstruction::FusionKind::kInput); EXPECT_THAT(fusion->fused_expression_root(), GmockMatch(m::Scatter(m::Parameter(), m::Add(), m::Add()))); } TEST_F(InstructionFusionTest, DontFuseIntoFirstOperandOfScatter) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module add { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY FuseIntoScatter { p0 = s32[3,3] parameter(0) operand = s32[3,3] add(p0, p0) p1 = s32[2] parameter(1) indices = s32[2] add(p1, p1) p2 = s32[2,3] parameter(2) updates = s32[2,3] add(p2, p2) scatter = s32[3,3] scatter(operand, indices, updates), to_apply=add, update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1 ROOT add = s32[3,3] add(scatter, scatter) })") .value(); EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value()); HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* fusion = 
nullptr; ASSERT_THAT(root, GmockMatch(m::Add(m::Fusion(&fusion), m::Fusion()))); EXPECT_EQ(fusion->fusion_kind(), HloInstruction::FusionKind::kInput); EXPECT_THAT(fusion->fused_expression_root(), GmockMatch(m::Scatter(m::Parameter(), m::Add(), m::Add()))); } TEST_F(InstructionFusionTest, ScatterOpShouldNotFuseWithSharedOperand) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module add { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } ENTRY Test { parameter.0 = f32[8,8] parameter(0) parameter.1 = s32[7] parameter(1) indices = s32[7] add(parameter.1, parameter.1) slice = f32[7,8] slice(parameter.0), slice={[0:7],[0:8]} ROOT scatter = f32[8,8] scatter(parameter.0, indices, slice), to_apply=add, update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1 })") .value(); EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value()); HloInstruction* root = module->entry_computation()->root_instruction(); EXPECT_THAT( root, GmockMatch(m::Fusion(m::Parameter(), m::Slice(), m::Parameter()))); } TEST_F(InstructionFusionTest, NonscalarConstantsNotFused) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module add { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } ENTRY BroadcastIntoReduce { constant = f32[16] constant({0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}) broadcast = f32[16,16,16,16]{3,2,1,0} broadcast(constant), dimensions={0} constant.1 = f32[] constant(0) ROOT reduce = f32[] reduce(broadcast, constant.1), dimensions={0,1,2,3}, to_apply=add })") .value(); EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value()); auto* root = module->entry_computation()->root_instruction(); ASSERT_THAT(root, GmockMatch(m::Fusion())); EXPECT_THAT( root->fused_instructions_computation()->root_instruction(), GmockMatch(m::Reduce(m::Broadcast(m::Parameter()), m::Constant()))); } TEST_F(InstructionFusionTest, FuseReverse) { auto 
module = ParseAndReturnVerifiedModule(R"( HloModule test_module ENTRY Reverse { p0 = f32[50,96,1024]{2,1,0} parameter(0) add = f32[50,96,1024]{2,1,0} add(p0, p0) ROOT reverse = f32[50,96,1024] reverse(add), dimensions={0} })") .value(); EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value()); HloInstruction* root = module->entry_computation()->root_instruction(); ASSERT_THAT(root, GmockMatch(m::Fusion())); EXPECT_THAT(root->fused_expression_root(), GmockMatch(m::Reverse(m::Add(m::Parameter(), m::Parameter())))); } TEST_F(InstructionFusionTest, GpuIsExpensiveF32) { auto m = CreateNewVerifiedModule(); Shape r0f32 = ShapeUtil::MakeShape(F32, {}); HloComputation::Builder builder(TestName()); HloInstruction* param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, r0f32, "param0")); HloInstruction* one = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f))); HloInstruction* div = builder.AddInstruction( HloInstruction::CreateBinary(r0f32, HloOpcode::kDivide, param0, one)); HloInstruction* rem = builder.AddInstruction( HloInstruction::CreateBinary(r0f32, HloOpcode::kRemainder, param0, one)); HloInstruction* sqrt = builder.AddInstruction( HloInstruction::CreateUnary(r0f32, HloOpcode::kSqrt, param0)); HloInstruction* rsqrt = builder.AddInstruction( HloInstruction::CreateUnary(r0f32, HloOpcode::kRsqrt, param0)); HloInstruction* exp = builder.AddInstruction( HloInstruction::CreateUnary(r0f32, HloOpcode::kExp, param0)); EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*div)); EXPECT_TRUE(GpuInstructionFusion::IsExpensive(*rem)); EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*sqrt)); EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*rsqrt)); EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*exp)); } TEST_F(InstructionFusionTest, GpuIsExpensiveF64) { auto m = CreateNewVerifiedModule(); Shape r0f64 = ShapeUtil::MakeShape(F64, {}); HloComputation::Builder builder(TestName()); HloInstruction* param0 = 
builder.AddInstruction( HloInstruction::CreateParameter(0, r0f64, "param0")); HloInstruction* one = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f))); HloInstruction* div = builder.AddInstruction( HloInstruction::CreateBinary(r0f64, HloOpcode::kDivide, param0, one)); HloInstruction* rem = builder.AddInstruction( HloInstruction::CreateBinary(r0f64, HloOpcode::kRemainder, param0, one)); HloInstruction* sqrt = builder.AddInstruction( HloInstruction::CreateUnary(r0f64, HloOpcode::kSqrt, param0)); HloInstruction* rsqrt = builder.AddInstruction( HloInstruction::CreateUnary(r0f64, HloOpcode::kRsqrt, param0)); HloInstruction* exp = builder.AddInstruction( HloInstruction::CreateUnary(r0f64, HloOpcode::kExp, param0)); EXPECT_TRUE(GpuInstructionFusion::IsExpensive(*div)); EXPECT_TRUE(GpuInstructionFusion::IsExpensive(*rem)); EXPECT_TRUE(GpuInstructionFusion::IsExpensive(*sqrt)); EXPECT_TRUE(GpuInstructionFusion::IsExpensive(*rsqrt)); EXPECT_TRUE(GpuInstructionFusion::IsExpensive(*exp)); } TEST_F(InstructionFusionTest, GpuIsExpensiveS32) { auto m = CreateNewVerifiedModule(); Shape r0s32 = ShapeUtil::MakeShape(S32, {}); HloComputation::Builder builder(TestName()); HloInstruction* param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, r0s32, "param0")); HloInstruction* one = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f))); HloInstruction* div = builder.AddInstruction( HloInstruction::CreateBinary(r0s32, HloOpcode::kDivide, param0, one)); HloInstruction* rem = builder.AddInstruction( HloInstruction::CreateBinary(r0s32, HloOpcode::kRemainder, param0, one)); EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*div)); EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*rem)); } TEST_F(InstructionFusionTest, GpuIsExpensiveBroadcastS32) { auto m = CreateNewVerifiedModule(); Shape r1s32 = ShapeUtil::MakeShape(S32, {10}); HloComputation::Builder builder(TestName()); HloInstruction* param0 = 
builder.AddInstruction( HloInstruction::CreateParameter(0, r1s32, "param0")); HloInstruction* one = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f))); HloInstruction* one_broad = builder.AddInstruction(HloInstruction::CreateBroadcast(r1s32, one, {})); HloInstruction* div = builder.AddInstruction(HloInstruction::CreateBinary( r1s32, HloOpcode::kDivide, param0, one_broad)); HloInstruction* rem = builder.AddInstruction(HloInstruction::CreateBinary( r1s32, HloOpcode::kRemainder, param0, one_broad)); EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*div)); EXPECT_FALSE(GpuInstructionFusion::IsExpensive(*rem)); } TEST_F(InstructionFusionTest, FloatingPointExpIsCheap) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module Add { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } ENTRY TestComputation { zero = f32[] constant(0) p0 = f32[100] parameter(0) recip = f32[100] exponential(p0) sum1 = f32[] reduce(recip, zero), dimensions={0}, to_apply=Add sum2 = f32[] reduce(recip, zero), dimensions={0}, to_apply=Add ROOT root = (f32[], f32[]) tuple(sum1, sum2) })") .value(); EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value()); HloInstruction* root = module->entry_computation()->root_instruction(); EXPECT_THAT(root, GmockMatch(m::Tuple(m::Fusion(), m::Fusion()))) << module->ToString(); } TEST_F(InstructionFusionTest, SmallReducedDimensionIsNotLoweredToLoop) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module add { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY FuseSmallReduction { p0 = s32[1048576,4] parameter(0) p1 = s32[1048576,4] parameter(1) sum = s32[1048576,4] add(p0, p1) init = s32[] constant(0) ROOT reduce = s32[1048576] reduce(sum, init), dimensions={1}, to_apply=add })") .value(); EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value()); HloInstruction* root = 
module->entry_computation()->root_instruction(); ASSERT_THAT(root, GmockMatch(m::Fusion())); EXPECT_EQ(root->fusion_kind(), HloInstruction::FusionKind::kInput); } TEST_F(InstructionFusionTest, IotaIntoVariadicReduction) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m f { tmp_0 = f32[] parameter(0) tmp_1 = f32[] parameter(1) tmp_2 = pred[] compare(tmp_0, tmp_1), direction=GE tmp_3 = f32[] select(tmp_2, tmp_0, tmp_1) tmp_4 = pred[] compare(tmp_0, tmp_1), direction=EQ tmp_5 = s32[] parameter(2) tmp_6 = s32[] parameter(3) tmp_7 = s32[] minimum(tmp_5, tmp_6) tmp_8 = s32[] select(tmp_2, tmp_5, tmp_6) tmp_9 = s32[] select(tmp_4, tmp_7, tmp_8) ROOT tmp_10 = (f32[], s32[]) tuple(tmp_3, tmp_9) } minmax { tmp_0 = f32[] parameter(0) tmp_1 = f32[] parameter(2) tmp_2 = s32[] parameter(1) tmp_3 = s32[] parameter(3) ROOT tmp_4 = (f32[], s32[]) fusion(tmp_0, tmp_1, tmp_2, tmp_3), kind=kLoop, calls=f } ENTRY e { tmp_0 = f32[554112,10]{1,0} parameter(0) tmp_1 = s32[554112,10]{1,0} iota(), iota_dimension=1 tmp_2 = f32[] constant(-inf) tmp_3 = s32[] constant(0) ROOT tmp_4 = (f32[554112]{0}, s32[554112]{0}) reduce(tmp_0, tmp_1, tmp_2, tmp_3), dimensions={1}, to_apply=minmax })") .value(); EXPECT_TRUE(GpuInstructionFusion(false, TestGpuDeviceInfo::RTXA6000DeviceInfo()) .Run(module.get()) .value()); ASSERT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Parameter()))); EXPECT_THAT( module->entry_computation()->root_instruction()->fused_expression_root(), GmockMatch( m::Reduce(m::Parameter(), m::Iota(), m::Constant(), m::Constant()))); } TEST_F(InstructionFusionTest, InputReductionFusion) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module add.clone.13 { x.27 = f32[] parameter(0) y.27 = f32[] parameter(1) ROOT add.1036 = f32[] add(x.27, y.27) } add.clone.14 { x.28 = f32[] parameter(0) y.28 = f32[] parameter(1) ROOT add.1037 = f32[] add(x.28, y.28) } add { x = bf16[] parameter(0) convert.448 = f32[] convert(x) y = bf16[] 
parameter(1) convert.449 = f32[] convert(y) add.597 = f32[] add(convert.448, convert.449) ROOT convert.450 = bf16[] convert(add.597) } ENTRY FuseSmallReduction { param_2.7 = bf16[8,16,64,2048]{3,2,1,0} parameter(2) convert.1395 = f32[8,16,64,2048]{3,2,1,0} convert(param_2.7) param_0.85 = bf16[8,16,64,2048]{3,2,1,0} parameter(0) convert.1393 = f32[8,16,64,2048]{3,2,1,0} convert(param_0.85) multiply.1652 = f32[8,16,64,2048]{3,2,1,0} multiply(convert.1395, convert.1393) convert.1392 = bf16[8,16,64,2048]{3,2,1,0} convert(multiply.1652) bitcast.15934 = bf16[128,64,2048]{2,1,0} bitcast(convert.1392) convert.1391 = f32[128,64,2048]{2,1,0} convert(bitcast.15934) param_1.15 = bf16[] parameter(1) convert.1394 = f32[] convert(param_1.15) reduce.462 = f32[128,64]{1,0} reduce(convert.1391, convert.1394), dimensions={2}, to_apply=add.clone.13 reduce.121 = f32[64]{0} reduce(reduce.462, convert.1394), dimensions={0}, to_apply=add.clone.14 ROOT convert.890 = bf16[64]{0} convert(reduce.121) })") .value(); EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value()); HloInstruction* fused_convert_fusion = module->entry_computation()->root_instruction(); ASSERT_THAT(fused_convert_fusion, GmockMatch(m::Fusion())); SCOPED_TRACE(module->ToString()); EXPECT_EQ(fused_convert_fusion->fusion_kind(), HloInstruction::FusionKind::kInput); } TEST_F(InstructionFusionTest, DotStrengthReductionFusion) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module scalar_add_computation { scalar_rhs = f32[] parameter(1) scalar_lhs = f32[] parameter(0) ROOT add.1 = f32[] add(scalar_lhs, scalar_rhs) } ENTRY main { param_1.3 = f16[16,64,96,6,2,16]{5,4,3,2,1,0} parameter(1) param_0.6 = f16[16,64,96,1,2,16]{5,4,3,2,1,0} parameter(0) bitcast.26 = f16[16,64,96,2,16]{4,3,2,1,0} bitcast(param_0.6) broadcast.4 = f16[16,64,96,6,2,16]{5,4,3,2,1,0} broadcast(bitcast.26), dimensions={0,1,2,4,5} multiply.4 = f16[16,64,96,6,2,16]{5,4,3,2,1,0} multiply(broadcast.4, param_1.3) convert.8 = 
f32[16,64,96,6,2,16]{5,4,3,2,1,0} convert(multiply.4) constant_2 = f32[] constant(0) reduce.3 = f32[16,64,96,6,2]{3,4,2,1,0} reduce(convert.8, constant_2), dimensions={5}, to_apply=scalar_add_computation bitcast.25 = f32[16,64,96,2,6]{4,3,2,1,0} bitcast(reduce.3) convert.7 = f16[16,64,96,2,6]{4,3,2,1,0} convert(bitcast.25) ROOT bitcast.24 = f16[16,64,96,2,1,6]{5,4,3,2,1,0} bitcast(convert.7) })") .value(); EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value()); const HloInstruction* fused_convert_fusion = module->entry_computation()->root_instruction()->operand(0); ASSERT_THAT(fused_convert_fusion, GmockMatch(m::Fusion())); SCOPED_TRACE(module->ToString()); EXPECT_EQ(fused_convert_fusion->fusion_kind(), HloInstruction::FusionKind::kInput); EXPECT_EQ(Count(*module, HloOpcode::kFusion), 1); } TEST_F(InstructionFusionTest, ReductionFusionOtherUnaryElementwiseOpsAreFused) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module scalar_add_computation { scalar_rhs = f32[] parameter(1) scalar_lhs = f32[] parameter(0) ROOT add.1 = f32[] add(scalar_lhs, scalar_rhs) } ENTRY main { param_0 = f16[64,96,6,16]{3,2,1,0} parameter(0) constant_2 = f32[] constant(0) reduce.3 = f32[64,6,16]{2,1,0} reduce(param_0, constant_2), dimensions={1}, to_apply=scalar_add_computation negate = f32[64,6,16]{2,1,0} negate(reduce.3) ROOT sine = f16[64,6,16]{2,1,0} sine(negate) })") .value(); EXPECT_TRUE(duplicating_instruction_fusion_.Run(module.get()).value()); HloInstruction* fused_convert_fusion = module->entry_computation()->root_instruction(); ASSERT_THAT(fused_convert_fusion, GmockMatch(m::Fusion())); SCOPED_TRACE(module->ToString()); EXPECT_EQ(fused_convert_fusion->fusion_kind(), HloInstruction::FusionKind::kInput); EXPECT_EQ(Count(*module, HloOpcode::kFusion), 1); } TEST_F(InstructionFusionTest, DoNotFuseInsideReducer) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module scalar_add_computation { scalar_rhs = f32[] parameter(1) scalar_lhs = 
f32[] parameter(0) add.1 = f32[] add(scalar_lhs, scalar_rhs) ROOT add.2 = f32[] add(add.1, scalar_rhs) } ENTRY main { param_0 = f16[64,96] parameter(0) constant_2 = f32[] constant(0) ROOT reduce = f32[64] reduce(param_0, constant_2), dimensions={1}, to_apply=scalar_add_computation })") .value(); EXPECT_FALSE(duplicating_instruction_fusion_.Run(module.get()).value()); SCOPED_TRACE(module->ToString()); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/instruction_fusion.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/instruction_fusion_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
548583df-a9ce-42fe-a3f2-1f92da313ff7
cpp
tensorflow/tensorflow
conditional_simplifier
third_party/xla/xla/service/conditional_simplifier.cc
third_party/xla/xla/service/conditional_simplifier_test.cc
// Implementation of ConditionalSimplifier: an HLO pass that simplifies
// kConditional instructions by (a) merging duplicate tuple-root elements,
// (b) pruning unused tuple outputs and operands, and (c) removing the
// conditional entirely when the branch index is a compile-time constant or
// when both branches are cheap enough to evaluate unconditionally and select.
#include "xla/service/conditional_simplifier.h"

#include <iterator>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/service/call_graph.h"
#include "xla/service/call_inliner.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"

namespace xla {

namespace {

// Returns true when `computation` performs no real work (only kTuple,
// kGetTupleElement, and kParameter instructions) AND its root shape contains
// at least one array subshape. Used by TryRemoveConditional to back off from
// converting such a branch into a select (see branch_empty check below).
bool ComputationIsEmptyWithArrayRoot(const HloComputation* computation) {
  bool empty_operations = absl::c_all_of(
      computation->MakeInstructionPostOrder(),
      HloPredicateIsOp<HloOpcode::kTuple, HloOpcode::kGetTupleElement,
                       HloOpcode::kParameter>);
  bool contains_array = false;
  ShapeUtil::ForEachSubshape(computation->root_instruction()->shape(),
                             [&](const Shape& shape, const ShapeIndex& index) {
                               if (shape.IsArray()) {
                                 contains_array = true;
                               }
                             });
  return empty_operations && contains_array;
}

// If `computation` (a branch of one or more conditionals) only reads a subset
// of its tuple parameter's elements via GTEs, clones the computation with a
// narrowed parameter tuple and rewires each calling conditional in
// `calling_conditionals` to pass only the used elements. Returns true on
// change. Bails out (returns false) when the parameter is the root, is not a
// tuple, has a non-GTE user, or every element is already used.
absl::StatusOr<bool> TryRemoveUnusedConditionalOperands(
    HloComputation* computation,
    const absl::flat_hash_set<HloInstruction*>& calling_conditionals) {
  HloInstruction* param = computation->parameter_instruction(0);
  if (param == computation->root_instruction()) {
    return false;
  }
  if (!param->shape().IsTuple()) {
    return false;
  }
  // Collect the set of tuple indices actually read. std::set keeps them
  // ordered, so the narrowed tuple preserves relative element order.
  std::set<int64_t> tuple_indices_to_keep;
  for (HloInstruction* user : param->users()) {
    // Any non-GTE use means the whole tuple is consumed; nothing to prune.
    if (user->opcode() != HloOpcode::kGetTupleElement) {
      return false;
    }
    tuple_indices_to_keep.insert(user->tuple_index());
  }
  int64_t old_tuple_element_count =
      ShapeUtil::TupleElementCount(param->shape());
  if (tuple_indices_to_keep.size() == old_tuple_element_count) {
    return false;
  }
  // Build the narrowed parameter shape and an old-index -> new-index map
  // (-1 marks dropped elements).
  std::vector<const Shape*> new_tuple_shapes;
  new_tuple_shapes.reserve(tuple_indices_to_keep.size());
  std::vector<int64_t> map(old_tuple_element_count, -1);
  for (int64_t i : tuple_indices_to_keep) {
    map[i] = new_tuple_shapes.size();
    new_tuple_shapes.push_back(&param->shape().tuple_shapes(i));
  }
  Shape tuple_shape = ShapeUtil::MakeTupleShapeWithPtrs(new_tuple_shapes);
  // Clone rather than mutate in place: the original computation may be shared
  // by callers that are not rewired here (e.g. sharded conditionals below).
  HloComputation* new_computation =
      computation->parent()->AddEmbeddedComputation(computation->Clone());
  param = new_computation->parameter_instruction(0);
  *param->mutable_shape() = tuple_shape;
  // Retarget the clone's GTEs to the compacted indices.
  for (HloInstruction* user : param->users()) {
    user->set_tuple_index(map[user->tuple_index()]);
  }
  for (HloInstruction* conditional : calling_conditionals) {
    // Sharded conditionals are left untouched; they keep the original
    // computation.
    if (conditional->has_sharding()) {
      continue;
    }
    for (int64_t branch = 0; branch < conditional->branch_count(); ++branch) {
      if (conditional->branch_computation(branch) != computation) {
        continue;
      }
      conditional->set_branch_computation(branch, new_computation);
      // Operand (branch + 1) is the argument fed to this branch; operand 0 is
      // the branch index/predicate.
      const Shape& old_shape = conditional->operand(branch + 1)->shape();
      std::vector<HloInstruction*> new_tuple_operands;
      new_tuple_operands.reserve(tuple_indices_to_keep.size());
      for (int64_t i : tuple_indices_to_keep) {
        new_tuple_operands.push_back(conditional->parent()->AddInstruction(
            HloInstruction::CreateGetTupleElement(
                old_shape.tuple_shapes(i),
                conditional->mutable_operand(branch + 1), i)));
      }
      HloInstruction* new_tuple = conditional->parent()->AddInstruction(
          HloInstruction::CreateTuple(new_tuple_operands));
      TF_RETURN_IF_ERROR(
          conditional->ReplaceOperandWithDifferentShape(branch + 1, new_tuple));
      // Invariant checks: operand shape must match the new parameter shape,
      // and the conditional's result shape must match the branch root shape.
      CHECK(ShapeUtil::Compatible(conditional->operand(branch + 1)->shape(),
                                  conditional->branch_computation(branch)
                                      ->parameter_instruction(0)
                                      ->shape()));
      CHECK(ShapeUtil::Compatible(
          conditional->shape(),
          conditional->branch_computation(branch)->root_instruction()->shape()))
          << conditional->branch_computation(branch)->ToString();
    }
  }
  return true;
}

// If `conditional_op` has no users (and is not the computation root), rewrites
// its result type to the empty tuple: each branch is cloned with an empty
// kTuple root, discarding all dead result computation. Returns true on change.
bool ReplaceRootWithEmptyTupleIfNoUsers(HloInstruction* conditional_op) {
  const Shape empty_tuple = ShapeUtil::MakeTupleShape({});
  if (conditional_op->user_count() == 0 &&
      conditional_op != conditional_op->parent()->root_instruction() &&
      !ShapeUtil::Compatible(empty_tuple, conditional_op->shape())) {
    for (int64_t branch_id = 0; branch_id < conditional_op->branch_count();
         ++branch_id) {
      // Clone each branch so other callers of the same computation are
      // unaffected, then give the clone an empty tuple root.
      auto branch_computation =
          conditional_op->GetModule()->AddEmbeddedComputation(
              conditional_op->branch_computation(branch_id)->Clone());
      conditional_op->set_branch_computation(branch_id, branch_computation);
      auto new_empty_root =
          branch_computation->AddInstruction(HloInstruction::CreateTuple({}));
      // `true` allows the root swap to change the computation's shape.
      branch_computation->set_root_instruction(new_empty_root, true);
    }
    *conditional_op->mutable_shape() = empty_tuple;
    return true;
  }
  return false;
}

// Narrows a tuple-shaped conditional to only the tuple elements that are
// actually read by its (all-GTE) users. Clones every branch with a root tuple
// containing just the used elements and re-indexes the GTE users. Returns
// true on change; returns false (with a VLOG reason) for any shape/user
// pattern it cannot handle.
bool RemoveUnusedTupleElements(HloInstruction* conditional_op) {
  if (conditional_op->user_count() == 0 ||
      conditional_op == conditional_op->parent()->root_instruction() ||
      !conditional_op->shape().IsTuple()) {
    VLOG(3) << "Skip RemoveUnusedTupleElements due to non-tuple result:\n"
            << conditional_op->ToShortString();
    return false;
  }
  const int old_tuple_shapes_size = conditional_op->shape().tuple_shapes_size();
  // Mark which tuple indices are read by at least one GTE user.
  std::vector<bool> used_indices(old_tuple_shapes_size, false);
  for (const HloInstruction* user : conditional_op->users()) {
    if (user->opcode() != HloOpcode::kGetTupleElement) {
      VLOG(3) << "Skip RemoveUnusedTupleElements due to non-GTE user:\n"
              << user->ToShortString();
      return false;
    }
    used_indices[user->tuple_index()] = true;
  }
  const int new_tuple_shapes_size =
      std::count(used_indices.begin(), used_indices.end(), true);
  if (new_tuple_shapes_size == old_tuple_shapes_size) {
    VLOG(3) << "Skip RemoveUnusedTupleElements due to every index is in use.";
    return false;
  }
  // Build both directions of the index remapping by walking the used flags in
  // order, so relative element order is preserved.
  absl::flat_hash_map<int, int> new_to_old_mapping, old_to_new_mapping;
  auto old_iter = used_indices.begin();
  for (int new_index = 0; new_index < new_tuple_shapes_size; ++new_index) {
    old_iter = std::find(old_iter, used_indices.end(), true);
    const int old_index = std::distance(used_indices.begin(), old_iter);
    new_to_old_mapping[new_index] = old_index;
    old_to_new_mapping[old_index] = new_index;
    ++old_iter;
  }
  // Copy the old shape before mutating the instruction below.
  const Shape old_shape = conditional_op->shape();
  std::vector<const Shape*> new_tuple_shapes;
  new_tuple_shapes.reserve(new_tuple_shapes_size);
  for (int new_index = 0; new_index < new_tuple_shapes_size; ++new_index) {
    new_tuple_shapes.push_back(
        &old_shape.tuple_shapes(new_to_old_mapping[new_index]));
  }
  const Shape new_shape = ShapeUtil::MakeTupleShapeWithPtrs(new_tuple_shapes);
  // Verify every branch root already matches the conditional's shape before
  // committing to the rewrite; otherwise skip.
  for (HloComputation* branch : conditional_op->branch_computations()) {
    const HloInstruction* root = branch->root_instruction();
    if (!root->shape().IsTuple() ||
        !ShapeUtil::Compatible(branch->root_instruction()->shape(),
                               old_shape)) {
      VLOG(3) << "Skip RemoveUnusedTupleElements due to some branch "
              << branch->name() << " has in-compatible root shape, expect "
              << old_shape.ToString() << ", but got "
              << root->shape().ToString() << "\n"
              << conditional_op->ToString();
      return false;
    }
  }
  // Clone each branch and replace its root with a tuple of GTEs selecting
  // only the used elements from the old root.
  for (int branch_id = 0; branch_id < conditional_op->branch_count();
       ++branch_id) {
    HloComputation* old_branch = conditional_op->branch_computation(branch_id);
    HloComputation* cloned_branch =
        conditional_op->GetModule()->AddEmbeddedComputation(
            old_branch->Clone());
    conditional_op->set_branch_computation(branch_id, cloned_branch);
    HloInstruction* old_root = cloned_branch->root_instruction();
    std::vector<HloInstruction*> new_tuple_root_operands;
    for (int old_index = 0; old_index < old_tuple_shapes_size; ++old_index) {
      if (used_indices[old_index]) {
        new_tuple_root_operands.push_back(
            cloned_branch->AddInstruction(HloInstruction::CreateGetTupleElement(
                old_shape.tuple_shapes(old_index), old_root, old_index)));
      }
    }
    HloInstruction* new_tuple_root = cloned_branch->AddInstruction(
        HloInstruction::CreateTuple(new_tuple_root_operands));
    // `true`: accept the shape change of the cloned branch's root.
    cloned_branch->set_root_instruction(new_tuple_root, true);
  }
  *conditional_op->mutable_shape() = new_shape;
  // Finally, re-point every GTE user at the compacted index.
  for (HloInstruction* user : conditional_op->users()) {
    const int old_index = user->tuple_index();
    const int new_index = old_to_new_mapping[old_index];
    user->set_tuple_index(new_index);
  }
  return true;
}

// If two tuple elements of `conditional` are produced by the identical
// operand vector across ALL branch roots, redirects GTE users of the later
// (duplicate) index to the earlier one. The now-unused element is left for
// RemoveUnusedTupleElements / DCE to clean up. Returns true on change.
// Requires: tuple-shaped result, all users are GTEs, all branch roots are
// kTuple.
bool MergeDuplicateTupleElements(HloInstruction* conditional) {
  if (conditional->user_count() == 0 ||
      conditional == conditional->parent()->root_instruction() ||
      !conditional->shape().IsTuple()) {
    VLOG(3) << "Skip MergeDuplicateTupleElements due not tuple shape nor root "
               "instruction:\n"
            << conditional->ToShortString();
    return false;
  }
  for (const HloInstruction* user : conditional->users()) {
    if (user->opcode() != HloOpcode::kGetTupleElement) {
      VLOG(3) << "Skip MergeDuplicateTupleElements due not all users are "
                 "kGetTupleElement:\n"
              << conditional->ToShortString();
      return false;
    }
  }
  for (const HloComputation* branch : conditional->branch_computations()) {
    if (branch->root_instruction()->opcode() != HloOpcode::kTuple) {
      VLOG(3) << "Skip MergeDuplicateTupleElements due not all branch roots "
                 "are kTuple:\n"
              << conditional->ToShortString();
      return false;
    }
  }
  // For tuple index i, gather the i-th root-tuple operand from every branch;
  // two indices are duplicates iff these vectors compare equal (same operand
  // pointers in every branch).
  auto vectorize_branches_root_tuple_ith_operand = [conditional](int64_t i) {
    std::vector<const HloInstruction*> operands;
    absl::c_transform(conditional->branch_computations(),
                      std::back_inserter(operands),
                      [i](const HloComputation* branch) {
                        return branch->root_instruction()->operand(i);
                      });
    return operands;
  };
  // Redirect every GTE reading index j to read index i instead.
  auto replace_root_user_gte_jth_with_gte_ith = [conditional](int64_t i,
                                                              int64_t j) {
    bool changed = false;
    for (HloInstruction* user : conditional->users()) {
      if (user->tuple_index() == j) {
        user->set_tuple_index(i);
        changed |= true;
      }
    }
    return changed;
  };
  bool changed = false;
  // Maps per-branch operand vector -> first tuple index that produced it.
  absl::flat_hash_map<std::vector<const HloInstruction*>, int64_t>
      index_collision_table;
  for (int i = 0; i < conditional->shape().tuple_shapes_size(); ++i) {
    const std::vector<const HloInstruction*> ith_operands_vector =
        vectorize_branches_root_tuple_ith_operand(i);
    const auto emplace_res =
        index_collision_table.emplace(ith_operands_vector, i);
    if (!emplace_res.second) {
      // Collision: index i duplicates the earlier index stored in the table.
      changed |= replace_root_user_gte_jth_with_gte_ith(
          emplace_res.first->second, i);
    }
  }
  return changed;
}

}  // namespace

// Attempts to eliminate `conditional` entirely:
//   1. single-branch conditionals become an inlined call;
//   2. constant branch index (PRED or S32, with out-of-range S32 clamped to
//      the last branch, matching conditional semantics) selects one branch,
//      which is inlined;
//   3. a two-branch PRED conditional whose branches contain only cheap
//      (elementwise or trivially-fusible) instructions is rewritten as
//      "run both branches, kSelect the results", then both calls are inlined.
// Returns true iff the conditional was removed.
absl::StatusOr<bool> ConditionalSimplifier::TryRemoveConditional(
    HloInstruction* conditional) {
  CHECK_EQ(conditional->opcode(), HloOpcode::kConditional);
  if (!conditional->parent()->IsSafelyRemovable(conditional) ||
      conditional->HasSideEffect()) {
    VLOG(2) << "Not attempting to remove conditional as it is not removable or "
               "has side effect: "
            << conditional->ToShortString();
    return false;
  }
  auto computation = conditional->parent();
  // Wraps branch `branch` in a kCall taking the branch's operand
  // (operand index branch + 1 on the conditional).
  auto create_call = [&](int64_t branch) {
    auto call = computation->AddInstruction(HloInstruction::CreateCall(
        conditional->shape(), {conditional->mutable_operand(1 + branch)},
        conditional->branch_computation(branch)));
    conditional->SetupDerivedInstruction(call);
    return call;
  };
  // Case 1: only one branch — always taken.
  if (conditional->branch_count() == 1) {
    HloInstruction* call_op = create_call(0);
    TF_RETURN_IF_ERROR(computation->ReplaceInstruction(conditional, call_op));
    TF_RETURN_IF_ERROR(CallInliner::Inline(call_op).status());
    return true;
  }
  // Case 2: constant branch selector.
  if (conditional->operand(0)->opcode() == HloOpcode::kConstant) {
    int branch_index = 0;
    if (conditional->operand(0)->shape().element_type() == PRED) {
      // PRED conditional: true -> branch 0, false -> branch 1.
      branch_index = conditional->operand(0)->literal().Get<bool>({}) ? 0 : 1;
    } else {
      branch_index = conditional->operand(0)->literal().Get<int32_t>({});
      // Out-of-range index falls through to the last branch (HLO conditional
      // semantics).
      if (branch_index < 0 || branch_index >= conditional->branch_count()) {
        branch_index = conditional->branch_count() - 1;
      }
    }
    HloInstruction* call_op = create_call(branch_index);
    TF_RETURN_IF_ERROR(computation->ReplaceInstruction(conditional, call_op));
    TF_RETURN_IF_ERROR(CallInliner::Inline(call_op).status());
    return true;
  }
  // Case 3: speculate both branches and select. Only profitable when every
  // instruction in both branches is cheap: the listed opcodes are considered
  // free, everything else must be elementwise.
  auto instruction_is_expensive = [](const HloInstruction* hlo) {
    switch (hlo->opcode()) {
      case HloOpcode::kBroadcast:
      case HloOpcode::kConcatenate:
      case HloOpcode::kDynamicSlice:
      case HloOpcode::kGetTupleElement:
      case HloOpcode::kReduce:
      case HloOpcode::kReshape:
      case HloOpcode::kPad:
      case HloOpcode::kParameter:
      case HloOpcode::kSlice:
      case HloOpcode::kTuple:
        return false;
      default:
        return !hlo->IsElementwise();
    }
  };
  if (conditional->branch_count() != 2 ||
      conditional->operand(0)->shape().element_type() != PRED ||
      absl::c_any_of(conditional->branch_computation(0)->instructions(),
                     instruction_is_expensive) ||
      absl::c_any_of(conditional->branch_computation(1)->instructions(),
                     instruction_is_expensive)) {
    VLOG(2)
        << "Not attempting to remove conditional as its branch_index is not a "
           "compile-time constant or contains expensive instructions: "
        << conditional->ToShortString();
    return false;
  }
  // An "empty" branch with an array root would make the select rewrite a
  // pessimization (it materializes otherwise-free values), so back off.
  bool branch_empty =
      ComputationIsEmptyWithArrayRoot(conditional->branch_computation(0)) ||
      ComputationIsEmptyWithArrayRoot(conditional->branch_computation(1));
  if (branch_empty) {
    return false;
  }
  HloInstruction* true_call_op = create_call(0);
  HloInstruction* false_call_op = create_call(1);
  // Broadcasts the scalar predicate to `shape` (element type PRED) so it can
  // drive an elementwise kSelect; scalars pass through unchanged.
  auto condition_broadcast = [&](const Shape& shape) {
    if (ShapeUtil::IsScalar(shape)) {
      return conditional->mutable_operand(0);
    }
    Shape new_shape = ShapeUtil::ChangeElementType(shape, PRED);
    UpdateLayout(&new_shape);
    return computation->AddInstruction(HloInstruction::CreateBroadcast(
        new_shape, conditional->mutable_operand(0), {}));
  };
  auto gte = [&](HloInstruction* hlo, int64_t i) {
    return computation->AddInstruction(HloInstruction::CreateGetTupleElement(
        hlo->shape().tuple_shapes(i), hlo, i));
  };
  // Recursively selects between the two branch results, element by element:
  // tokens are joined with kAfterAll, arrays with kSelect, and tuples are
  // decomposed via GTE and rebuilt. std::function (not auto) because the
  // lambda recurses into itself.
  std::function<HloInstruction*(HloInstruction*, HloInstruction*)> select =
      [&](HloInstruction* t, HloInstruction* f) {
        if (f->shape().IsToken()) {
          return computation->AddInstruction(
              HloInstruction::CreateAfterAll({t, f}));
        }
        if (f->shape().IsArray()) {
          return computation->AddInstruction(HloInstruction::CreateTernary(
              f->shape(), HloOpcode::kSelect, condition_broadcast(f->shape()),
              t, f));
        }
        std::vector<HloInstruction*> selects;
        const int64_t tuple_element_count =
            ShapeUtil::TupleElementCount(f->shape());
        selects.reserve(tuple_element_count);
        for (int64_t i = 0; i < tuple_element_count; ++i) {
          selects.push_back(select(gte(t, i), gte(f, i)));
        }
        return computation->AddInstruction(
            HloInstruction::CreateTuple(selects));
      };
  TF_RETURN_IF_ERROR(computation->ReplaceInstruction(
      conditional, select(true_call_op, false_call_op)));
  TF_RETURN_IF_ERROR(CallInliner::Inline(false_call_op).status());
  TF_RETURN_IF_ERROR(CallInliner::Inline(true_call_op).status());
  return true;
}

// Returns true if `computation` or any computation it (transitively) calls
// contains a channel instruction (send/recv-style communication). Iterative
// worklist traversal; note computations reachable through multiple paths may
// be visited more than once, which affects only performance, not correctness.
static bool ComputationCallsChannelInstructions(
    const HloComputation& computation) {
  std::vector<const HloComputation*> worklist = {&computation};
  while (!worklist.empty()) {
    const HloComputation* work = worklist.back();
    worklist.pop_back();
    for (const HloInstruction* instruction : work->instructions()) {
      if (DynCast<HloChannelInstruction>(instruction) != nullptr) {
        return true;
      }
      worklist.insert(worklist.end(),
                      instruction->called_computations().begin(),
                      instruction->called_computations().end());
    }
  }
  return false;
}

// Returns true if any computation called by `instruction` transitively
// contains a channel instruction.
static bool InstructionCallsChannelInstructions(
    const HloInstruction& instruction) {
  for (const HloComputation* called_computation :
       instruction.called_computations()) {
    if (ComputationCallsChannelInstructions(*called_computation)) {
      return true;
    }
  }
  return false;
}

// Pass entry point. Collects all eligible kConditional instructions
// (skipping ones that touch channel instructions or carry sharding), applies
// the tuple-level simplifications and TryRemoveConditional to each, then
// prunes unused operands of the branch computations that survive. Returns
// true iff the module changed.
absl::StatusOr<bool> ConditionalSimplifier::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  XLA_VLOG_LINES(
      3, "ConditionalSimplifier::Run(), before:\n" + module->ToString());
  bool changed = false;
  std::vector<HloInstruction*> conditional_ops;
  for (auto* comp : module->computations(execution_threads)) {
    for (auto* instr : comp->MakeInstructionPostOrder()) {
      if (instr->opcode() == HloOpcode::kConditional) {
        // Conditionals involving cross-replica/host communication or explicit
        // sharding are not safe to rewrite here.
        if (InstructionCallsChannelInstructions(*instr)) {
          continue;
        }
        if (instr->has_sharding()) {
          continue;
        }
        conditional_ops.push_back(instr);
      }
    }
  }
  absl::flat_hash_set<HloInstruction*> removed_conditionals;
  for (HloInstruction* conditional_op : conditional_ops) {
    // Order matters: merging duplicates first makes more elements unused,
    // which the next two steps can then prune.
    changed |= MergeDuplicateTupleElements(conditional_op);
    changed |= RemoveUnusedTupleElements(conditional_op);
    changed |= ReplaceRootWithEmptyTupleIfNoUsers(conditional_op);
    TF_ASSIGN_OR_RETURN(bool result, TryRemoveConditional(conditional_op));
    if (result) {
      removed_conditionals.insert(conditional_op);
      changed = true;
    }
  }
  // Group surviving conditionals by branch computation, keeping a separate
  // vector of first-seen computations for deterministic iteration order
  // (flat_hash_map iteration order is unspecified).
  absl::flat_hash_map<HloComputation*, absl::flat_hash_set<HloInstruction*>>
      calling_conditionals;
  std::vector<HloComputation*> calling_computationals_vector;
  for (HloInstruction* conditional : conditional_ops) {
    if (removed_conditionals.contains(conditional)) {
      continue;
    }
    for (int64_t branch = 0; branch < conditional->branch_count(); ++branch) {
      auto* branch_comp = conditional->branch_computation(branch);
      if (!calling_conditionals.contains(branch_comp)) {
        calling_computationals_vector.push_back(branch_comp);
      }
      calling_conditionals[branch_comp].insert(conditional);
    }
  }
  for (auto* comp : calling_computationals_vector) {
    auto entry = calling_conditionals.find(comp);
    CHECK(entry != calling_conditionals.end());
    TF_ASSIGN_OR_RETURN(bool result, TryRemoveUnusedConditionalOperands(
                                         entry->first, entry->second));
    changed |= result;
  }
  XLA_VLOG_LINES(3,
                 "ConditionalSimplifier::Run(), after:\n" + module->ToString());
  return changed;
}

}  // namespace xla
#include "xla/service/conditional_simplifier.h" #include <string> #include <utility> #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/literal_util.h" #include "xla/shape_util.h" #include "xla/test.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" #include "xla/types.h" #include "xla/xla_data.pb.h" #include "tsl/platform/status.h" namespace xla { namespace { namespace op = xla::testing::opcode_matchers; class ConditionalSimplifierTest : public HloTestBase { public: HloComputation* MakeConditional(HloModule* module, bool is_constant = true); }; HloComputation* ConditionalSimplifierTest::MakeConditional(HloModule* module, bool is_constant) { HloComputation::Builder builder(TestName()); HloComputation* true_computation; { HloComputation::Builder true_computation_builder(TestName() + ".true_computation"); auto param = true_computation_builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(S32, {}), "param")); auto one = true_computation_builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1))); true_computation_builder.AddInstruction(HloInstruction::CreateBinary( ShapeUtil::MakeShape(S32, {}), HloOpcode::kAdd, param, one)); true_computation = module->AddEmbeddedComputation(true_computation_builder.Build()); } HloComputation* false_computation; { HloComputation::Builder false_computation_builder(TestName() + ".false_computation"); auto param = false_computation_builder.AddInstruction( HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(S32, {}), "param")); auto forty_two = false_computation_builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(42))); false_computation_builder.AddInstruction(HloInstruction::CreateBinary( ShapeUtil::MakeShape(S32, {}), HloOpcode::kAdd, param, forty_two)); false_computation = 
module->AddEmbeddedComputation(false_computation_builder.Build()); } auto false_instrn = builder.AddInstruction( is_constant ? HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)) : HloInstruction::CreateParameter(1, ShapeUtil::MakeShape(PRED, {}), "cond")); auto false_param = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(S32, {}), "false_param")); auto one = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1))); builder.AddInstruction(HloInstruction::CreateConditional( ShapeUtil::MakeShape(S32, {}), false_instrn, one, true_computation, false_param, false_computation)); return module->AddEntryComputation(builder.Build()); } TEST_F(ConditionalSimplifierTest, ConditionalGetsInlined) { auto m = CreateNewVerifiedModule(); HloComputation* computation = MakeConditional(m.get()); ASSERT_TRUE(ConditionalSimplifier().Run(m.get()).value()); EXPECT_THAT(computation->root_instruction(), op::Add(op::Parameter(), op::Constant())); } TEST_F(ConditionalSimplifierTest, BranchGetsInlined) { auto m = CreateNewVerifiedModule(); HloComputation* computation = MakeConditional(m.get(), false); ASSERT_TRUE(ConditionalSimplifier().Run(m.get()).value()); EXPECT_THAT( computation->root_instruction(), op::Select(op::Parameter(1), op::Add(op::Constant(), op::Constant()), op::Add(op::Parameter(0), op::Constant()))); } TEST_F(ConditionalSimplifierTest, ConditionalWithControlDependency) { auto m = CreateNewVerifiedModule(); HloComputation* computation = MakeConditional(m.get()); auto* true_op = computation->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true))); TF_ASSERT_OK( true_op->AddControlDependencyTo(computation->root_instruction())); EXPECT_FALSE(ConditionalSimplifier().Run(m.get()).value()); } TEST_F(ConditionalSimplifierTest, NotRemovedIfContainsSend) { auto m = CreateNewVerifiedModule(); HloComputation* computation = MakeConditional(m.get()); auto* conditional = 
computation->root_instruction(); ASSERT_EQ(conditional->opcode(), HloOpcode::kConditional); auto* true_computation = conditional->true_computation(); auto* token = true_computation->AddInstruction(HloInstruction::CreateToken()); auto* send = true_computation->AddInstruction(HloInstruction::CreateSend( true_computation->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true))), token, 0)); true_computation->AddInstruction(HloInstruction::CreateSendDone(send)); EXPECT_FALSE(ConditionalSimplifier().Run(m.get()).value()); } TEST_F(ConditionalSimplifierTest, NotRemovedIfContainsRecv) { auto m = CreateNewVerifiedModule(); HloComputation* computation = MakeConditional(m.get()); auto* conditional = computation->root_instruction(); ASSERT_EQ(conditional->opcode(), HloOpcode::kConditional); auto* true_computation = conditional->true_computation(); auto* token = true_computation->AddInstruction(HloInstruction::CreateToken()); auto* recv = true_computation->AddInstruction(HloInstruction::CreateRecv( ShapeUtil::MakeShape(F32, {1}), token, 0)); true_computation->AddInstruction(HloInstruction::CreateRecvDone(recv)); EXPECT_FALSE(ConditionalSimplifier().Run(m.get()).value()); } TEST_F(ConditionalSimplifierTest, NotRemovedIfContainsNonRemovableInstruction) { auto m = CreateNewVerifiedModule(); HloComputation* computation = MakeConditional(m.get()); auto* conditional = computation->root_instruction(); ASSERT_EQ(conditional->opcode(), HloOpcode::kConditional); auto* false_computation = conditional->false_computation(); auto token = false_computation->AddInstruction(HloInstruction::CreateToken()); false_computation->AddInstruction(HloInstruction::CreateInfeed( ShapeUtil::MakeShape(F32, {1}), token, "config")); EXPECT_FALSE(ConditionalSimplifier().Run(m.get()).value()); } TEST_F(ConditionalSimplifierTest, TrivalOperandsRemoved) { absl::string_view hlo_string = R"( HloModule UnusedTupleOperands on_false { t = (f32[20,40], f32[40,40], f32[20,40], f32[40,40]) 
parameter(0) lhs = f32[20,40] get-tuple-element(t), index=0 rhs = f32[40,40] get-tuple-element(t), index=1 dot = f32[20,40] dot(lhs, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT result = (f32[20,40]) tuple(dot) } on_true { t = (f32[20,40], f32[40,40], f32[20,40], f32[40,40]) parameter(0) lhs = f32[20,40] get-tuple-element(t), index=2 rhs = f32[40,40] get-tuple-element(t), index=3 dot = f32[20,40] dot(lhs, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT result = (f32[20,40]) tuple(dot) } ENTRY main { c0_0 = f32[20,40] parameter(0) c0_1 = f32[40,40] parameter(1) c1_0 = f32[20,40] parameter(2) c1_1 = f32[40,40] parameter(3) p = pred[] parameter(4) t = (f32[20,40], f32[40,40], f32[20,40], f32[40,40]) tuple(c0_0, c0_1, c1_0, c1_1) call = (f32[20,40]) call(t), to_apply=on_true ROOT result = (f32[20,40]) conditional(p,t,t), false_computation=on_false, true_computation=on_true } )"; auto status = ParseAndReturnVerifiedModule(hlo_string); TF_ASSERT_OK(status.status()); std::unique_ptr<HloModule> module = std::move(status).value(); HloVerifier v(false, false); TF_ASSERT_OK(v.Run(module.get()).status()); EXPECT_TRUE(ConditionalSimplifier().Run(module.get()).value()); TF_ASSERT_OK(v.Run(module.get()).status()); HloInstruction* conditional = module->entry_computation()->root_instruction(); EXPECT_TRUE(conditional != nullptr); EXPECT_EQ(conditional->operand(1)->shape().tuple_shapes().size(), 2); EXPECT_EQ(conditional->operand(2)->shape().tuple_shapes().size(), 2); HloInstruction* call = FindInstruction(module.get(), "call"); EXPECT_EQ( call->to_apply()->parameter_instruction(0)->shape().tuple_shapes().size(), 4); } TEST_F(ConditionalSimplifierTest, TwoConditionalsCreatedInReversedLexicalOrder) { absl::string_view hlo_string = R"( HloModule DeadConditional computation.1 { param.1 = s64[] parameter(0) constant.1 = s64[] constant(1) ROOT add.1 = s64[] add(param.1, constant.1) } computation.2 { param.2 = s64[] parameter(0) constant.2 = s64[] 
constant(2) ROOT add.2 = s64[] add(param.2, constant.2) } computation.3 { param.3 = s64[] parameter(0) constant.3 = s64[] constant(3) ROOT add.3 = s64[] add(param.3, constant.3) } computation.4 { param.4 = s64[] parameter(0) constant.4 = s64[] constant(4) ROOT add.4 = s64[] add(param.4, constant.4) } ENTRY KernelEntry { param.1 = s64[] parameter(0) param.2 = s64[] parameter(1) param.3 = s64[] parameter(2) param.4 = pred[] parameter(3) conditional_1 = s64[] conditional(param.4, param.3, param.2), true_computation=computation.3, false_computation=computation.4 constant.1 = pred[] constant(false) ROOT conditional_2 = s64[] conditional(constant.1, conditional_1, param.1), true_computation=computation.1, false_computation=computation.2 })"; auto status = ParseAndReturnVerifiedModule(hlo_string); TF_ASSERT_OK(status.status()); std::unique_ptr<HloModule> module = std::move(status).value(); HloVerifier v(false, false); TF_ASSERT_OK(v.Run(module.get()).status()); HloInstruction* conditional_1 = FindInstruction(module.get(), "conditional_1"); HloInstruction* conditional_1_clone = conditional_1->parent()->AddInstruction(conditional_1->Clone()); TF_ASSERT_OK(conditional_1->ReplaceAllUsesWith(conditional_1_clone)); TF_ASSERT_OK(conditional_1->parent()->RemoveInstruction(conditional_1)); EXPECT_TRUE(ConditionalSimplifier().Run(module.get()).value()); } TEST_F(ConditionalSimplifierTest, RemoveDeadRoots) { absl::string_view hlo_string = R"( HloModule RemoveDeadRoots on_false { t = (f32[20,40], f32[40,40]) parameter(0) lhs = f32[20,40] get-tuple-element(t), index=0 rhs = f32[40,40] get-tuple-element(t), index=1 dot = f32[20,40] dot(lhs, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} after-all = token[] after-all() outfeed = token[] outfeed(dot, after-all) ROOT result = (f32[20,40]) tuple(dot) } on_true { t = (f32[20,40], f32[40,40]) parameter(0) lhs = f32[20,40] get-tuple-element(t), index=0 add = f32[20,40] add(lhs, lhs) ROOT result = (f32[20,40]) tuple(add) } ENTRY main 
{ c0_0 = f32[20,40] parameter(0) c0_1 = f32[40,40] parameter(1) p = pred[] parameter(2) t = (f32[20,40], f32[40,40]) tuple(c0_0, c0_1) conditional = (f32[20, 40]) conditional(p,t,t), false_computation=on_false, true_computation=on_true ROOT result = () tuple() } )"; auto status = ParseAndReturnVerifiedModule(hlo_string); TF_ASSERT_OK(status.status()); HloVerifier v(false, false); TF_ASSERT_OK(v.Run(status.value().get()).status()); EXPECT_TRUE(ConditionalSimplifier().Run(status.value().get()).value()); TF_ASSERT_OK(v.Run(status.value().get()).status()); HloInstruction* conditional = FindInstruction(status.value().get(), "conditional"); EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 0); } TEST_F(ConditionalSimplifierTest, SecondTupleElementUnusedAndRemoved) { absl::string_view hlo_string = R"( HloModule SecondTupleElementUnusedAndRemoved on_true { arg_tuple.7 = (f32[10,10]{1,0}) parameter(0) get-tuple-element.9 = f32[10,10]{1,0} get-tuple-element(arg_tuple.7), index=0 copy = f32[10,10]{1,0} copy(get-tuple-element.9) ROOT tuple.6 = (f32[10,10]{1,0}, f32[10,10]{1,0}) tuple(copy, get-tuple-element.9) } on_false { constant.17 = f32[] constant(0) constant.18 = f32[] constant(1) rng.19 = f32[10,10]{1,0} rng(constant.17, constant.18), distribution=rng_uniform arg_tuple.14 = (f32[10,10]{1,0}) parameter(0) get-tuple-element.16 = f32[10,10]{1,0} get-tuple-element(arg_tuple.14), index=0 ROOT tuple.7 = (f32[10,10]{1,0}, f32[10,10]{1,0}) tuple(rng.19, get-tuple-element.16) } ENTRY main { constant.38 = pred[] constant(true) arg_tuple.30 = (s32[], f32[10,10]{1,0}) parameter(0) get-tuple-element.21 = f32[10,10]{1,0} get-tuple-element(arg_tuple.30), index=1 tuple.1 = (f32[10,10]{1,0}) tuple(get-tuple-element.21) conditional = (f32[10,10]{1,0}, f32[10,10]{1,0}) conditional(constant.38, tuple.1, tuple.1), true_computation=on_true, false_computation=on_false get-first-index = f32[10,10]{1,0} get-tuple-element(conditional), index=0 ROOT result = (f32[10,10]{1,0}) 
tuple(get-first-index) } )"; auto status = ParseAndReturnVerifiedModule(hlo_string); TF_ASSERT_OK(status.status()); HloVerifier v(false, false); TF_ASSERT_OK(v.Run(status.value().get()).status()); EXPECT_TRUE(ConditionalSimplifier().Run(status.value().get()).value()); TF_ASSERT_OK(v.Run(status.value().get()).status()); const HloInstruction* conditional = FindInstruction(status.value().get(), "conditional"); EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 1); } TEST_F(ConditionalSimplifierTest, FirstTupleElementUnusedAndRemoved) { absl::string_view hlo_string = R"( HloModule FirstTupleElementUnusedAndRemoved on_true { arg_tuple.7 = (f32[10,10]{1,0}) parameter(0) get-tuple-element.9 = f32[10,10]{1,0} get-tuple-element(arg_tuple.7), index=0 copy = f32[10,10]{1,0} copy(get-tuple-element.9) ROOT tuple.6 = (f32[10,10]{1,0}, f32[10,10]{1,0}) tuple(copy, get-tuple-element.9) } on_false { constant.17 = f32[] constant(0) constant.18 = f32[] constant(1) rng.19 = f32[10,10]{1,0} rng(constant.17, constant.18), distribution=rng_uniform arg_tuple.14 = (f32[10,10]{1,0}) parameter(0) get-tuple-element.16 = f32[10,10]{1,0} get-tuple-element(arg_tuple.14), index=0 ROOT tuple.7 = (f32[10,10]{1,0}, f32[10,10]{1,0}) tuple(rng.19, get-tuple-element.16) } ENTRY main { constant.38 = pred[] constant(true) arg_tuple.30 = (s32[], f32[10,10]{1,0}) parameter(0) get-tuple-element.21 = f32[10,10]{1,0} get-tuple-element(arg_tuple.30), index=1 tuple.1 = (f32[10,10]{1,0}) tuple(get-tuple-element.21) conditional = (f32[10,10]{1,0}, f32[10,10]{1,0}) conditional(constant.38, tuple.1, tuple.1), true_computation=on_true, false_computation=on_false get-second-index = f32[10,10]{1,0} get-tuple-element(conditional), index=1 ROOT result = (f32[10,10]{1,0}) tuple(get-second-index) } )"; auto status = ParseAndReturnVerifiedModule(hlo_string); TF_ASSERT_OK(status.status()); HloVerifier v(false, false); TF_ASSERT_OK(v.Run(status.value().get()).status()); 
EXPECT_TRUE(ConditionalSimplifier().Run(status.value().get()).value()); TF_ASSERT_OK(v.Run(status.value().get()).status()); const HloInstruction* conditional = FindInstruction(status.value().get(), "conditional"); EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 1); } TEST_F(ConditionalSimplifierTest, MergeDuplicateTupleElements) { absl::string_view hlo_string = R"( HloModule MergeDuplicateTupleElements on_true { param-true = (f32[]) parameter(0) gte-true = f32[] get-tuple-element(param-true), index=0 ROOT tuple-true = (f32[], f32[]) tuple(gte-true, gte-true) } on_false { param-false = (f32[]) parameter(0) constant.0 = f32[] constant(0) constant.1 = f32[] constant(1) rng = f32[] rng(constant.0, constant.1), distribution=rng_uniform ROOT tuple-false = (f32[], f32[]) tuple(rng, rng) } ENTRY main { comp = pred[] parameter(0) arg = (f32[]) parameter(1) conditional = (f32[], f32[]) conditional(comp, arg, arg), true_computation=on_true, false_computation=on_false gte.0 = f32[] get-tuple-element(conditional), index=0 gte.1 = f32[] get-tuple-element(conditional), index=1 ROOT add = f32[] add(gte.0, gte.1) } )"; auto status = ParseAndReturnVerifiedModule(hlo_string); TF_ASSERT_OK(status.status()); HloVerifier v(false, false); TF_ASSERT_OK(v.Run(status.value().get()).status()); EXPECT_TRUE(ConditionalSimplifier().Run(status.value().get()).value()); TF_ASSERT_OK(v.Run(status.value().get()).status()); const HloInstruction* conditional = FindInstruction(status.value().get(), "conditional"); EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 1); const HloInstruction* gte_0 = FindInstruction(status.value().get(), "gte.0"); const HloInstruction* gte_1 = FindInstruction(status.value().get(), "gte.1"); EXPECT_EQ(gte_0->tuple_index(), 0); EXPECT_EQ(gte_1->tuple_index(), 0); } TEST_F(ConditionalSimplifierTest, SimplifyConditionalWithTokens) { absl::string_view hlo_string = R"( HloModule SimplifyConditionalWithTokens true_comp { ROOT parameter.13 = (token[]) 
parameter(0) } false_comp { ROOT parameter.21 = (token[]) parameter(0) } ENTRY entry { parameter.29 = pred[] parameter(0) token.1 = token[] after-all() token.2 = token[] after-all() tuple.3 = (token[]) tuple(token.1) tuple.4 = (token[]) tuple(token.2) ROOT conditional.5 = (token[]) conditional(parameter.29, tuple.3, tuple.4), true_computation=true_comp, false_computation=false_comp } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(hlo_string)); HloVerifier v(false, false); TF_ASSERT_OK(v.Run(module.get()).status()); EXPECT_TRUE(ConditionalSimplifier().Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::AfterAll( op::GetTupleElement(op::Tuple(op::AfterAll()), 0), op::GetTupleElement(op::Tuple(op::AfterAll()), 0)))); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_simplifier.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_simplifier_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
0a481887-ac82-46c5-9d5c-6fcea92ba784
cpp
tensorflow/tensorflow
collectives_schedule_linearizer
third_party/xla/xla/service/collectives_schedule_linearizer.cc
third_party/xla/xla/service/collectives_schedule_linearizer_test.cc
#include "xla/service/collectives_schedule_linearizer.h" #include <algorithm> #include <list> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/ir/hlo_reachability.h" #include "tsl/platform/errors.h" namespace xla { absl::StatusOr<bool> CollectivesScheduleLinearizer::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { if (is_enabled_ && !is_enabled_(module)) { return false; } bool changed = false; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { std::unique_ptr<HloReachabilityMap> reachability; HloInstruction* prev_done = nullptr; for (HloInstruction* inst : computation->MakeInstructionPostOrder()) { auto* next = DynCast<HloCollectiveInstruction>(inst); if (!next) { continue; } if (!reachability) { reachability = HloReachabilityMap::Build(computation); } HloInstruction* start = next; HloInstruction* done = next; switch (next->opcode()) { case HloOpcode::kAllReduceStart: case HloOpcode::kAllGatherStart: case HloOpcode::kCollectivePermuteStart: case HloOpcode::kAsyncStart: CHECK_EQ(start->user_count(), 1); done = start->users()[0]; break; default: break; } if (prev_done && !reachability->IsConnected(start, prev_done)) { TF_RETURN_IF_ERROR(prev_done->AddControlDependencyTo(next)); VLOG(1) << "Adding control dependency from " << prev_done->ToString() << " to " << start->ToString(); changed = true; } prev_done = done; } } return changed; } }
#include "xla/service/collectives_schedule_linearizer.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/service/pattern_matcher.h" #include "xla/test.h" #include "xla/test_helpers.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" namespace xla { namespace { namespace m = match; int64_t CountControlEdges(const HloComputation& computation) { int64_t count = 0; for (const auto& instruction : computation.instructions()) { count += instruction->control_successors().size(); } return count; } class CollectivesScheduleLinearizerTest : public HloTestBase { protected: void InsertCollectivesSchedule(HloModule* module) { CollectivesScheduleLinearizer collectives_schedule_linearizer; ASSERT_IS_OK(collectives_schedule_linearizer.Run(module).status()); } }; TEST_F(CollectivesScheduleLinearizerTest, FixOrdering) { absl::string_view hlo_string = R"( HloModule module sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT out = f32[] add(a, b) } ENTRY entry { p0 = f32[100] parameter(0), parameter_replication={false} p1 = f32[100] parameter(1), parameter_replication={false} c1 = f32[100] all-reduce(p0), replica_groups={}, to_apply=sum c2 = f32[100] all-reduce(p1), replica_groups={}, to_apply=sum ROOT out = f32[100] add(c1, c2) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); InsertCollectivesSchedule(module.get()); EXPECT_EQ(CountControlEdges(*module->entry_computation()), 1); HloInstruction *c1 = nullptr, *c2 = nullptr; for (HloInstruction* instr : module->entry_computation()->instructions()) { if (Match(instr, m::AllReduce(m::Parameter(0)))) { c1 = instr; } if (Match(instr, m::AllReduce(m::Parameter(1)))) { c2 = instr; } } EXPECT_TRUE(c1 != nullptr && c2 != nullptr); EXPECT_TRUE(absl::c_linear_search(c2->control_predecessors(), c1)); } TEST_F(CollectivesScheduleLinearizerTest, NoFixRequired) { absl::string_view hlo_string = 
R"( HloModule module sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT out = f32[] add(a, b) } ENTRY entry { p0 = f32[100] parameter(0), parameter_replication={false} p1 = f32[100] parameter(1), parameter_replication={false} c1 = f32[100] all-reduce(p0), replica_groups={}, to_apply=sum c2 = f32[100] all-reduce(p1), replica_groups={}, to_apply=sum, control-predecessors={c1} ROOT out = f32[100] add(c1, c2) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); InsertCollectivesSchedule(module.get()); EXPECT_EQ(CountControlEdges(*module->entry_computation()), 1); } TEST_F(CollectivesScheduleLinearizerTest, DependentCollectives) { absl::string_view hlo_string = R"( HloModule module sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT out = f32[] add(a, b) } ENTRY entry { p0 = f32[100] parameter(0), parameter_replication={false} p1 = f32[100] parameter(1), parameter_replication={false} c1 = f32[100] all-reduce(p0), replica_groups={}, to_apply=sum c2 = f32[100] all-reduce(c1), replica_groups={}, to_apply=sum ROOT out = f32[100] add(c1, c2) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); InsertCollectivesSchedule(module.get()); EXPECT_EQ(CountControlEdges(*module->entry_computation()), 0); } TEST_F(CollectivesScheduleLinearizerTest, NonPostorder) { absl::string_view hlo_string = R"( HloModule module sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT out = f32[] add(a, b) } ENTRY entry { p0 = f32[100] parameter(0), parameter_replication={false} p1 = f32[100] parameter(1), parameter_replication={false} c1 = f32[100] all-reduce(p0), replica_groups={}, to_apply=sum c2 = f32[100] all-reduce(p1), replica_groups={}, to_apply=sum c3 = f32[100] all-reduce(p1), replica_groups={}, to_apply=sum t = f32[100] add(c1, c2) ROOT out = f32[100] add(t, c3) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); 
ASSERT_IS_OK( module->entry_computation() ->GetInstructionWithName("c3") ->AddControlDependencyTo( module->entry_computation()->GetInstructionWithName("c1"))); InsertCollectivesSchedule(module.get()); EXPECT_EQ(CountControlEdges(*module->entry_computation()), 2); } TEST_F(CollectivesScheduleLinearizerTest, AsyncOrdering) { absl::string_view hlo_string = R"( HloModule module sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT out = f32[] add(a, b) } ENTRY entry { p0 = f32[100] parameter(0), parameter_replication={false} p1 = f32[100] parameter(1), parameter_replication={false} ars0 = f32[100] all-reduce-start(p0), replica_groups={}, to_apply=sum ard0 = f32[100] all-reduce-done(ars0) ars1 = f32[100] all-reduce-start(p1), replica_groups={}, to_apply=sum ard1 = f32[100] all-reduce-done(ars1) ROOT out = f32[100] add(ard0, ard1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); InsertCollectivesSchedule(module.get()); EXPECT_EQ(CountControlEdges(*module->entry_computation()), 1); const HloInstruction *root = module->entry_computation()->root_instruction(); const HloInstruction *ard0 = root->operand(0); const HloInstruction *ard1 = root->operand(1); EXPECT_EQ(ard0->opcode(), HloOpcode::kAllReduceDone); EXPECT_EQ(ard1->opcode(), HloOpcode::kAllReduceDone); const HloInstruction *ars1 = ard1->operand(0); EXPECT_EQ(ars1->opcode(), HloOpcode::kAllReduceStart); EXPECT_TRUE(absl::c_linear_search(ars1->control_predecessors(), ard0)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collectives_schedule_linearizer.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collectives_schedule_linearizer_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
7cf7d152-112a-442f-9cb2-370feb836587
cpp
tensorflow/tensorflow
gather_expander
third_party/xla/xla/service/gather_expander.cc
third_party/xla/xla/service/gather_expander_test.cc
#include "xla/service/gather_expander.h" #include <utility> #include "absl/algorithm/container.h" #include "absl/status/statusor.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/literal_util.h" #include "xla/service/hlo_creation_utils.h" #include "xla/service/while_util.h" #include "xla/util.h" namespace xla { namespace { absl::StatusOr<HloInstruction*> TransposeIndexVectorDimToLast( HloInstruction* start_indices, int64_t index_vector_dim) { const Shape& start_indices_shape = start_indices->shape(); if (start_indices_shape.dimensions_size() == index_vector_dim) { return start_indices; } if (index_vector_dim == (start_indices_shape.dimensions_size() - 1)) { return start_indices; } std::vector<int64_t> permutation; permutation.reserve(start_indices_shape.dimensions_size()); for (int64_t i = 0, e = start_indices_shape.dimensions_size(); i < e; i++) { if (i != index_vector_dim) { permutation.push_back(i); } } permutation.push_back(index_vector_dim); return MakeTransposeHlo(start_indices, permutation); } absl::StatusOr<HloInstruction*> CanonicalizeGatherIndices( HloInstruction* start_indices, int64_t index_vector_dim) { TF_ASSIGN_OR_RETURN( HloInstruction * transposed_start_indices, TransposeIndexVectorDimToLast(start_indices, index_vector_dim)); bool indices_are_scalar = index_vector_dim == start_indices->shape().dimensions_size(); const int64_t index_dims_in_start_indices = indices_are_scalar ? 
0 : 1; const Shape& shape = transposed_start_indices->shape(); if (shape.dimensions_size() == index_dims_in_start_indices) { return PrependDegenerateDims(transposed_start_indices, 1); } else { return CollapseFirstNDims( transposed_start_indices, shape.dimensions_size() - index_dims_in_start_indices); } } absl::StatusOr<HloInstruction*> AdjustBatchDimsInAccumulator( const Shape& start_indices_shape, HloInstruction* accumulator, int64_t index_vector_dim) { std::vector<int64_t> batch_dim_bounds; batch_dim_bounds.reserve(start_indices_shape.dimensions_size()); for (int64_t i = 0, e = start_indices_shape.dimensions_size(); i < e; i++) { if (i != index_vector_dim) { batch_dim_bounds.push_back(start_indices_shape.dimensions(i)); } } if (batch_dim_bounds.empty()) { return ElideDegenerateDims(accumulator, {0}); } return ExpandFirstDimIntoNDims(accumulator, batch_dim_bounds); } absl::StatusOr<HloInstruction*> ExpandIndexVectorIntoOperandSpace( HloInstruction* index_vector, const GatherDimensionNumbers& dim_numbers, int64_t operand_rank) { HloComputation* computation = index_vector->parent(); const Shape& index_shape = index_vector->shape(); if (operand_rank == 0) { return computation->AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateFromDimensions(index_shape.element_type(), {0}))); } HloInstruction* zero = computation->AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateFromDimensions(index_shape.element_type(), {1}))); std::vector<HloInstruction*> expanded_index_components; for (int i = 0; i < operand_rank; i++) { int64_t index_vector_dim_index = FindIndex(dim_numbers.start_index_map(), i); if (index_vector_dim_index != dim_numbers.start_index_map_size()) { TF_ASSIGN_OR_RETURN( HloInstruction * component_to_concat, MakeSliceHlo(index_vector, {index_vector_dim_index}, {index_vector_dim_index + 1}, {1})); expanded_index_components.push_back(component_to_concat); } else { expanded_index_components.push_back(zero); } } return 
MakeConcatHlo(expanded_index_components, 0); } absl::StatusOr<std::vector<HloInstruction*>> GatherLoopBody( const HloInstruction& gather, HloInstruction* induction_var, const std::vector<HloInstruction*>& incoming_loop_state) { const GatherDimensionNumbers& dim_numbers = gather.gather_dimension_numbers(); CHECK_EQ(incoming_loop_state.size(), 3); HloInstruction* const operand = incoming_loop_state[0]; HloInstruction* const start_indices = incoming_loop_state[1]; HloInstruction* const output_accumulator = incoming_loop_state[2]; bool has_scalar_indices = start_indices->shape().dimensions_size() == 1; CHECK_EQ(has_scalar_indices, dim_numbers.index_vector_dim() == gather.operand(1)->shape().dimensions_size()); HloInstruction* induction_var_as_vector = MakeBroadcastHlo(induction_var, {}, {1}); HloInstruction* index_vector; if (has_scalar_indices) { TF_ASSIGN_OR_RETURN( index_vector, MakeDynamicSliceHlo(start_indices, induction_var_as_vector, {1})); } else { TF_ASSIGN_OR_RETURN( HloInstruction * index_into_start_indices, PadVectorWithZeros(induction_var_as_vector, 0, 1)); int64_t index_vector_size = start_indices->shape().dimensions(1); TF_ASSIGN_OR_RETURN( HloInstruction * index_vector_2d, MakeDynamicSliceHlo(start_indices, index_into_start_indices, {1, index_vector_size})); TF_ASSIGN_OR_RETURN(index_vector, ElideDegenerateDims(index_vector_2d, {0})); } TF_ASSIGN_OR_RETURN( HloInstruction * gathered_slice_start, ExpandIndexVectorIntoOperandSpace(index_vector, dim_numbers, operand->shape().dimensions_size())); TF_ASSIGN_OR_RETURN(HloInstruction * gathered_slice, MakeDynamicSliceHlo(operand, gathered_slice_start, gather.gather_slice_sizes())); TF_ASSIGN_OR_RETURN( HloInstruction* const gathered_slice_with_dims_collapsed, ElideDegenerateDims(gathered_slice, dim_numbers.collapsed_slice_dims())); TF_ASSIGN_OR_RETURN( HloInstruction* const gathered_slice_for_update, PrependDegenerateDims(gathered_slice_with_dims_collapsed, 1)); TF_ASSIGN_OR_RETURN( HloInstruction* const 
index_vector_into_accumulator, PadVectorWithZeros( induction_var_as_vector, 0, gathered_slice_with_dims_collapsed->shape().dimensions_size())); TF_ASSIGN_OR_RETURN( HloInstruction* const updated_accumulator, MakeDynamicUpdateSliceHlo(output_accumulator, gathered_slice_for_update, index_vector_into_accumulator)); return absl::StatusOr<std::vector<HloInstruction*>>{ {operand, start_indices, updated_accumulator}}; } HloInstruction* CreateGatherLoopAccumulatorInitValue( HloComputation* computation, PrimitiveType element_type, absl::Span<const int64_t> slice_sizes, int64_t gather_loop_trip_count, const GatherDimensionNumbers& dim_numbers) { std::vector<int64_t> accumulator_state_shape_dims; accumulator_state_shape_dims.reserve(1 + slice_sizes.size()); accumulator_state_shape_dims.push_back(gather_loop_trip_count); for (int64_t i = 0; i < slice_sizes.size(); i++) { if (!absl::c_binary_search(dim_numbers.collapsed_slice_dims(), i)) { accumulator_state_shape_dims.push_back(slice_sizes[i]); } } return BroadcastZeros(computation, element_type, accumulator_state_shape_dims); } absl::StatusOr<HloInstruction*> PermuteBatchAndOffsetDims( HloInstruction* accumulator, absl::Span<const int64_t> offset_dims, int64_t output_rank) { std::vector<int64_t> permutation; permutation.reserve(output_rank); int64_t batch_idx_counter = 0; int64_t offset_idx_counter = output_rank - offset_dims.size(); for (int64_t i = 0; i < output_rank; i++) { bool is_offset_dim = absl::c_binary_search(offset_dims, i); if (is_offset_dim) { permutation.push_back(offset_idx_counter++); } else { permutation.push_back(batch_idx_counter++); } } return MakeTransposeHlo(accumulator, permutation); } int64_t GatherLoopTripCount(HloInstruction* gather_instr) { HloInstruction* start_indices = gather_instr->mutable_operand(1); const Shape& start_indices_shape = start_indices->shape(); const GatherDimensionNumbers& dim_numbers = gather_instr->gather_dimension_numbers(); int64_t trip_count = 1; for (int64_t i = 0, e = 
start_indices_shape.dimensions_size(); i < e; i++) { if (i != dim_numbers.index_vector_dim()) { trip_count *= start_indices_shape.dimensions(i); } } return trip_count; } int64_t GatherIsBroadcast(HloInstruction* gather_instr) { return absl::c_equal(gather_instr->gather_slice_sizes(), gather_instr->operand(0)->shape().dimensions()); } } absl::StatusOr<HloInstruction*> GatherExpander::ExpandInstruction( HloInstruction* gather_instr) { CHECK(!ShapeUtil::IsZeroElementArray(gather_instr->shape())); if (GatherIsBroadcast(gather_instr)) { if (ShapeUtil::IsZeroElementArray(gather_instr->operand(0)->shape())) { return MakeScalarLike(gather_instr, 0); } Shape broadcast_operand_shape = ShapeUtil::DeleteDimensions( gather_instr->gather_dimension_numbers().collapsed_slice_dims(), gather_instr->operand(0)->shape()); TF_ASSIGN_OR_RETURN(HloInstruction * broadcast_operand, MakeReshapeHlo(broadcast_operand_shape, gather_instr->mutable_operand(0))); gather_instr->SetupDerivedInstruction(broadcast_operand); HloInstruction* broadcast = MakeBroadcastHlo(broadcast_operand, gather_instr->gather_dimension_numbers().offset_dims(), gather_instr->shape()); gather_instr->SetupDerivedInstruction(broadcast); return broadcast; } HloComputation* computation = gather_instr->parent(); HloInstruction* operand = gather_instr->mutable_operand(0); HloInstruction* start_indices = gather_instr->mutable_operand(1); const Shape& output_shape = gather_instr->shape(); int64_t output_rank = output_shape.dimensions_size(); const GatherDimensionNumbers& dim_numbers = gather_instr->gather_dimension_numbers(); int64_t gather_loop_trip_count = GatherLoopTripCount(gather_instr); if (!IsInt32(gather_loop_trip_count)) { return Unimplemented( "Gather operations with more than 2147483647 gather indices are not " "supported. 
This error occurred for %s.", gather_instr->ToString()); } TF_ASSIGN_OR_RETURN( HloInstruction * canonical_start_indices, CanonicalizeGatherIndices(start_indices, dim_numbers.index_vector_dim())); CHECK_EQ(gather_loop_trip_count, canonical_start_indices->shape().dimensions(0)); HloInstruction* accumulator_init = CreateGatherLoopAccumulatorInitValue( computation, output_shape.element_type(), gather_instr->gather_slice_sizes(), gather_loop_trip_count, gather_instr->gather_dimension_numbers()); absl::StatusOr<std::vector<HloInstruction*>> gather_loop_result_or_error = WhileUtil::MakeCountedLoop( computation, gather_loop_trip_count, {operand, canonical_start_indices, accumulator_init}, [&](HloInstruction* indvar, const std::vector<HloInstruction*>& loop_state) { return GatherLoopBody(*gather_instr, indvar, loop_state); }, gather_instr->metadata()); TF_ASSIGN_OR_RETURN(std::vector<HloInstruction*> gather_loop_result, gather_loop_result_or_error); HloInstruction* accumulator_result = gather_loop_result.back(); TF_ASSIGN_OR_RETURN( HloInstruction* const accumulator_with_batch_dims_decanonicalized, AdjustBatchDimsInAccumulator(start_indices->shape(), accumulator_result, dim_numbers.index_vector_dim())); return PermuteBatchAndOffsetDims(accumulator_with_batch_dims_decanonicalized, dim_numbers.offset_dims(), output_rank); } bool GatherExpander::InstructionMatchesPattern(HloInstruction* inst) { return inst->opcode() == HloOpcode::kGather && !ShapeUtil::IsZeroElementArray(inst->shape()) && (mode_ == kEliminateAllGathers || GatherLoopTripCount(inst) == 1 || absl::c_equal(inst->gather_slice_sizes(), inst->operand(0)->shape().dimensions())); } }
#include "xla/service/gather_expander.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/test.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/test_macros.h" namespace xla { namespace { using GatherExpanderTest = HloTestBase; TEST_F(GatherExpanderTest, ErrorStatusOnTooManyIndices) { const std::string hlo_text = R"( HloModule TensorFlowGatherMultipleBatchDims ENTRY main { operand = s32[3,3] parameter(0) indices = s32[2147483647,5] parameter(1) ROOT gather = s32[2147483647,3,5] gather(operand, indices), offset_dims={1}, collapsed_slice_dims={1}, start_index_map={1}, index_vector_dim=2, slice_sizes={3, 1} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); absl::Status status = GatherExpander{GatherExpander::kEliminateAllGathers} .Run(module.get()) .status(); EXPECT_EQ(status.code(), tsl::error::UNIMPLEMENTED); ASSERT_THAT( status.message(), ::testing::HasSubstr("Gather operations with more than 2147483647 gather " "indices are not supported.")); } TEST_F(GatherExpanderTest, AvoidDegenerateDims) { const std::string hlo_text = R"( HloModule TensorFlowGatherV2 ENTRY main { operand = s32[3,3] parameter(0) indices = s32[2] parameter(1) ROOT gather = s32[3,2] gather(operand, indices), offset_dims={0}, collapsed_slice_dims={1}, start_index_map={1}, index_vector_dim=1, slice_sizes={3, 1} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); TF_ASSERT_OK_AND_ASSIGN( bool changed, GatherExpander{GatherExpander::kEliminateAllGathers}.Run(module.get())); ASSERT_TRUE(changed); HloInstruction* while_instr = nullptr; for (auto* instr : module->entry_computation()->instructions()) { if (instr->opcode() == HloOpcode::kWhile) { ASSERT_EQ(while_instr, nullptr) << "Expected exactly one while instruction in the entry computation " "after gather expansion"; while_instr = instr; } } ASSERT_NE(while_instr, nullptr) << "Expected exactly one while instruction in the entry 
computation " "after gather expansion"; const Shape& while_shape = while_instr->shape(); ASSERT_TRUE(while_shape.IsTuple()); ASSERT_EQ(ShapeUtil::TupleElementCount(while_shape), 4); EXPECT_TRUE(ShapeUtil::SameDimensions( ShapeUtil::MakeShape(S32, {3, 3}), ShapeUtil::GetTupleElementShape(while_shape, 1))); EXPECT_TRUE(ShapeUtil::SameDimensions( ShapeUtil::MakeShape(S32, {2}), ShapeUtil::GetTupleElementShape(while_shape, 2))); EXPECT_TRUE(ShapeUtil::SameDimensions( ShapeUtil::MakeShape(S32, {2, 3}), ShapeUtil::GetTupleElementShape(while_shape, 3))); } TEST_F(GatherExpanderTest, CheckOpMetadata) { const std::string hlo_text = R"( HloModule TensorFlowGatherV2 ENTRY main { operand = s32[3,3] parameter(0) indices = s32[2] parameter(1) ROOT gather = s32[3,2] gather(operand, indices), offset_dims={0}, collapsed_slice_dims={1}, start_index_map={1}, index_vector_dim=1, slice_sizes={3, 1} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); OpMetadata metadata; metadata.set_op_name("Gather"); module->entry_computation()->root_instruction()->set_metadata(metadata); TF_ASSERT_OK_AND_ASSIGN( bool changed, GatherExpander{GatherExpander::kEliminateAllGathers}.Run(module.get())); ASSERT_TRUE(changed); HloInstruction* while_instr = nullptr; for (auto* instr : module->entry_computation()->instructions()) { if (instr->opcode() == HloOpcode::kWhile) { ASSERT_EQ(while_instr, nullptr) << "Expected exactly one while instruction in the entry computation " "after gather expansion"; while_instr = instr; } } ASSERT_NE(while_instr, nullptr) << "Expected exactly one while instruction in the entry computation " "after gather expansion"; EXPECT_EQ(while_instr->metadata().op_name(), "Gather"); } TEST_F(GatherExpanderTest, EliminateSimpleGathersSkipsNontrivialGather) { const std::string hlo_text = R"( HloModule TensorFlowGatherV1 ENTRY main { operand = s32[3,3] parameter(0) indices = s32[2] parameter(1) ROOT gather = s32[2,3] gather(operand, 
indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1, 3} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GatherExpander pass(GatherExpander::kEliminateSimpleGathers); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get())); ASSERT_FALSE(changed); } TEST_F(GatherExpanderTest, EliminateSimpleGathersRewritesTrivialGather) { const std::string hlo_text = R"( HloModule test ENTRY main { operand = s32[100] parameter(0) indices = s32[1] parameter(1) ROOT gather = s32[10] gather(operand, indices), offset_dims={0}, collapsed_slice_dims={}, start_index_map={0}, index_vector_dim=0, slice_sizes={10} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GatherExpander pass(GatherExpander::kEliminateAllGathers); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get())); ASSERT_TRUE(changed); ASSERT_FALSE(hlo_query::ContainsInstrWithOpcode(module->entry_computation(), {HloOpcode::kGather})); } TEST_F(GatherExpanderTest, GatherIsBroadcast) { const std::string hlo_text = R"( HloModule test ENTRY main { operand = s32[1,3] parameter(0) indices = s32[7,5] parameter(1) ROOT gather = s32[7,3,5] gather(operand, indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=2, slice_sizes={1,3} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text)); GatherExpander pass(GatherExpander::kEliminateSimpleGathers); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get())); ASSERT_TRUE(changed); ASSERT_FALSE(hlo_query::ContainsInstrWithOpcode(module->entry_computation(), {HloOpcode::kGather})); ASSERT_TRUE(hlo_query::ContainsInstrWithOpcode(module->entry_computation(), {HloOpcode::kBroadcast})); module->VerifyOrAddFailure("after-gather-expander."); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gather_expander.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gather_expander_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
3393ebe0-0d02-4328-8e95-0a40a35cb820
cpp
tensorflow/tensorflow
tree_reduction_rewriter
third_party/xla/xla/service/gpu/transforms/tree_reduction_rewriter.cc
third_party/xla/xla/service/cpu/tests/tree_reduction_rewriter_test.cc
#include "xla/service/gpu/transforms/tree_reduction_rewriter.h" #include <algorithm> #include <cmath> #include <cstdint> #include <iterator> #include <memory> #include <utility> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/numeric/bits.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/service/collective_ops_utils.h" #include "xla/service/gpu/reduction_utils.h" #include "xla/service/hlo_module_config.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { absl::InlinedVector<int64_t, 2> GetSortedReducedDims( HloReduceInstruction *reduce) { absl::InlinedVector<int64_t, 2> reduced_dims{reduce->dimensions().begin(), reduce->dimensions().end()}; absl::c_sort(reduced_dims); return reduced_dims; } bool IsMinMaxReduction(HloReduceInstruction *reduce) { HloComputation *called = &reduce->to_apply()[0]; if (auto reduction_kind = MatchReductionComputation(called)) { return reduction_kind == ReductionKind::MAX || reduction_kind == ReductionKind::MIN; } return false; } } class ReductionRewriterVisitor : public DfsHloRewriteVisitor { public: explicit ReductionRewriterVisitor(se::GpuComputeCapability gpu_version) : gpu_version_(gpu_version) {} absl::Status HandleReduce(HloInstruction *hlo) override { auto *reduce = Cast<HloReduceInstruction>(hlo); VLOG(3) << "Reduction instruction: " << reduce->ToString(); const HloModuleConfig &config = 
reduce->GetModule()->config(); if (!MatchReductionForSplit(reduce, config)) { return absl::OkStatus(); } ReductionDimensions reduction_dims = GetReductionKindAndContiguousComponents(*hlo); if (ReductionIsRaceFree(config, reduction_dims)) { VLOG(3) << "Base case: dimensions fit"; return absl::OkStatus(); } auto sorted_dims_to_reduce = GetSortedReducedDims(reduce); CHECK_LE(sorted_dims_to_reduce.size(), 2); if (reduction_dims.is_row_reduction && reduction_dims .dimensions[ReductionDimensions::kRowMajorReducedDimension] > BatchedReductionRaceFreeBound()) { VLOG(2) << "Splitting batched dimension reduce into a separate reduction"; return RewriteBatchDimensionLargerThanTile(reduce, reduction_dims, sorted_dims_to_reduce); } SplitParams split_params = ComputeSplitParams(reduce, reduction_dims, sorted_dims_to_reduce); return SplitReductionDimension(reduce, split_params, sorted_dims_to_reduce); } private: bool MatchReductionForSplit(HloReduceInstruction *reduce, const HloModuleConfig &config) { bool reductions_via_mlir_disabled = config.debug_options().xla_gpu_mlir_emitter_level() < 4; if (reductions_via_mlir_disabled && IsMinMaxReduction(reduce)) { VLOG(1) << "Not performing tree expansion on min/max-reduction: " << reduce->ToString() << " since min/max operations are associative"; return false; } if (!IsReductionFromOrToContiguousDimensions(*reduce)) { VLOG(3) << "Is not a reduction from or to contiguous dimensions"; return false; } VLOG(3) << "Perform rewrite"; return true; } bool ShouldSwapInnerAndOuterReducedMinorDimension(uint64_t k1, uint64_t k2, uint64_t n, int64_t race_free_bound, bool is_row_reduction) { CHECK(k1 >= k2); if (k1 > race_free_bound) { return false; } if (is_row_reduction) { bool maybe_vectorized = k2 % 2 == 0 && n % 2 == 0; if (maybe_vectorized) { return k2 * 2 < k1 || k1 % 2 == 0; } return n % 2 == 0 || k1 % 2 != 0; } return true; } struct SplitParams { int64_t k1; int64_t k2; int64_t dim; }; SplitParams ComputeSplitParams( HloReduceInstruction 
*reduce, const ReductionDimensions &reduction_dims, absl::Span<const int64_t> sorted_dims_to_reduce) { absl::Span<int64_t const> input_shape_dims = reduce->inputs()[0]->shape().dimensions(); int64_t reduced_dim = sorted_dims_to_reduce.back(); int64_t reduced_dim_size = input_shape_dims[reduced_dim]; VLOG(3) << "reduced dim size = " << reduced_dim_size; uint64_t k2 = static_cast<uint64_t>(std::floor(std::sqrt(reduced_dim_size))); int64_t race_free_bound = ReductionDimensionRaceFreeBound( reduce->GetModule()->config(), reduction_dims); if (k2 > race_free_bound) { k2 = race_free_bound; } uint64_t minimum_padding = (k2 - reduced_dim_size % k2) % k2; uint64_t best_k1 = (reduced_dim_size + minimum_padding) / k2; for (uint64_t i = k2 - 1; i > k2 / 2; --i) { uint64_t padding = (i - reduced_dim_size % i) % i; if (padding < minimum_padding || (padding == minimum_padding && absl::has_single_bit(i))) { minimum_padding = padding; best_k1 = (reduced_dim_size + padding) / i; } } uint64_t padded_k = reduced_dim_size + minimum_padding; uint64_t best_k2 = padded_k / best_k1; if (ShouldSwapInnerAndOuterReducedMinorDimension( best_k1, best_k2, reduced_dim_size, race_free_bound, reduction_dims.is_row_reduction)) { std::swap(best_k1, best_k2); } return SplitParams{static_cast<int64_t>(best_k1), static_cast<int64_t>(best_k2), reduced_dim}; } absl::Status SplitReductionDimension( HloReduceInstruction *reduce, const SplitParams &split_params, absl::Span<const int64_t> sorted_dims_to_reduce) { absl::Span<int64_t const> reduce_input_dims = reduce->inputs()[0]->shape().dimensions(); int64_t split_dim_size = reduce_input_dims[split_params.dim]; VLOG(2) << "dimension to split = " << split_params.dim << " with " << split_dim_size << " elements into " << split_params.k1 << " by " << split_params.k2; HloInstruction::InstructionVector padded_inputs(reduce->inputs().begin(), reduce->inputs().end()); auto padded_size = split_params.k1 * split_params.k2; absl::InlinedVector<int64_t, 3> 
padded_dimensions(reduce_input_dims.begin(), reduce_input_dims.end()); if (split_dim_size != padded_size) { padded_dimensions[split_params.dim] = padded_size; PaddingConfig padding_config = MakeNoPaddingConfig(reduce_input_dims.size()); padding_config.mutable_dimensions(split_params.dim) ->set_edge_padding_high(padded_size - split_dim_size); for (int input_idx = 0; input_idx < padded_inputs.size(); ++input_idx) { auto &reduction_input = padded_inputs[input_idx]; Shape padded_shape = ShapeUtil::MakeShape( reduction_input->shape().element_type(), padded_dimensions); VLOG(2) << "Generated padded shape: " << padded_shape.ToString(); reduction_input = reduce->parent()->AddInstruction( HloInstruction::CreatePad(padded_shape, reduction_input, reduce->init_values()[input_idx], padding_config), &reduction_input->metadata()); } } absl::InlinedVector<int64_t, 3> reshaped_dimensions; int64_t input_rank = reduce_input_dims.size(); for (int64_t dim_idx = 0; dim_idx < input_rank; dim_idx++) { if (dim_idx == split_params.dim) { reshaped_dimensions.push_back(split_params.k1); reshaped_dimensions.push_back(split_params.k2); } else { reshaped_dimensions.push_back(padded_dimensions[dim_idx]); } } absl::InlinedVector<int64_t, 2> inner_reduce_dims( sorted_dims_to_reduce.begin(), sorted_dims_to_reduce.end()); auto split_dim_it = std::find(inner_reduce_dims.begin(), inner_reduce_dims.end(), split_params.dim); *split_dim_it += 1; absl::InlinedVector<int64_t, 1> outer_reduce_dims{ split_params.dim - std::distance(inner_reduce_dims.begin(), split_dim_it)}; absl::InlinedVector<int64_t, 3> inner_reduce_shape = RemoveElements(inner_reduce_dims, reshaped_dimensions); HloInstruction::InstructionVector reshaped_padded_inputs; absl::InlinedVector<Shape, 2> inner_reduce_shapes; for (HloInstruction *padded_input : padded_inputs) { Shape reshaped_shape = ShapeUtil::MakeShape( padded_input->shape().element_type(), reshaped_dimensions); HloInstruction *reshaped_padded_input = 
reduce->parent()->AddInstruction( HloInstruction::CreateBitcast(reshaped_shape, padded_input), &padded_input->metadata()); VLOG(2) << "Generated reshape: " << reshaped_padded_input->ToString(); reshaped_padded_inputs.push_back(reshaped_padded_input); inner_reduce_shapes.push_back(ShapeUtil::MakeShape( padded_input->shape().element_type(), inner_reduce_shape)); } HloInstruction *inner_reduce = reduce->parent()->AddInstruction( HloInstruction::CreateReduce( ShapeUtil::MakeMaybeTupleShape(inner_reduce_shapes), reshaped_padded_inputs, reduce->init_values(), inner_reduce_dims, reduce->to_apply()), &reduce->metadata()); VLOG(1) << "Generated inner reduction: " << inner_reduce->ToString(); std::unique_ptr<HloInstruction> outer_reduce = HloInstruction::CreateReduce( reduce->shape(), inner_reduce, reduce->init_values(), outer_reduce_dims, reduce->to_apply()); VLOG(1) << "Generated outer reduction: " << outer_reduce->ToString(); return ReplaceWithNewInstruction(reduce, std::move(outer_reduce)); } absl::Status RewriteBatchDimensionLargerThanTile( HloReduceInstruction *hlo, const ReductionDimensions &reduction_dimensions, absl::Span<const int64_t> sorted_dims_to_reduce) { CHECK(reduction_dimensions.is_row_reduction); absl::InlinedVector<Shape, 2> tuple_shapes; int64_t minor_reduction_dim = sorted_dims_to_reduce.back(); for (HloInstruction *input : hlo->inputs()) { tuple_shapes.push_back( ShapeUtil::DeleteDimension(minor_reduction_dim, input->shape())); } HloInstruction *inner_reduce = hlo->parent()->AddInstruction(HloInstruction::CreateReduce( ShapeUtil::MakeMaybeTupleShape(tuple_shapes), hlo->inputs(), hlo->init_values(), {minor_reduction_dim}, hlo->to_apply())); VLOG(1) << "Inner reduction: " << inner_reduce->ToString(); std::unique_ptr<HloInstruction> out = HloInstruction::CreateReduce( hlo->shape(), inner_reduce, hlo->init_values(), {0}, hlo->to_apply()); VLOG(1) << "Generated: " << out->ToString(); return ReplaceWithNewInstruction(hlo, std::move(out)); } 
se::GpuComputeCapability gpu_version_; }; absl::StatusOr<bool> TreeReductionRewriter::Run( HloModule *module, const absl::flat_hash_set<absl::string_view> &execution_threads) { VLOG(5) << "Rewriter input: " << module->ToString(); TF_ASSIGN_OR_RETURN(bool changed, ReductionRewriterVisitor(gpu_version_) .RunOnModule(module, execution_threads)); VLOG(5) << "Rewriter output: " << module->ToString(); return changed; } } }
#include "xla/service/cpu/tests/cpu_codegen_test.h" #include "tsl/platform/test.h" namespace xla { namespace cpu { namespace { class TreeReductionRewriterTest : public CpuCodegenTest {}; TEST_F(TreeReductionRewriterTest, SimpleRewrite) { const char* hlo_text = R"( HloModule SimpleReduction add { acc = f32[] parameter(1) op = f32[] parameter(0) ROOT out = f32[] add(acc, op) } ENTRY main { input = f32[1000] parameter(0) zero = f32[] constant(0) ROOT out = f32[] reduce(input, zero), dimensions={0}, to_apply=add } )"; MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %main (input: f32[1000]) -> f32[] { ; CHECK-NEXT: [[INSTR_0:%[^ ]+]] = f32[1000]{0} parameter(0) ; CHECK-NEXT: [[INSTR_1:%[^ ]+]] = f32[] constant(0) ; CHECK-NEXT: [[INSTR_2:%[^ ]+]] = f32[32]{0} reduce-window([[INSTR_0]], [[INSTR_1]]), window={size=32 stride=32 pad=12_12}, to_apply=[[INSTR_3:%[^ ]+]] ; CHECK-NEXT: ROOT [[INSTR_4:%[^ ]+]] = f32[] reduce([[INSTR_2]], [[INSTR_1]]), dimensions={0}, to_apply=[[INSTR_3]] )"); } TEST_F(TreeReductionRewriterTest, RewriteMultipleDimensions) { const char* hlo_text = R"( HloModule SimpleReduction add { acc = f32[] parameter(1) op = f32[] parameter(0) ROOT out = f32[] add(acc, op) } ENTRY main { input = f32[100,100] parameter(0) zero = f32[] constant(0) ROOT out = f32[] reduce(input, zero), dimensions={0,1}, to_apply=add } )"; MatchOptimizedHlo(hlo_text, R"( ; CHECK: [[INSTR_0:%[^ ]+]] = f32[4,4]{1,0} reduce-window([[INSTR_1:%[^ ]+]], [[INSTR_2:%[^ ]+]]), window={size=32x32 stride=32x32 pad=14_14x14_14}, to_apply=[[INSTR_3:%[^ ]+]] ; CHECK-NEXT: ROOT [[INSTR_4:%[^ ]+]] = f32[] reduce([[INSTR_0]], [[INSTR_2]]), dimensions={0,1}, to_apply=[[INSTR_3]] )"); } TEST_F(TreeReductionRewriterTest, RewriteMultipleDimensionsSingleSmaller) { const char* hlo_text = R"( HloModule SimpleReduction add { acc = f32[] parameter(1) op = f32[] parameter(0) ROOT out = f32[] add(acc, op) } ENTRY main { input = f32[1000,31] parameter(0) zero = f32[] constant(0) ROOT out = f32[] 
reduce(input, zero), dimensions={0,1}, to_apply=add } )"; MatchOptimizedHlo(hlo_text, R"( ; CHECK: [[INSTR_0:%[^ ]+]] = f32[32,1]{1,0} reduce-window([[INSTR_1:%[^ ]+]], [[INSTR_2:%[^ ]+]]), window={size=32x31 stride=32x31 pad=12_12x0_0}, to_apply=[[INSTR_3:%[^ ]+]] ; CHECK-NEXT: ROOT [[INSTR_4:%[^ ]+]] = f32[] reduce([[INSTR_0]], [[INSTR_2]]), dimensions={0,1}, to_apply=[[INSTR_3]] )"); } TEST_F(TreeReductionRewriterTest, NoRewriteRequired) { const char* hlo_text = R"( HloModule SimpleReduction add { acc = f32[] parameter(1) op = f32[] parameter(0) ROOT out = f32[] add(acc, op) } ENTRY main { input = f32[31,31] parameter(0) zero = f32[] constant(0) ROOT out = f32[] reduce(input, zero), dimensions={0,1}, to_apply=add } )"; MatchOptimizedHlo(hlo_text, R"( )"); } TEST_F(TreeReductionRewriterTest, NoRewriteRequiredZeroDim) { const char* hlo_text = R"( HloModule SimpleReduction add { acc = f32[] parameter(1) op = f32[] parameter(0) ROOT out = f32[] add(acc, op) } ENTRY main { input = f32[3000,0] parameter(0) zero = f32[] constant(0) ROOT out = f32[] reduce(input, zero), dimensions={0,1}, to_apply=add } )"; MatchOptimizedHlo(hlo_text, R"( )"); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/tree_reduction_rewriter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/tests/tree_reduction_rewriter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
b94443b4-6fb3-4c90-8ccc-d0cf952d9da3
cpp
tensorflow/tensorflow
hlo_alias_analysis
third_party/xla/xla/service/hlo_alias_analysis.cc
third_party/xla/xla/service/hlo_alias_analysis_test.cc
#include "xla/service/hlo_alias_analysis.h" #include <algorithm> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "xla/comparison_util.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/map_util.h" #include "xla/service/hlo_buffer.h" #include "xla/service/hlo_value.h" #include "xla/shape_util.h" #include "xla/types.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" namespace xla { using absl::StrAppend; namespace { using FlatValueSet = absl::flat_hash_set<const HloValue*>; void ComputeInputOutputAliasedValues(const HloValue& value, const HloDataflowAnalysis& dataflow, FlatValueSet& aliased_values) { const HloModule& module = dataflow.module(); const HloComputation& entry_computation = *module.entry_computation(); const HloInputOutputAliasConfig& io_alias_config = module.input_output_alias_config(); for (const HloPosition& pos : value.positions()) { if (pos.instruction == entry_computation.root_instruction()) { std::optional<HloInputOutputAliasConfig::Alias> aliased_input = io_alias_config.GetAliasedParameter(pos.index); if (aliased_input) { aliased_values.insert( &dataflow.GetUniqueValueAt(entry_computation.parameter_instruction( aliased_input->parameter_number), aliased_input->parameter_index)); } } } } void ComputeWhileAliasedValues(const HloValue& value, const HloDataflowAnalysis& dataflow, FlatValueSet& aliased_values) { VLOG(3) << "Compute kWhile aliases"; for (const HloUse& use : value.GetUses()) { if (use.instruction->opcode() == HloOpcode::kWhile) { const HloValue& while_value = dataflow.GetUniqueValueAt(use.instruction, use.operand_index); aliased_values.insert(&while_value); VLOG(3) << " value is init value to a while; must share buffer with " "while 
value " << while_value; } } if (value.defining_instruction()->opcode() == HloOpcode::kParameter) { const HloComputation* computation = value.defining_instruction()->parent(); const CallGraphNode& call_graph_node = dataflow.call_graph().GetNode(computation); for (const CallSite& callsite : call_graph_node.caller_callsites()) { if (callsite.instruction()->opcode() == HloOpcode::kWhile) { CHECK_EQ(call_graph_node.caller_callsites().size(), 1); const HloValue& while_value = dataflow.GetUniqueValueAt( callsite.instruction(), value.defining_index()); VLOG(3) << " value is parameter value of the body or condition of a " "while; must share buffer with while value " << while_value; aliased_values.insert(&while_value); } } } for (const HloPosition& position : value.positions()) { if (!position.instruction->IsRoot()) continue; const HloComputation* computation = position.instruction->parent(); const CallGraphNode& call_graph_node = dataflow.call_graph().GetNode(computation); for (const CallSite& callsite : call_graph_node.caller_callsites()) { if (callsite.instruction()->opcode() == HloOpcode::kWhile && callsite.instruction()->while_body() == computation) { CHECK_EQ(call_graph_node.caller_callsites().size(), 1) << "Call graph must have been flattened."; const HloValue& while_value = dataflow.GetUniqueValueAt(callsite.instruction(), position.index); VLOG(3) << " value @ " << position << " is root of " << callsite.instruction()->name() << "; body root and while value root must share buffer " "among them: " << while_value; aliased_values.insert(&while_value); } } } } void ComputeConditionalAliasedValues(const HloValue& value, const HloDataflowAnalysis& dataflow, FlatValueSet& aliased_values) { VLOG(3) << "Compute kConditional aliases"; for (const HloPosition& position : value.positions()) { if (!position.instruction->IsRoot()) continue; const HloComputation* computation = position.instruction->parent(); const CallGraphNode& call_graph_node = 
dataflow.call_graph().GetNode(computation); for (const CallSite& callsite : call_graph_node.caller_callsites()) { if (callsite.instruction()->opcode() == HloOpcode::kConditional) { CHECK_EQ(call_graph_node.caller_callsites().size(), 1); const HloValue& cond_value = dataflow.GetUniqueValueAt(callsite.instruction(), position.index); VLOG(3) << " value @ " << position << " is root of " << callsite.instruction()->name() << "; branch computation roots must share buffer among them : " << cond_value; aliased_values.insert(&cond_value); } } } } void ComputeInPlaceOperationAliasedValues(const HloValue& value, const HloDataflowAnalysis& dataflow, FlatValueSet& aliased_values) { VLOG(3) << "Compute aliases for in-place operations (e.g. " "kDynamicUpdateSlice and kScatter)"; for (const HloPosition& position : value.positions()) { HloInstruction* instruction = position.instruction; for (const auto& operand_and_output_index : HloDataflowAnalysis::GetInPlaceInputOutputPairs(instruction)) { if (position.index == operand_and_output_index.second) { const HloOperandIndex& operand_index = operand_and_output_index.first; const HloValue& operand_value = dataflow.GetUniqueValueAt( instruction->operand(operand_index.operand_number), operand_index.operand_index); VLOG(3) << " operand value " << operand_value << " aliases."; aliased_values.insert(&operand_value); } } } for (const HloUse& use : value.GetUses()) { for (const auto& operand_and_output_index : HloDataflowAnalysis::GetInPlaceInputOutputPairs(use.instruction)) { const HloOperandIndex& operand_index = operand_and_output_index.first; if (use.operand_number == operand_index.operand_number && use.operand_index == operand_index.operand_index) { const HloValue& use_value = dataflow.GetUniqueValueAt( use.instruction, operand_and_output_index.second); VLOG(3) << " use value " << use_value << " aliases."; aliased_values.insert(&use_value); } } } } FlatValueSet ComputeAliasedValues(const HloValue& value, const HloDataflowAnalysis& dataflow) 
{ if (VLOG_IS_ON(2)) { for (const HloUse& use : value.GetUses()) { VLOG(2) << "Use of value " << value << ": " << use; } } FlatValueSet aliased_values{&value}; ComputeInputOutputAliasedValues(value, dataflow, aliased_values); ComputeWhileAliasedValues(value, dataflow, aliased_values); ComputeConditionalAliasedValues(value, dataflow, aliased_values); ComputeInPlaceOperationAliasedValues(value, dataflow, aliased_values); return aliased_values; } std::vector<HloBuffer> CreateBuffers(const HloDataflowAnalysis& dataflow) { const std::vector<HloValue*>& values = dataflow.values(); size_t num_buffers = values.size(); std::vector<FlatValueSet> buffer_values(values.size()); absl::flat_hash_map<const HloValue*, FlatValueSet*> value_to_set; value_to_set.reserve(values.size()); for (size_t i = 0; i < values.size(); ++i) { buffer_values[i].insert(values[i]); value_to_set[values[i]] = &buffer_values[i]; } for (const HloValue* value : values) { VLOG(3) << "Merging colocated values, value: " << *value; FlatValueSet aliased_values = ComputeAliasedValues(*value, dataflow); if (aliased_values.size() < 2) continue; std::vector<std::pair<FlatValueSet*, HloValue::Id>> aliased_sets; aliased_sets.reserve(aliased_values.size()); for (const HloValue* aliased : aliased_values) { aliased_sets.push_back({value_to_set[aliased], aliased->id()}); } auto key = [](const auto& set_and_id) { return std::make_pair(set_and_id.first->size(), -set_and_id.second); }; FlatValueSet* union_set = absl::c_max_element(aliased_sets, LessThanByKey(key))->first; for (auto& aliased_set_and_id : aliased_sets) { FlatValueSet* aliased_set = aliased_set_and_id.first; if ((aliased_set != union_set) && !aliased_set->empty()) { for (const HloValue* aliased_value : *aliased_set) { CHECK(union_set->insert(aliased_value).second); value_to_set[aliased_value] = union_set; } aliased_set->clear(); --num_buffers; } } } std::vector<HloBuffer> buffers; buffers.reserve(num_buffers); for (const FlatValueSet& value_set : 
buffer_values) { if (!value_set.empty()) { HloBuffer::Id id = buffers.size(); buffers.push_back({id, HloValueSet(value_set).TakeValues()}); } } CHECK_EQ(buffers.size(), num_buffers); return buffers; } } HloAliasAnalysis::HloAliasAnalysis(const HloModule* module) : module_(module) {} const HloBuffer& HloAliasAnalysis::GetUniqueBufferAt( const HloInstruction* instruction, const ShapeIndex& index) const { std::vector<const HloBuffer*> buffers = ComputeBuffersAt(instruction, index); CHECK_EQ(buffers.size(), 1); return *buffers[0]; } HloBuffer& HloAliasAnalysis::GetUniqueBufferAt( const HloInstruction* instruction, const ShapeIndex& index) { return GetBuffer(const_cast<const HloAliasAnalysis*>(this) ->GetUniqueBufferAt(instruction, index) .id()); } std::vector<const HloBuffer*> HloAliasAnalysis::ComputeBuffersAt( const HloInstruction* instruction, const ShapeIndex& index) const { const HloValueSet& value_set = dataflow_analysis_->GetValueSet(instruction, index); std::vector<const HloBuffer*> buffers; buffers.reserve(value_set.values().size()); for (const HloValue* value : value_set.values()) { buffers.push_back(&GetBufferContainingValue(*value)); } absl::c_sort(buffers, HloBuffer::IdLessThan); buffers.erase(std::unique(buffers.begin(), buffers.end()), buffers.end()); return buffers; } absl::Status HloAliasAnalysis::Verify() const { for (const auto& pair : value_to_buffer_) { const HloValue* value = pair.first; const HloBuffer& buffer = *pair.second; TF_RET_CHECK(absl::c_linear_search(buffer.values(), value)); } for (HloBuffer::Id id = 0; id < buffers_.size(); ++id) { const HloBuffer& buffer = buffers_[id]; TF_RET_CHECK(buffer.id() == id); HloValue::Id last_value_id = -1; for (const HloValue* value : buffer.values()) { TF_RET_CHECK(GetBufferContainingValue(*value) == buffer); TF_RET_CHECK(value->id() > last_value_id); last_value_id = value->id(); } } return absl::OkStatus(); } std::string HloAliasAnalysis::ToString() const { std::string out = 
absl::StrCat("HloAliasAnalysis, module ", module_->name(), "\n"); StrAppend(&out, " Buffers at each position:\n"); for (const HloComputation* computation : module_->computations()) { for (const HloInstruction* instruction : computation->instructions()) { StrAppend(&out, " ", instruction->name(), ":\n"); if (instruction->shape().IsTuple()) { ShapeUtil::ForEachSubshape( instruction->shape(), [&out, &instruction, this](const Shape&, const ShapeIndex& index) { StrAppend(&out, " tuple index ", index.ToString(), ":\n"); for (const HloBuffer* buffer : ComputeBuffersAt(instruction, index)) { StrAppend(&out, " ", buffer->ToString(), "\n"); } }); } else { for (const HloBuffer* buffer : ComputeBuffersAt(instruction, {})) { StrAppend(&out, " ", buffer->ToString(), "\n"); } } } } StrAppend(&out, " Buffers:\n"); for (const HloBuffer& buffer : buffers()) { StrAppend(&out, " ", buffer.ToString(), "\n"); StrAppend(&out, " positions:\n"); for (const HloPosition& position : buffer.ComputePositions()) { StrAppend(&out, " ", position.ToString(), "\n"); } } return out; } absl::StatusOr<std::unique_ptr<HloAliasAnalysis>> HloAliasAnalysis::Run( const HloModule* module, const HloDataflowAnalysis::CanShareBuffer& can_share_buffer) { VLOG(2) << "HloAliasAnalysis::Run on module " << module->name(); XLA_VLOG_LINES(2, module->ToString()); auto alias_analysis = absl::WrapUnique(new HloAliasAnalysis(module)); TF_ASSIGN_OR_RETURN(alias_analysis->dataflow_analysis_, HloDataflowAnalysis::Run(*module, true, false, can_share_buffer)); size_t num_values = alias_analysis->dataflow_analysis_->values().size(); alias_analysis->buffers_ = CreateBuffers(alias_analysis->dataflow_analysis()); alias_analysis->value_to_buffer_.reserve(num_values); for (HloBuffer& buffer : alias_analysis->buffers_) { for (const HloValue* value : buffer.values()) { alias_analysis->value_to_buffer_[value] = &buffer; } } CHECK_EQ(alias_analysis->value_to_buffer_.size(), num_values); TF_DCHECK_OK(alias_analysis->Verify()); 
HloInstruction* root = module->entry_computation()->root_instruction(); ShapeUtil::ForEachSubshape(root->shape(), [&](const Shape& , const ShapeIndex& index) { std::vector<const HloBuffer*> buffers = alias_analysis->ComputeBuffersAt(root, index); alias_analysis->live_out_buffers_.insert(buffers.begin(), buffers.end()); }); XLA_VLOG_LINES(2, alias_analysis->ToString()); return std::move(alias_analysis); } }
// Unit tests for HloAliasAnalysis: verify buffer assignment, aliasing
// through tuples/calls/whiles/fusions, input-output aliasing, and
// interference between values sharing a buffer.
#include "xla/service/hlo_alias_analysis.h"

#include <memory>
#include <set>
#include <vector>

#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/flatten_call_graph.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_ordering.h"
#include "xla/service/hlo_value.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"

namespace xla {
namespace {

using ::testing::UnorderedElementsAre;

// Test fixture: owns a verified HloModule and the analysis produced by
// RunAnalysis(), plus helpers to query buffers/values and common predicates.
class HloAliasAnalysisTest : public HloTestBase {
 protected:
  HloAliasAnalysisTest() : HloTestBase() { module_ = CreateNewVerifiedModule(); }

  // Runs alias analysis on module_ and caches the result in analysis_.
  // CHECK-fails (via .value()) if the analysis returns an error.
  HloAliasAnalysis& RunAnalysis() {
    analysis_ = HloAliasAnalysis::Run(module_.get(), nullptr).value();
    return *analysis_;
  }

  // Returns the buffers (deduplicated, by id order) that may appear at
  // `index` of `instruction`, according to the dataflow value set.
  std::vector<HloBuffer> GetBuffersAt(const HloInstruction* instruction,
                                      const ShapeIndex& index = {}) const {
    std::set<HloBuffer::Id> buffer_ids;
    for (const HloValue* value : analysis_->dataflow_analysis()
                                     .GetValueSet(instruction, index)
                                     .values()) {
      buffer_ids.insert(analysis_->GetBufferContainingValue(*value).id());
    }
    std::vector<HloBuffer> buffers;
    buffers.reserve(buffer_ids.size());
    for (HloBuffer::Id id : buffer_ids) {
      buffers.push_back(analysis_->GetBuffer(id));
    }
    return buffers;
  }

  // Convenience wrapper over the dataflow analysis' unique-definition lookup.
  const HloValue& GetValueDefinedAt(const HloInstruction* instruction,
                                    const ShapeIndex& index = {}) const {
    return analysis_->dataflow_analysis().GetValueDefinedAt(instruction, index);
  }

  // True iff any pair of distinct values assigned to the same buffer may
  // interfere under a dependency-based ordering of module_.
  bool AnyValuesInSameBufferInterfere() {
    DependencyHloOrdering ordering(module_.get());
    for (const HloBuffer& buffer : analysis_->buffers()) {
      for (const HloValue* value_a : buffer.values()) {
        for (const HloValue* value_b : buffer.values()) {
          if (*value_a != *value_b &&
              ordering.MayInterfere(*value_a, *value_b,
                                    analysis_->dataflow_analysis())) {
            VLOG(1) << *value_a << " interferes with " << *value_b
                    << " in buffer: " << buffer;
            return true;
          }
        }
      }
    }
    return false;
  }

  // True iff some position of `instruction` has values spread over more than
  // one buffer (i.e. the buffer at that position is ambiguous).
  bool InstructionBuffersAreAmbiguous(const HloInstruction* instruction) const {
    for (const auto& pair :
         analysis_->dataflow_analysis().GetInstructionValueSet(instruction)) {
      const HloValueSet& value_set = pair.second;
      const HloBuffer* buffer = nullptr;
      for (const HloValue* value : value_set.values()) {
        if (buffer == nullptr) {
          buffer = &analysis_->GetBufferContainingValue(*value);
        } else if (buffer != &analysis_->GetBufferContainingValue(*value)) {
          return true;
        }
      }
    }
    return false;
  }

  // True iff no buffer appears at more than one position of `instruction`.
  bool InstructionBuffersAreDistinct(const HloInstruction* instruction) const {
    absl::flat_hash_set<const HloBuffer*> buffers_seen;
    for (const auto& pair :
         analysis_->dataflow_analysis().GetInstructionValueSet(instruction)) {
      const HloValueSet& value_set = pair.second;
      absl::flat_hash_set<const HloBuffer*> buffers_at_this_index;
      for (const HloValue* value : value_set.values()) {
        buffers_at_this_index.insert(
            &analysis_->GetBufferContainingValue(*value));
      }
      // flat_hash_set::merge moves only elements NOT already present in
      // buffers_seen; anything left behind was seen at an earlier index,
      // so the buffers are not distinct.
      buffers_seen.merge(buffers_at_this_index);
      if (!buffers_at_this_index.empty()) return false;
    }
    return true;
  }

  std::unique_ptr<HloModule> module_;       // module under test
  std::unique_ptr<HloAliasAnalysis> analysis_;  // result of RunAnalysis()

  const Shape scalar_shape_ = ShapeUtil::MakeShape(F32, {});
};

// Two constants feeding an add: three unaliased single-value buffers.
TEST_F(HloAliasAnalysisTest, BinaryOperation) {
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto add = builder.AddInstruction(HloInstruction::CreateBinary(
      scalar_shape_, HloOpcode::kAdd, constant1, constant2));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  const HloAliasAnalysis& analysis = RunAnalysis();

  EXPECT_EQ(analysis.buffers().size(), 3);

  for (const HloInstruction* instruction : {constant1, constant2, add}) {
    EXPECT_EQ(analysis.GetUniqueBufferAt(instruction).GetUniqueValue(),
              GetValueDefinedAt(instruction));
  }

  EXPECT_FALSE(InstructionBuffersAreAmbiguous(add));
  EXPECT_TRUE(InstructionBuffersAreDistinct(add));
  EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}

// Tuple + GTEs: tuple elements alias the parameters they were built from.
TEST_F(HloAliasAnalysisTest, TupleAndGtes) {
  auto builder = HloComputation::Builder(TestName());
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
  auto param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
  auto tuple =
      builder.AddInstruction(HloInstruction::CreateTuple({param0, param1}));
  auto gte0 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, tuple, 0));
  auto gte1 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, tuple, 1));
  builder.AddInstruction(
      HloInstruction::CreateBinary(scalar_shape_, HloOpcode::kAdd, gte0, gte1));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  const HloAliasAnalysis& analysis = RunAnalysis();

  EXPECT_EQ(analysis.buffers().size(), 4);

  EXPECT_EQ(analysis.GetUniqueBufferAt(tuple, {}).GetUniqueValue(),
            GetValueDefinedAt(tuple, {}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(tuple, {0}).GetUniqueValue(),
            GetValueDefinedAt(param0));
  EXPECT_EQ(analysis.GetUniqueBufferAt(tuple, {1}).GetUniqueValue(),
            GetValueDefinedAt(param1));

  EXPECT_EQ(analysis.GetUniqueBufferAt(param0),
            analysis.GetUniqueBufferAt(tuple, {0}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(param0),
            analysis.GetUniqueBufferAt(gte0));

  EXPECT_THAT(
      analysis.GetUniqueBufferAt(param0).ComputePositions(),
      UnorderedElementsAre(HloPosition{param0, {}}, HloPosition{tuple, {0}},
                           HloPosition{gte0, {}}));

  EXPECT_FALSE(InstructionBuffersAreAmbiguous(tuple));
  EXPECT_TRUE(InstructionBuffersAreDistinct(tuple));
  EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}

// The same operand appears twice in a tuple: buffers are not distinct.
TEST_F(HloAliasAnalysisTest, NondistinctTuple) {
  auto builder = HloComputation::Builder(TestName());
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
  auto param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
  auto tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({param0, param1, param0}));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  const HloAliasAnalysis& analysis = RunAnalysis();

  EXPECT_THAT(
      analysis.GetUniqueBufferAt(param0).ComputePositions(),
      UnorderedElementsAre(HloPosition{param0, {}}, HloPosition{tuple, {0}},
                           HloPosition{tuple, {2}}));

  EXPECT_FALSE(InstructionBuffersAreAmbiguous(tuple));
  EXPECT_FALSE(InstructionBuffersAreDistinct(tuple));
  EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}

// Parameter-to-output aliasing propagates into the alias analysis buffers;
// a second alias to an already-aliased output index must be rejected.
TEST_F(HloAliasAnalysisTest, ParametersWithAliasing) {
  const Shape tuple_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});

  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "p0"));
  auto gte0 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, param, 0));
  auto gte1 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, param, 1));

  auto negate0 = builder.AddInstruction(
      HloInstruction::CreateUnary(scalar_shape_, HloOpcode::kNegate, gte0));
  auto negate1 = builder.AddInstruction(
      HloInstruction::CreateUnary(scalar_shape_, HloOpcode::kNegate, gte1));

  auto tuple =
      builder.AddInstruction(HloInstruction::CreateTuple({negate0, negate1}));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      {0}, 0, {0}));
  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      {1}, 0, {1}));
  // Output index {1} is already aliased above; a second alias must fail.
  ASSERT_IS_NOT_OK(module_->input_output_alias_config().SetUpAlias(
      {1}, 0, {0}));

  const HloAliasAnalysis& analysis = RunAnalysis();

  EXPECT_EQ(analysis.GetUniqueBufferAt(gte0),
            analysis.GetUniqueBufferAt(tuple, {0}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(gte1),
            analysis.GetUniqueBufferAt(tuple, {1}));
}

// Cross-aliasing (param {0} -> output {1} and vice versa) collapses the two
// elements into a single buffer at all four positions.
TEST_F(HloAliasAnalysisTest, ParametersWithCrossAliasing) {
  const Shape tuple_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});

  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "p0"));
  auto gte0 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, param, 0));
  auto gte1 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, param, 1));
  auto tuple =
      builder.AddInstruction(HloInstruction::CreateTuple({gte0, gte1}));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      {0}, 0, {1}));
  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      {1}, 0, {0}));
  // Output index {1} already has an alias; adding another must fail.
  ASSERT_IS_NOT_OK(module_->input_output_alias_config().SetUpAlias(
      {1}, 0, {1}));

  const HloAliasAnalysis& analysis = RunAnalysis();

  // Every gte aliases every output element due to the cross aliasing.
  EXPECT_EQ(analysis.GetUniqueBufferAt(gte0),
            analysis.GetUniqueBufferAt(tuple, {0}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(gte0),
            analysis.GetUniqueBufferAt(tuple, {1}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(gte1),
            analysis.GetUniqueBufferAt(tuple, {0}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(gte1),
            analysis.GetUniqueBufferAt(tuple, {1}));
}

// Input-output aliasing combined with a while loop: the while's element {1}
// buffer spans the loop body, condition, and the aliased entry output.
TEST_F(HloAliasAnalysisTest, InputOutputAliasingWithWhile) {
  const Shape tuple_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});

  // Loop body: returns (element_0, element_0 + element_1).
  auto body_builder = HloComputation::Builder("body");
  auto body_param = body_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "param"));
  auto body_element_0 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
  auto body_element_1 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
  auto add = body_builder.AddInstruction(HloInstruction::CreateBinary(
      scalar_shape_, HloOpcode::kAdd, body_element_0, body_element_1));
  auto body_tuple = body_builder.AddInstruction(
      HloInstruction::CreateTuple({body_element_0, add}));
  HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());

  // Trivial condition computation (always false).
  auto cond_builder = HloComputation::Builder("condition");
  auto cond_param = cond_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "param"));
  cond_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  HloComputation* condition =
      module_->AddEmbeddedComputation(cond_builder.Build());

  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "p0"));
  auto xla_while = builder.AddInstruction(
      HloInstruction::CreateWhile(tuple_shape, condition, body, param));
  auto while_element_1 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, xla_while, 0));
  auto while_element_2 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, xla_while, 1));
  auto negate_1 = builder.AddInstruction(HloInstruction::CreateUnary(
      scalar_shape_, HloOpcode::kNegate, while_element_1));
  auto negate_2 = builder.AddInstruction(HloInstruction::CreateUnary(
      scalar_shape_, HloOpcode::kNegate, while_element_2));
  auto tuple =
      builder.AddInstruction(HloInstruction::CreateTuple({negate_1, negate_2}));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      {0}, 0, {0}));
  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      {1}, 0, {1}));

  const HloAliasAnalysis& analysis = RunAnalysis();

  EXPECT_THAT(analysis.GetUniqueBufferAt(xla_while, {1}).values(),
              UnorderedElementsAre(&GetValueDefinedAt(param, {1}),
                                   &GetValueDefinedAt(xla_while, {1}),
                                   &GetValueDefinedAt(body_param, {1}),
                                   &GetValueDefinedAt(cond_param, {1}),
                                   &GetValueDefinedAt(add),
                                   &GetValueDefinedAt(negate_2)));
  EXPECT_THAT(
      analysis.GetUniqueBufferAt(xla_while, {1}).ComputePositions(),
      UnorderedElementsAre(
          HloPosition{param, {1}}, HloPosition{xla_while, {1}},
          HloPosition{while_element_2, {}}, HloPosition{body_param, {1}},
          HloPosition{body_element_1, {}}, HloPosition{add, {}},
          HloPosition{body_tuple, {1}}, HloPosition{tuple, {1}},
          HloPosition{cond_param, {1}}, HloPosition{negate_2, {}}));
  EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}

// A computation called once: callee parameters alias the call operands, and
// the call result aliases the callee root.
TEST_F(HloAliasAnalysisTest, SingleCall) {
  auto subbuilder = HloComputation::Builder("Subcomputation");
  auto subparam0 = subbuilder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
  auto subparam1 = subbuilder.AddInstruction(
      HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
  auto add = subbuilder.AddInstruction(HloInstruction::CreateBinary(
      scalar_shape_, HloOpcode::kAdd, subparam0, subparam1));
  HloComputation* called_computation =
      module_->AddEmbeddedComputation(subbuilder.Build());

  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto call = builder.AddInstruction(HloInstruction::CreateCall(
      scalar_shape_, {constant1, constant2}, called_computation));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  const HloAliasAnalysis& analysis = RunAnalysis();

  EXPECT_THAT(analysis.GetUniqueBufferAt(constant1).ComputePositions(),
              UnorderedElementsAre(HloPosition{constant1, {}},
                                   HloPosition{subparam0, {}}));
  EXPECT_THAT(analysis.GetUniqueBufferAt(constant2).ComputePositions(),
              UnorderedElementsAre(HloPosition{constant2, {}},
                                   HloPosition{subparam1, {}}));
  EXPECT_THAT(
      analysis.GetUniqueBufferAt(add).ComputePositions(),
      UnorderedElementsAre(HloPosition{add, {}}, HloPosition{call, {}}));

  EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}

// The same computation called twice (second call consumes the first's
// result): subparam0 becomes ambiguous since it may hold either buffer.
TEST_F(HloAliasAnalysisTest, ComputationCalledTwice) {
  auto subbuilder = HloComputation::Builder("Subcomputation");
  auto subparam0 = subbuilder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
  auto subparam1 = subbuilder.AddInstruction(
      HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
  auto add = subbuilder.AddInstruction(HloInstruction::CreateBinary(
      scalar_shape_, HloOpcode::kAdd, subparam0, subparam1));
  HloComputation* called_computation =
      module_->AddEmbeddedComputation(subbuilder.Build());

  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto call1 = builder.AddInstruction(HloInstruction::CreateCall(
      scalar_shape_, {constant1, constant2}, called_computation));
  auto call2 = builder.AddInstruction(HloInstruction::CreateCall(
      scalar_shape_, {call1, constant2}, called_computation));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  const HloAliasAnalysis& analysis = RunAnalysis();

  EXPECT_THAT(analysis.GetUniqueBufferAt(constant1).ComputePositions(),
              UnorderedElementsAre(HloPosition{constant1, {}},
                                   HloPosition{subparam0, {}}));
  EXPECT_THAT(analysis.GetUniqueBufferAt(constant2).ComputePositions(),
              UnorderedElementsAre(HloPosition{constant2, {}},
                                   HloPosition{subparam1, {}}));
  // add's buffer flows through call1 back into subparam0 for call2.
  EXPECT_THAT(
      analysis.GetUniqueBufferAt(add).ComputePositions(),
      UnorderedElementsAre(HloPosition{add, {}}, HloPosition{call1, {}},
                           HloPosition{subparam0, {}},
                           HloPosition{call2, {}}));

  EXPECT_THAT(GetBuffersAt(subparam0),
              UnorderedElementsAre(analysis.GetUniqueBufferAt(constant1),
                                   analysis.GetUniqueBufferAt(add)));
  EXPECT_THAT(GetBuffersAt(subparam1),
              UnorderedElementsAre(analysis.GetUniqueBufferAt(constant2)));

  EXPECT_TRUE(InstructionBuffersAreAmbiguous(subparam0));
  EXPECT_FALSE(InstructionBuffersAreAmbiguous(subparam1));
  EXPECT_TRUE(InstructionBuffersAreDistinct(subparam0));
  EXPECT_TRUE(InstructionBuffersAreDistinct(subparam1));

  EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}

// A single while loop: element {0} stays a single-value buffer; element {1}
// accumulates every value defined along the loop's dataflow cycle.
TEST_F(HloAliasAnalysisTest, SingleWhile) {
  const Shape tuple_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});

  // Body: returns (element_0, element_0 + element_1).
  auto body_builder = HloComputation::Builder("body");
  auto body_param = body_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "param"));
  auto body_element_0 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
  auto body_element_1 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
  auto add = body_builder.AddInstruction(HloInstruction::CreateBinary(
      scalar_shape_, HloOpcode::kAdd, body_element_0, body_element_1));
  auto body_tuple = body_builder.AddInstruction(
      HloInstruction::CreateTuple({body_element_0, add}));
  HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());

  // Condition: always false.
  auto cond_builder = HloComputation::Builder("condition");
  auto cond_param = cond_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "param"));
  cond_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  HloComputation* condition =
      module_->AddEmbeddedComputation(cond_builder.Build());

  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({constant1, constant2}));
  auto xla_while = builder.AddInstruction(
      HloInstruction::CreateWhile(tuple_shape, condition, body, tuple));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  const HloAliasAnalysis& analysis = RunAnalysis();

  EXPECT_THAT(
      analysis.GetUniqueBufferAt(xla_while, {}).ComputePositions(),
      UnorderedElementsAre(HloPosition{tuple, {}}, HloPosition{xla_while, {}},
                           HloPosition{body_param, {}},
                           HloPosition{body_tuple, {}},
                           HloPosition{cond_param, {}}));
  EXPECT_THAT(
      analysis.GetUniqueBufferAt(xla_while, {0}).ComputePositions(),
      UnorderedElementsAre(
          HloPosition{constant1, {}}, HloPosition{tuple, {0}},
          HloPosition{xla_while, {0}}, HloPosition{body_param, {0}},
          HloPosition{body_element_0, {}}, HloPosition{body_tuple, {0}},
          HloPosition{cond_param, {0}}));
  EXPECT_THAT(
      analysis.GetUniqueBufferAt(xla_while, {1}).ComputePositions(),
      UnorderedElementsAre(
          HloPosition{constant2, {}}, HloPosition{tuple, {1}},
          HloPosition{xla_while, {1}}, HloPosition{body_param, {1}},
          HloPosition{body_element_1, {}}, HloPosition{add, {}},
          HloPosition{body_tuple, {1}}, HloPosition{cond_param, {1}}));

  EXPECT_THAT(analysis.GetUniqueBufferAt(xla_while, {0}).values(),
              UnorderedElementsAre(&GetValueDefinedAt(constant1)));
  EXPECT_THAT(analysis.GetUniqueBufferAt(xla_while, {1}).values(),
              UnorderedElementsAre(&GetValueDefinedAt(constant2),
                                   &GetValueDefinedAt(xla_while, {1}),
                                   &GetValueDefinedAt(body_param, {1}),
                                   &GetValueDefinedAt(cond_param, {1}),
                                   &GetValueDefinedAt(add)));
  EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}

// Three chained whiles sharing one body/condition: after flattening the call
// graph, all three whiles share the initial tuple's buffers.
TEST_F(HloAliasAnalysisTest, SequentialWhiles) {
  const Shape tuple_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});

  auto body_builder = HloComputation::Builder("body");
  auto body_param = body_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "param"));
  auto body_element_0 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
  auto body_element_1 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
  auto add = body_builder.AddInstruction(HloInstruction::CreateBinary(
      scalar_shape_, HloOpcode::kAdd, body_element_0, body_element_1));
  body_builder.AddInstruction(
      HloInstruction::CreateTuple({body_element_0, add}));
  HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());

  auto cond_builder = HloComputation::Builder("condition");
  cond_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "param"));
  cond_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  HloComputation* condition =
      module_->AddEmbeddedComputation(cond_builder.Build());

  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({constant1, constant2}));
  auto xla_while0 = builder.AddInstruction(
      HloInstruction::CreateWhile(tuple_shape, condition, body, tuple));
  auto xla_while1 = builder.AddInstruction(
      HloInstruction::CreateWhile(tuple_shape, condition, body, xla_while0));
  auto xla_while2 = builder.AddInstruction(
      HloInstruction::CreateWhile(tuple_shape, condition, body, xla_while1));
  module_->AddEntryComputation(builder.Build());

  // Flatten so each while gets its own body/condition instance before
  // running the analysis.
  FlattenCallGraph flattener;
  TF_ASSERT_OK(flattener.Run(module_.get()).status());
  SCOPED_TRACE(module_->ToString());

  const HloAliasAnalysis& analysis = RunAnalysis();

  EXPECT_EQ(analysis.GetUniqueBufferAt(tuple, {}),
            analysis.GetUniqueBufferAt(xla_while2, {}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(constant1),
            analysis.GetUniqueBufferAt(xla_while2, {0}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(constant2),
            analysis.GetUniqueBufferAt(xla_while2, {1}));
}

// A while nested inside another while's body: buffers flow through both
// loop levels down to the inner body's elements.
TEST_F(HloAliasAnalysisTest, NestedWhiles) {
  const Shape tuple_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});

  // Each loop level needs its own condition computation.
  auto build_cond_computation = [&tuple_shape]() {
    auto cond_builder = HloComputation::Builder("condition");
    cond_builder.AddInstruction(
        HloInstruction::CreateParameter(0, tuple_shape, "param"));
    cond_builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
    return cond_builder.Build();
  };
  HloComputation* condition1 =
      module_->AddEmbeddedComputation(build_cond_computation());
  HloComputation* condition2 =
      module_->AddEmbeddedComputation(build_cond_computation());

  // Inner body: returns (element_0, element_0 + element_1).
  auto inner_builder = HloComputation::Builder("inner_body");
  auto inner_param = inner_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "param"));
  auto inner_element_0 = inner_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, inner_param, 0));
  auto inner_element_1 = inner_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, inner_param, 1));
  auto add = inner_builder.AddInstruction(HloInstruction::CreateBinary(
      scalar_shape_, HloOpcode::kAdd, inner_element_0, inner_element_1));
  inner_builder.AddInstruction(
      HloInstruction::CreateTuple({inner_element_0, add}));
  HloComputation* inner_body =
      module_->AddEmbeddedComputation(inner_builder.Build());

  // Outer body: negates element 0, then runs the inner while.
  auto outer_builder = HloComputation::Builder("outer_body");
  auto outer_param = outer_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "param"));
  auto outer_element_0 = outer_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, outer_param, 0));
  auto negate = outer_builder.AddInstruction(HloInstruction::CreateUnary(
      scalar_shape_, HloOpcode::kNegate, outer_element_0));
  auto outer_element_1 = outer_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, outer_param, 1));
  auto outer_tuple = outer_builder.AddInstruction(
      HloInstruction::CreateTuple({negate, outer_element_1}));
  auto nested_while =
      outer_builder.AddInstruction(HloInstruction::CreateWhile(
          tuple_shape, condition1, inner_body, outer_tuple));
  HloComputation* outer_body =
      module_->AddEmbeddedComputation(outer_builder.Build());

  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({constant1, constant2}));
  auto entry_while = builder.AddInstruction(
      HloInstruction::CreateWhile(tuple_shape, condition2, outer_body, tuple));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  const HloAliasAnalysis& analysis = RunAnalysis();

  EXPECT_EQ(analysis.GetUniqueBufferAt(constant1),
            analysis.GetUniqueBufferAt(entry_while, {0}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(constant1),
            analysis.GetUniqueBufferAt(nested_while, {0}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(constant1),
            analysis.GetUniqueBufferAt(inner_element_0));

  EXPECT_EQ(analysis.GetUniqueBufferAt(constant2),
            analysis.GetUniqueBufferAt(entry_while, {1}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(constant2),
            analysis.GetUniqueBufferAt(nested_while, {1}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(constant2),
            analysis.GetUniqueBufferAt(inner_element_1));

  EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}

// A while body that rotates its tuple elements: all three element buffers
// collapse into one, and the values in it genuinely interfere.
TEST_F(HloAliasAnalysisTest, SwizzlingWhile) {
  const Shape tuple_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_, scalar_shape_});

  // Body rotates the tuple: (a, b, c) -> (b, c, a).
  auto body_builder = HloComputation::Builder("body");
  auto body_param = body_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "param"));
  auto body_element_0 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
  auto body_element_1 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
  auto body_element_2 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 2));
  body_builder.AddInstruction(HloInstruction::CreateTuple(
      {body_element_1, body_element_2, body_element_0}));
  HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());

  auto cond_builder = HloComputation::Builder("condition");
  cond_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "param"));
  auto cond_constant = cond_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  HloComputation* condition =
      module_->AddEmbeddedComputation(cond_builder.Build());

  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto constant3 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
  auto tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({constant1, constant2, constant3}));
  auto xla_while = builder.AddInstruction(
      HloInstruction::CreateWhile(tuple_shape, condition, body, tuple));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  const HloAliasAnalysis& analysis = RunAnalysis();

  // Only three buffers remain: the merged element buffer, the tuple shell,
  // and the condition's constant.
  EXPECT_THAT(
      analysis.buffers(),
      UnorderedElementsAre(analysis.GetUniqueBufferAt(constant1),
                           analysis.GetUniqueBufferAt(tuple, {}),
                           analysis.GetUniqueBufferAt(cond_constant)));

  EXPECT_EQ(analysis.GetUniqueBufferAt(xla_while, {0}),
            analysis.GetUniqueBufferAt(xla_while, {1}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(xla_while, {0}),
            analysis.GetUniqueBufferAt(xla_while, {2}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(xla_while, {0}),
            analysis.GetUniqueBufferAt(constant1));
  EXPECT_EQ(analysis.GetUniqueBufferAt(constant1),
            analysis.GetUniqueBufferAt(constant2));
  EXPECT_EQ(analysis.GetUniqueBufferAt(constant1),
            analysis.GetUniqueBufferAt(constant3));

  // The rotated values overlap in time, so they interfere.
  EXPECT_TRUE(AnyValuesInSameBufferInterfere());
}

// A bitcast shares its operand's buffer: exactly one buffer total.
TEST_F(HloAliasAnalysisTest, Bitcast) {
  auto builder = HloComputation::Builder(TestName());
  auto constant = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto bitcast = builder.AddInstruction(
      HloInstruction::CreateBitcast(scalar_shape_, constant));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  const HloAliasAnalysis& analysis = RunAnalysis();

  EXPECT_EQ(analysis.buffers().size(), 1);

  EXPECT_EQ(analysis.GetUniqueBufferAt(constant),
            analysis.GetUniqueBufferAt(bitcast));
}

// dynamic-update-slice updates its (copied) operand in place: same buffer.
TEST_F(HloAliasAnalysisTest, DynamicUpdateSlice) {
  Shape shape = ShapeUtil::MakeShape(F32, {8});
  Shape update_shape = ShapeUtil::MakeShape(F32, {4});
  Shape index_shape = ShapeUtil::MakeShape(S32, {});
  auto builder = HloComputation::Builder(TestName());
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, shape, "param0"));
  auto param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, update_shape, "param1"));
  auto param2 = builder.AddInstruction(
      HloInstruction::CreateParameter(2, index_shape, "param2"));
  auto copy0 = builder.AddInstruction(
      HloInstruction::CreateUnary(shape, HloOpcode::kCopy, param0));
  auto dynamic_update_slice = builder.AddInstruction(
      HloInstruction::CreateDynamicUpdateSlice(shape, copy0, param1,
                                               {param2}));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());

  HloAliasAnalysis& analysis = RunAnalysis();

  EXPECT_EQ(analysis.GetUniqueBufferAt(copy0),
            analysis.GetUniqueBufferAt(dynamic_update_slice));
}

// Multi-output fusion with in-place DUS outputs: fusion outputs {1} and {2}
// alias their parameters, while {0} (the add) does not.
TEST_F(HloAliasAnalysisTest, DynamicUpdateSliceMultiOutputFusion) {
  absl::string_view hlo_string = R"(
HloModule Module

fused_computation {
  param0 = f32[1280,1,128] parameter(0)
  param1 = f32[1280,1,128] parameter(1)
  param2 = f32[1280,1,128] parameter(2)
  constant.1 = f32[] constant(0)
  broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
  constant.3 = s32[] constant(0)
  add.1 = f32[1280,1,128] add(param0, param0)
  dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(param1, broadcast.6, constant.3, constant.3, constant.3)
  dynamic-update-slice.6 = f32[1280,1,128] dynamic-update-slice(param2, broadcast.6, constant.3, constant.3, constant.3)
  ROOT tuple.1 = (f32[1280,1,128], f32[1280,1,128], f32[1280,1,128]) tuple(add.1, dynamic-update-slice.5, dynamic-update-slice.6)
}

ENTRY main {
  param = f32[1280,1,128] parameter(0)
  negate0 = f32[1280,1,128] negate(param)
  negate1 = f32[1280,1,128] negate(param)
  negate2 = f32[1280,1,128] negate(param)
  ROOT fusion = (f32[1280,1,128], f32[1280,1,128], f32[1280,1,128]) fusion(negate0, negate1, negate2), kind=kLoop, calls=fused_computation
}
)";
  TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_string));
  SCOPED_TRACE(module_->ToString());

  HloAliasAnalysis& analysis = RunAnalysis();
  LOG(INFO) << analysis.ToString();

  const HloInstruction* fusion =
      module_->entry_computation()->GetInstructionWithName("fusion");
  const HloInstruction* negate0 =
      module_->entry_computation()->GetInstructionWithName("negate0");
  const HloInstruction* negate1 =
      module_->entry_computation()->GetInstructionWithName("negate1");
  const HloInstruction* negate2 =
      module_->entry_computation()->GetInstructionWithName("negate2");
  EXPECT_EQ(analysis.GetUniqueBufferAt(negate1),
            analysis.GetUniqueBufferAt(fusion, {1}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(negate2),
            analysis.GetUniqueBufferAt(fusion, {2}));
  EXPECT_NE(analysis.GetUniqueBufferAt(negate0),
            analysis.GetUniqueBufferAt(fusion, {0}));
}

// Two chained DUS ops inside one fusion: the fusion result does NOT alias
// the fusion operand.
TEST_F(HloAliasAnalysisTest, ChainedDynamicUpdateSliceFusion) {
  absl::string_view hlo_string = R"(
HloModule Module

fused_computation {
  param0 = f32[1280,1,128] parameter(0)
  constant.1 = f32[] constant(0)
  broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
  constant.3 = s32[] constant(0)
  dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(param0, broadcast.6, constant.3, constant.3, constant.3)
  ROOT dynamic-update-slice.6 = f32[1280,1,128] dynamic-update-slice(dynamic-update-slice.5, broadcast.6, constant.3, constant.3, constant.3)
}

ENTRY main {
  param = f32[1280,1,128] parameter(0)
  negate0 = f32[1280,1,128] negate(param)
  ROOT fusion = f32[1280,1,128] fusion(negate0), kind=kLoop, calls=fused_computation
}
)";
  TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_string));
  SCOPED_TRACE(module_->ToString());

  HloAliasAnalysis& analysis = RunAnalysis();
  LOG(INFO) << analysis.ToString();

  const HloInstruction* fusion =
      module_->entry_computation()->GetInstructionWithName("fusion");
  const HloInstruction* negate0 =
      module_->entry_computation()->GetInstructionWithName("negate0");
  EXPECT_NE(analysis.GetUniqueBufferAt(negate0),
            analysis.GetUniqueBufferAt(fusion));
}

}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_alias_analysis.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_alias_analysis_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
e59be81d-78e8-4178-b423-a8826f3c6fff
cpp
tensorflow/tensorflow
scatter_simplifier
third_party/xla/xla/service/scatter_simplifier.cc
third_party/xla/xla/service/scatter_simplifier_test.cc
#include "xla/service/scatter_simplifier.h"

#include <algorithm>
#include <cstdint>
#include <iterator>
#include <utility>
#include <vector>

#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/permutation_util.h"
#include "xla/service/call_inliner.h"
#include "xla/service/gather_scatter_utils.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace {

// Canonicalizes a scatter's `updates` operand:
//  1. transposes so that all non-window (scatter) dims come first, followed by
//     the update window dims in their original order;
//  2. collapses the leading scatter dims to a single dim (or inserts a
//     degenerate leading dim if there are none);
//  3. inserts a degenerate dim at position i+1 for each inserted_window_dim i,
//     so that the update rank matches operand rank + 1.
// NOTE(review): `scatter_indices_size` is accepted but never read here —
// presumably kept for interface symmetry; confirm before relying on it.
absl::StatusOr<HloInstruction*> FlattenAndTransposeUpdates(
    HloInstruction* updates, absl::Span<const int64_t> update_window_dims,
    absl::Span<const int64_t> inserted_window_dims,
    int64_t scatter_indices_size) {
  int64_t updates_rank = updates->shape().rank();
  std::vector<int64_t> permutation;
  const int64_t num_scatter_dims = updates_rank - update_window_dims.size();
  permutation.reserve(updates_rank);
  // Non-window dims first...
  for (int i = 0; i < updates_rank; ++i) {
    if (!absl::c_linear_search(update_window_dims, i)) {
      permutation.push_back(i);
    }
  }
  // ...then the window dims, preserving their relative order.
  absl::c_copy(update_window_dims, std::back_inserter(permutation));
  TF_ASSIGN_OR_RETURN(updates, MaybeTranspose(updates, permutation));
  if (num_scatter_dims > 1) {
    // Collapse all leading scatter dims into one.
    TF_ASSIGN_OR_RETURN(updates, CollapseFirstNDims(updates, num_scatter_dims));
  } else if (num_scatter_dims == 0) {
    // No scatter dim at all: add a degenerate one in front.
    TF_ASSIGN_OR_RETURN(updates, InsertDegenerateDims(updates, {0}));
  }
  if (!inserted_window_dims.empty()) {
    std::vector<int64_t> new_dims;
    new_dims.reserve(inserted_window_dims.size());
    for (int64_t i : inserted_window_dims) {
      // +1 accounts for the leading (collapsed) scatter dim.
      new_dims.push_back(i + 1);
    }
    TF_ASSIGN_OR_RETURN(updates, InsertDegenerateDims(updates, new_dims));
  }
  return updates;
}

// Lifts an operand-dim permutation to an updates-dim permutation: dim 0 of the
// canonicalized updates is the scatter dim and stays in place; every operand
// dim d maps to updates dim d + 1.
std::vector<int64_t> MakeUpdatePermutation(
    const std::vector<int64_t>& operand_permutation) {
  std::vector<int64_t> update_permutation;
  update_permutation.reserve(operand_permutation.size() + 1);
  update_permutation.push_back(0);
  for (auto& dim : operand_permutation) {
    update_permutation.push_back(dim + 1);
  }
  return update_permutation;
}

// Canonicalizes every update operand of `scatter` (see
// FlattenAndTransposeUpdates) and then applies `update_permutation` to each.
absl::StatusOr<std::vector<HloInstruction*>> TransformScatterUpdates(
    HloScatterInstruction* scatter,
    const std::vector<int64_t>& update_permutation,
    int64_t scatter_indices_size) {
  std::vector<HloInstruction*> scatter_updates;
  const auto& attrs = scatter->scatter_dimension_numbers();
  scatter_updates.reserve(scatter->scatter_updates().size());
  for (auto* update : scatter->scatter_updates()) {
    TF_ASSIGN_OR_RETURN(
        scatter_updates.emplace_back(),
        FlattenAndTransposeUpdates(update, attrs.update_window_dims(),
                                   attrs.inserted_window_dims(),
                                   scatter_indices_size));
  }
  return MaybeTranspose(scatter_updates, update_permutation);
}

// Builds the canonical dimension numbers of a simplified scatter:
// update_window_dims = [1, operand_rank], no inserted window dims,
// scatter_dims_to_operand_dims = identity, index_vector_dim = 1.
ScatterDimensionNumbers MakeScatterDimensionNumbers(
    int64_t operand_rank, int64_t scatter_indices_vector_size) {
  ScatterDimensionNumbers dim_numbers;
  dim_numbers.mutable_update_window_dims()->Reserve(
      static_cast<int>(operand_rank));
  for (int i = 0; i < operand_rank; ++i) {
    dim_numbers.add_update_window_dims(1 + i);
  }
  dim_numbers.mutable_scatter_dims_to_operand_dims()->Reserve(
      static_cast<int>(scatter_indices_vector_size));
  for (int i = 0; i < scatter_indices_vector_size; ++i) {
    dim_numbers.add_scatter_dims_to_operand_dims(i);
  }
  dim_numbers.set_index_vector_dim(1);
  return dim_numbers;
}

}

// Rewrites `inst` (a scatter) into the canonical form described by
// IsSimplifiedScatter. Special case: if the operand rank is 0 (scatter into a
// scalar), the scatter reduces to a single application of the combiner, which
// is emitted as a call and immediately inlined.
absl::StatusOr<HloInstruction*> ScatterSimplifier::ExpandInstruction(
    HloInstruction* inst) {
  auto* scatter = Cast<HloScatterInstruction>(inst);
  if (scatter->called_computations().size() != 1) {
    return InvalidArgumentStrCat(
        "Expected scatter->called_computations() to have exactly one element, "
        "got ",
        scatter->called_computations().size());
  }
  HloComputation* called_computation = scatter->called_computations().front();
  const auto& attrs = scatter->scatter_dimension_numbers();
  // Operand rank = window dims present in updates + window dims elided from
  // updates.
  const int operand_rank =
      attrs.update_window_dims().size() + attrs.inserted_window_dims().size();
  if (operand_rank == 0) {
    // Scalar operands: apply the combiner once to (operands..., updates...)
    // and inline the resulting call.
    absl::InlinedVector<HloInstruction*, 2> scatter_operands_and_updates;
    scatter_operands_and_updates.reserve(2 * scatter->operand_count());
    absl::c_copy(scatter->scatter_operands(),
                 std::back_inserter(scatter_operands_and_updates));
    absl::c_copy(scatter->scatter_updates(),
                 std::back_inserter(scatter_operands_and_updates));
    auto* call_op = scatter->AddInstruction(HloInstruction::CreateCall(
        scatter->shape(), scatter_operands_and_updates, called_computation));
    TF_RETURN_IF_ERROR(scatter->ReplaceAllUsesWith(call_op));
    TF_ASSIGN_OR_RETURN(auto map, CallInliner::Inline(call_op));
    return map[call_op];
  }
  // Permutation that makes scatter_dims_to_operand_dims the identity, plus
  // its inverse to restore the original layout afterwards.
  auto [operand_permutation, operand_permutation_inverse] =
      MakeOperandStartIndexPermutations(attrs.scatter_dims_to_operand_dims(),
                                        operand_rank);
  auto update_permutation = MakeUpdatePermutation(operand_permutation);
  TF_ASSIGN_OR_RETURN(auto* scatter_indices,
                      TransformStartIndices(scatter->scatter_indices(),
                                            attrs.index_vector_dim()));
  TF_ASSIGN_OR_RETURN(
      auto scatter_updates,
      TransformScatterUpdates(scatter, update_permutation,
                              scatter_indices->shape().dimensions(0)));
  TF_ASSIGN_OR_RETURN(
      auto scatter_operands,
      MaybeTranspose(scatter->scatter_operands(), operand_permutation));
  auto dim_numbers = MakeScatterDimensionNumbers(
      operand_rank, attrs.scatter_dims_to_operand_dims().size());
  // Result shape: single operand shape, or a tuple of all operand shapes for
  // variadic scatter.
  Shape output_shape;
  if (scatter_operands.size() == 1) {
    output_shape = scatter_operands.front()->shape();
  } else {
    std::vector<Shape> shapes;
    shapes.reserve(scatter_operands.size());
    for (auto* operand : scatter_operands) {
      shapes.push_back(operand->shape());
    }
    output_shape = ShapeUtil::MakeTupleShape(shapes);
  }
  auto* result = scatter->AddInstruction(HloInstruction::CreateScatter(
      output_shape, scatter_operands, scatter_indices, scatter_updates,
      called_computation, dim_numbers, scatter->indices_are_sorted(),
      scatter->unique_indices()));
  // Undo the operand permutation on the result so callers see the original
  // dimension order.
  if (IsIdentityPermutation(operand_permutation)) {
    return result;
  }
  if (scatter->scatter_operands().size() == 1) {
    return MaybeTranspose(result, operand_permutation_inverse);
  }
  // Variadic case: un-permute each tuple element individually, then re-tuple.
  std::vector<HloInstruction*> result_items;
  result_items.reserve(scatter->scatter_operands().size());
  for (int i = 0; i < scatter->scatter_operands().size(); ++i) {
    TF_ASSIGN_OR_RETURN(result_items.emplace_back(),
                        MakeGetTupleElementHlo(result, i));
    TF_ASSIGN_OR_RETURN(
        result_items.back(),
        MaybeTranspose(result_items.back(), operand_permutation_inverse));
  }
  return MaybeMakeTuple(result_items);
}

// True iff `scatter` is already in the canonical form this pass produces:
// non-scalar operand, index vector in the last indices dim, at most one
// scatter dim in the updates, identity scatter_dims_to_operand_dims, updates
// dim 0 reserved for the scatter dim, sorted update_window_dims, and no
// inserted_window_dims.
bool ScatterSimplifier::IsSimplifiedScatter(
    const HloScatterInstruction* scatter) {
  const auto& dims = scatter->scatter_dimension_numbers();
  auto operand_rank = scatter->scatter_operands().front()->shape().rank();
  if (operand_rank == 0) return false;
  bool nonstandard_index_vector_dim =
      dims.index_vector_dim() != scatter->scatter_indices()->shape().rank() - 1;
  int64_t num_scatter_dims =
      scatter->scatter_updates().front()->shape().rank() -
      dims.update_window_dims().size();
  bool scatter_indices_reordered =
      !IsIdentityPermutation(dims.scatter_dims_to_operand_dims());
  bool scatter_dim_not_first =
      absl::c_linear_search(dims.update_window_dims(), 0);
  bool update_window_dims_sorted = absl::c_is_sorted(dims.update_window_dims());
  return !(nonstandard_index_vector_dim || num_scatter_dims > 1 ||
           scatter_indices_reordered || scatter_dim_not_first ||
           !update_window_dims_sorted || !dims.inserted_window_dims().empty());
}

// The pass rewrites exactly those scatters that are not yet simplified.
bool ScatterSimplifier::InstructionMatchesPattern(HloInstruction* inst) {
  auto* scatter = DynCast<HloScatterInstruction>(inst);
  return scatter && !IsSimplifiedScatter(scatter);
}
}
#include "xla/service/scatter_simplifier.h"

#include <optional>

#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/pass/hlo_pass_fix.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"

namespace xla {
namespace {

class ScatterSimplifierTest : public HloTestBase {};

// inserted_window_dims={0} and a rank-1 indices operand: the pass must reshape
// the indices to carry an explicit index vector dim and reshape the updates so
// every operand dim has a matching window dim.
TEST_F(ScatterSimplifierTest, InsertsIndexVectorAndWindowDims) {
  constexpr absl::string_view kModuleStr = R"( HloModule scatter_simplifier scatter_computation { p0 = f32[] parameter(0) p1 = f32[] parameter(1) p2 = f32[] parameter(2) p3 = f32[] parameter(3) ROOT tuple = tuple(p2, p3) } ENTRY kernel_entry { operand0 = f32[3,3] parameter(0) operand1 = f32[3,3] parameter(1) indices = s32[2] parameter(2) update0 = f32[2,3] parameter(3) update1 = f32[2,3] parameter(4) ROOT scatter = (f32[3,3], f32[3,3]) scatter(operand0, operand1, indices, update0, update1), to_apply=scatter_computation, update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1 })";
  RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"( CHECK: %[[SCATTER_DIMS_WITH_VECTOR:.*]] = s32[2,1]{1,0} reshape(%indices) CHECK: %[[RESHAPED_UPDATES0:.*]] = f32[2,1,3]{2,1,0} reshape(%update0) CHECK: %[[RESHAPED_UPDATES1:.*]] = f32[2,1,3]{2,1,0} reshape(%update1) CHECK: ROOT %scatter = (f32[3,3]{1,0}, f32[3,3]{1,0}) scatter( CHECK-SAME: %operand0, %operand1, %[[SCATTER_DIMS_WITH_VECTOR]], CHECK-SAME: %[[RESHAPED_UPDATES0]], %[[RESHAPED_UPDATES1]]), CHECK-SAME: update_window_dims={1,2}, CHECK-SAME: inserted_window_dims={}, CHECK-SAME: scatter_dims_to_operand_dims={0}, CHECK-SAME: index_vector_dim=1, CHECK-SAME: to_apply=%scatter_computation )");
}

// Two scatter dims in the indices/updates: they must be collapsed into a
// single leading scatter dim.
TEST_F(ScatterSimplifierTest, CollapsesScatterDims) {
  constexpr absl::string_view kModuleStr = R"( HloModule scatter_simplifier scatter_computation { %p0 = f32[] parameter(0) ROOT result = f32[] parameter(1) } ENTRY kernel_entry { operand = f32[3,3] parameter(0) indices = s32[2,1,2]
parameter(1) update = f32[2,1,1,3] parameter(2) ROOT scatter = f32[3,3] scatter(operand, indices, update), to_apply=scatter_computation, update_window_dims={2, 3}, inserted_window_dims={}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=2 })";
  RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"( CHECK: %[[RESHAPED_INDICES:.*]] = s32[2,2]{1,0} reshape(%indices) CHECK: %[[RESHAPED_UPDATES:.*]] = f32[2,1,3]{2,1,0} reshape(%update) CHECK: scatter( CHECK-SAME: %[[RESHAPED_INDICES]] CHECK-SAME: %[[RESHAPED_UPDATES]] )");
}

// Already-canonical scatter: the pass must not change the module
// (std::nullopt means "expect no rewrite").
TEST_F(ScatterSimplifierTest, NoOpForSimpleScatter) {
  constexpr absl::string_view kModuleStr = R"( HloModule scatter_simplifier scatter_computation { %p0 = f32[] parameter(0) ROOT result = f32[] parameter(1) } ENTRY kernel_entry { operand = f32[3,3] parameter(0) indices = s32[2,2] parameter(1) update = f32[2,1,3] parameter(2) ROOT scatter = f32[3,3] scatter(operand, indices, update), to_apply=scatter_computation, update_window_dims={1,2}, inserted_window_dims={}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=1 })";
  RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), std::nullopt);
}

// index_vector_dim=0 (not the last indices dim): the indices must be
// transposed so the index vector ends up in dim 1.
TEST_F(ScatterSimplifierTest, MovesIndexVectorDim) {
  constexpr absl::string_view kModuleStr = R"( HloModule scatter_simplifier scatter_computation { %p0 = f32[] parameter(0) ROOT result = f32[] parameter(1) } ENTRY kernel_entry { operand = f32[3,3] parameter(0) indices = s32[2,1] parameter(1) update = f32[1,3,3] parameter(2) ROOT scatter = f32[3,3] scatter(operand, indices, update), to_apply=scatter_computation, update_window_dims={1, 2}, inserted_window_dims={}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0 })";
  RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"( CHECK: %[[TRANSPOSED_INDICES:.*]] = s32[1,2]{1,0} CHECK-SAME: transpose(%indices), dimensions={1,0} CHECK: scatter(%operand, %[[TRANSPOSED_INDICES]], %update), CHECK-SAME: index_vector_dim=1 )");
}

// Non-identity scatter_dims_to_operand_dims={2,0}: operand and updates are
// transposed before the scatter and the result transposed back afterwards.
TEST_F(ScatterSimplifierTest,
       TransformsUpdatesAndOperandUsingScatterDims) {
  constexpr absl::string_view kModuleStr = R"( HloModule scatter_simplifier scatter_computation { %p0 = f32[] parameter(0) ROOT result = f32[] parameter(1) } ENTRY kernel_entry { operand = f32[3,4,5] parameter(0) indices = s32[2,2] parameter(1) update = f32[2,1,1,3] parameter(2) ROOT scatter = f32[3,4,5] scatter(operand, indices, update), to_apply=scatter_computation, update_window_dims={1, 2, 3}, inserted_window_dims={}, scatter_dims_to_operand_dims={2,0}, index_vector_dim=1 })";
  RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"( CHECK: %[[T_OPERAND:.*]] = f32[5,3,4]{2,1,0} transpose(%operand), CHECK-SAME: dimensions={2,0,1} CHECK: %[[T_UPDATES:.*]] = f32[2,3,1,1]{3,2,1,0} transpose(%update), CHECK-SAME: dimensions={0,3,1,2} CHECK: %[[SCATTER:.*]] = {{.*}} scatter( CHECK-SAME: %[[T_OPERAND]], %indices, %[[T_UPDATES]]) CHECK-SAME: scatter_dims_to_operand_dims={0,1}, CHECK: ROOT %{{.*}} = f32[3,4,5] CHECK-SAME: transpose(%[[SCATTER]]), dimensions={1,2,0} )");
}

// update_window_dims={0}: the scatter dim must be moved to the front of the
// updates, pushing the window dim to position 1.
TEST_F(ScatterSimplifierTest, MakesScatterDimensionsLeadingInUpdates) {
  constexpr absl::string_view kModuleStr = R"( HloModule scatter_simplifier scatter_computation { %p0 = f32[] parameter(0) ROOT result = f32[] parameter(1) } ENTRY kernel_entry { operand = f32[3] parameter(0) indices = s32[1,1] parameter(1) update = f32[2,1] parameter(2) ROOT scatter = f32[3] scatter(operand, indices, update), to_apply=scatter_computation, update_window_dims={0}, inserted_window_dims={}, scatter_dims_to_operand_dims={0}, index_vector_dim=1 })";
  RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"( CHECK: %[[TRANSPOSED_UPDATES:.*]] = f32[1,2]{1,0} CHECK-SAME: transpose(%update), dimensions={1,0} CHECK: scatter( CHECK-SAME: %[[TRANSPOSED_UPDATES]] CHECK-SAME: update_window_dims={1}, )");
}

// Rank-1 indices with index_vector_dim=0 (no separate scatter dims): the
// rewrite must still produce a scatter.
TEST_F(ScatterSimplifierTest, ZeroDimScatterIndices) {
  constexpr absl::string_view kModuleStr = R"( HloModule scatter_simplifier scatter_computation { %p0 = f32[]
parameter(0) ROOT result = f32[] parameter(1) } ENTRY kernel_entry { operand = f32[4,4] parameter(0) indices = s32[2] parameter(1) update = f32[3,3] parameter(2) ROOT scatter = f32[4,4]{1,0} scatter(operand, indices, update), update_window_dims={0,1}, inserted_window_dims={}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0, to_apply=scatter_computation })";
  RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"( CHECK: scatter( )");
}

// update_window_dims={2,1} is unsorted, so the predicate must report the
// scatter as not yet simplified.
TEST_F(ScatterSimplifierTest,
       IsSimplifiedScatterReturnsFalseForUnsortedWindowDims) {
  constexpr absl::string_view kModuleStr = R"( HloModule scatter_simplifier scatter_computation { %p0 = f32[] parameter(0) ROOT result = f32[] parameter(1) } ENTRY kernel_entry { operand = f32[3,2] parameter(0) indices = s32[1,1] parameter(1) update = f32[1,2,2] parameter(2) ROOT scatter = f32[3,2] scatter(operand, indices, update), to_apply=scatter_computation, update_window_dims={2,1}, inserted_window_dims={}, scatter_dims_to_operand_dims={0}, index_vector_dim=1 })";
  auto module = ParseAndReturnUnverifiedModule(kModuleStr).value();
  auto scatter = module->entry_computation()->root_instruction();
  EXPECT_FALSE(ScatterSimplifier::IsSimplifiedScatter(
      Cast<HloScatterInstruction>(scatter)));
}

// Scalar (rank-0) operand: the scatter collapses to a single application of
// the combiner (here an add of operand and updates).
TEST_F(ScatterSimplifierTest, ScatterIntoScalar) {
  constexpr absl::string_view kModuleStr = R"( HloModule scatter_simplifier scatter_computation { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY kernel_entry { operand = s32[] parameter(0) indices = s32[0]{0} parameter(1) updates = s32[] parameter(2) ROOT scatter = s32[] scatter(operand, indices, updates), update_window_dims={}, inserted_window_dims={}, scatter_dims_to_operand_dims={}, index_vector_dim=0, to_apply=scatter_computation } )";
  // NOTE(review): `module` is parsed but unused; RunAndFilecheckHloRewrite
  // re-parses kModuleStr itself.
  auto module = ParseAndReturnUnverifiedModule(kModuleStr).value();
  RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"( CHECK: ENTRY CHECK: %[[OPERAND:.*]] = s32[] parameter(0) CHECK: %[[UPDATES:.*]] = s32[]
parameter(2) CHECK: ROOT %{{.*}} = s32[] add(%[[OPERAND]], %[[UPDATES]]) )");
}

// Variadic scalar scatter with a pass-through combiner: the result is just a
// tuple of the updates.
TEST_F(ScatterSimplifierTest, VariadicScatterIntoScalar) {
  constexpr absl::string_view kModuleStr = R"( HloModule scatter_simplifier scatter_computation { p0 = f32[] parameter(0) p1 = bf16[] parameter(1) p2 = f32[] parameter(2) p3 = bf16[] parameter(3) ROOT tuple = tuple(p2, p3) } ENTRY kernel_entry { operand0 = f32[] parameter(0) operand1 = bf16[] parameter(1) indices = s32[0]{0} parameter(2) updates0 = f32[] parameter(3) updates1 = bf16[] parameter(4) ROOT scatter = (f32[], bf16[]) scatter(operand0, operand1, indices, updates0, updates1), update_window_dims={}, inserted_window_dims={}, scatter_dims_to_operand_dims={}, index_vector_dim=0, to_apply=scatter_computation })";
  RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"( CHECK: ENTRY CHECK: %[[UPDATES0:.*]] = f32[] parameter(3) CHECK: %[[UPDATES1:.*]] = bf16[] parameter(4) CHECK: ROOT %{{.*}} = (f32[], bf16[]) tuple(%[[UPDATES0]], %[[UPDATES1]]) )");
}
}
}
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/scatter_simplifier.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/scatter_simplifier_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
09730dec-80af-493b-9079-4dd8922a3bdf
cpp
tensorflow/tensorflow
slice_sinker
third_party/xla/xla/service/slice_sinker.cc
third_party/xla/xla/service/slice_sinker_test.cc
#include "xla/service/slice_sinker.h"

#include <algorithm>
#include <optional>
#include <utility>
#include <vector>

#include "absl/algorithm/container.h"
#include "absl/types/span.h"
#include "xla/shape_util.h"

namespace xla {
namespace {

// True iff the two slices cut identical regions (same starts/limits/strides)
// out of sources with identical dimensions. Both arguments must be kSlice.
bool SameSliceConfiguration(const HloInstruction* slice_1,
                            const HloInstruction* slice_2) {
  CHECK_EQ(slice_1->opcode(), HloOpcode::kSlice);
  CHECK_EQ(slice_2->opcode(), HloOpcode::kSlice);
  CHECK(slice_1->operand(0)->shape().dimensions() ==
        slice_2->operand(0)->shape().dimensions());
  return slice_1->slice_starts() == slice_2->slice_starts() &&
         slice_1->slice_limits() == slice_2->slice_limits() &&
         slice_1->slice_strides() == slice_2->slice_strides();
}

// True iff every operand of `inst` is a slice, all slice sources have
// compatible shapes (ignoring element type), and all slices use the same
// configuration. `inst` must already be elementwise.
bool IsElementwiseOperationOnSimilarSlices(const HloInstruction* inst) {
  CHECK(inst->IsElementwise());
  if (absl::c_any_of(inst->operands(), [](const HloInstruction* operand) {
        return operand->opcode() != HloOpcode::kSlice;
      })) {
    return false;
  }
  const HloInstruction* slice0 = inst->operand(0);
  return absl::c_all_of(absl::MakeSpan(inst->operands()).subspan(1),
                        [slice0](const HloInstruction* slice) {
                          return ShapeUtil::CompatibleIgnoringElementType(
                                     slice0->operand(0)->shape(),
                                     slice->operand(0)->shape()) &&
                                 SameSliceConfiguration(slice0, slice);
                        });
}

// True iff `candidate` performs the same operation as `operation_on_slices`,
// with each operand i being a slice of the SAME source as the corresponding
// operand of `operation_on_slices`, all slices sharing one configuration.
// Candidates with no users are rejected (nothing to rewrite).
bool IsSimilarOperationOnSlices(const HloInstruction* operation_on_slices,
                                const HloInstruction* candidate) {
  if (candidate->user_count() == 0) {
    return false;
  }
  if (!candidate->SameOp(*operation_on_slices) ||
      operation_on_slices->shape().element_type() !=
          candidate->shape().element_type()) {
    return false;
  }
  const HloInstruction* operand_slice0 = candidate->operand(0);
  for (int64_t i = 0; i < candidate->operand_count(); ++i) {
    const HloInstruction* operand_slice = candidate->operand(i);
    if (operand_slice->opcode() != HloOpcode::kSlice ||
        operand_slice->operand(0) !=
            operation_on_slices->operand(i)->operand(0) ||
        !SameSliceConfiguration(operand_slice0, operand_slice)) {
      return false;
    }
  }
  return true;
}

// Profitability heuristic: sinking is worthwhile only if the group of sliced
// operations together covers at least as many elements as one full operation
// on the (unsliced) slice source would compute.
bool ShouldTransform(const std::vector<HloInstruction*>& operations_on_slices) {
  int64_t sum = 0;
  for (HloInstruction* user : operations_on_slices) {
    sum += ShapeUtil::ElementsIn(user->shape());
  }
  return sum >= xla::ShapeUtil::ElementsIn(
                    operations_on_slices[0]->operand(0)->operand(0)->shape());
}

// Collects all operations similar to `operation_on_slices` (including itself)
// that operate on slices of the same sources, by scanning the users of every
// slice of the first slice source. Returns nullopt when the group is not
// profitable per ShouldTransform.
std::optional<std::vector<HloInstruction*>> FindElementwiseOperationGroup(
    const HloInstruction* operation_on_slices) {
  std::vector<HloInstruction*> operations;
  const HloInstruction* slice_source0 =
      operation_on_slices->operand(0)->operand(0);
  for (const HloInstruction* operand_slice0 : slice_source0->users()) {
    if (operand_slice0->opcode() != HloOpcode::kSlice) {
      continue;
    }
    for (HloInstruction* user : operand_slice0->users()) {
      if (IsSimilarOperationOnSlices(operation_on_slices, user)) {
        operations.push_back(user);
      }
    }
  }
  return ShouldTransform(operations) ? std::make_optional(operations)
                                     : std::nullopt;
}

// Rewrites the group: clones the operation once on the full slice sources,
// then replaces each original operation-on-slices with a slice (cloned from
// its first operand's configuration) of that full result. The superseded
// instructions are not removed here; they are left for later cleanup.
absl::Status SinkSlices(
    const std::vector<HloInstruction*>& slice_sources,
    const std::vector<HloInstruction*>& operation_on_slices) {
  const Shape shape = slice_sources[0]->shape();
  // The operation may change element type (e.g. compare), so build the full
  // shape from the source dims and the operation's element type.
  PrimitiveType element_type = operation_on_slices[0]->shape().element_type();
  Shape new_shape = ShapeUtil::ChangeElementType(shape, element_type);
  HloComputation* computation = operation_on_slices[0]->parent();
  auto operation_on_slice_sources = computation->AddInstruction(
      operation_on_slices[0]->CloneWithNewOperands(new_shape, slice_sources));
  VLOG(10) << "Adding operation_on_slice_sources: "
           << operation_on_slice_sources->ToString();
  for (HloInstruction* user : operation_on_slices) {
    const HloInstruction* operand_slice = user->operand(0);
    auto user_slice =
        computation->AddInstruction(operand_slice->CloneWithNewOperands(
            user->shape(), {operation_on_slice_sources}));
    VLOG(10) << "Adding new slice: " << user_slice->ToString()
             << " to replace: " << user->ToString();
    TF_RETURN_IF_ERROR(user->ReplaceAllUsesWith(user_slice));
  }
  return absl::OkStatus();
}

}

// Pass entry point: for each elementwise operation whose operands are all
// similar slices, finds the peer group and, if profitable, sinks the slices
// below a single operation on the full slice sources.
absl::StatusOr<bool> SliceSinker::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool changed = false;
  for (HloComputation* computation : module->computations(execution_threads)) {
    for (HloInstruction* instruction :
         computation->MakeInstructionPostOrder()) {
      // Skip non-elementwise ops, nullary ops, and dead ops.
      if (!instruction->IsElementwise() || instruction->operand_count() == 0 ||
          instruction->user_count() == 0) {
        continue;
      }
      VLOG(10) << "Processing instruction : " << instruction->ToString();
      if (!IsElementwiseOperationOnSimilarSlices(instruction)) {
        continue;
      }
      std::optional<std::vector<HloInstruction*>> similar_operations =
          FindElementwiseOperationGroup(instruction);
      if (!similar_operations.has_value()) {
        continue;
      }
      // The slice sources are the operands of the slices feeding this op.
      std::vector<HloInstruction*> slice_sources;
      absl::c_transform(
          instruction->operands(), std::back_inserter(slice_sources),
          [](HloInstruction* slice) { return slice->mutable_operand(0); });
      TF_RETURN_IF_ERROR(SinkSlices(slice_sources, similar_operations.value()));
      changed = true;
    }
  }
  return changed;
}
}
#include "xla/service/slice_sinker.h"

#include <memory>
#include <vector>

#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"

namespace xla {
namespace {

namespace m = match;
using ::testing::ElementsAre;

class SliceSinkerTest : public HloTestBase {};

// Ternary elementwise op (select) over matching slices of three sources:
// the pass must sink the slices below a single full-shape select.
TEST_F(SliceSinkerTest, TernaryOperation) {
  const char* kModuleStr = R"( HloModule m test { p0 = pred[8,9] parameter(0) p1 = f32[8,9] parameter(1) p2 = f32[8,9] parameter(2) s00 = pred[2,9] slice(pred[8,9] p0), slice={[0:2], [0:9]} s01 = pred[6,9] slice(pred[8,9] p0), slice={[2:8], [0:9]} s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]} s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]} s20 = f32[2,9] slice(f32[8,9] p2), slice={[0:2], [0:9]} s21 = f32[6,9] slice(f32[8,9] p2), slice={[2:8], [0:9]} sel0 = f32[2,9] select(pred[2,9] s00, f32[2,9] s10, f32[2,9] s20) sel1 = f32[6,9] select(pred[6,9] s01, f32[6,9] s11, f32[6,9] s21) ROOT tuple = (f32[2,9], f32[6,9]) tuple(sel0, sel1) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  SliceSinker slice_sinker;
  TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
  EXPECT_TRUE(result);
  HloInstruction* inst = module->entry_computation()->root_instruction();
  const HloInstruction* slice0;
  const HloInstruction* slice1;
  // Root should now be slices of one full-shape select.
  EXPECT_THAT(inst, GmockMatch(m::Tuple(
                        m::Slice(&slice0, m::Select(m::Parameter(0),
                                                    m::Parameter(1),
                                                    m::Parameter(2))),
                        m::Slice(&slice1, m::Select(m::Parameter(0),
                                                    m::Parameter(1),
                                                    m::Parameter(2))))));
  // The new slices must reproduce the original slice configurations.
  EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
  EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9));
  EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1));
  EXPECT_THAT(slice1->slice_starts(), ElementsAre(2, 0));
  EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
  EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1));
}

// Overlapping partial slices that together cover >= the source element count:
// profitable, so the transformation applies.
TEST_F(SliceSinkerTest, OverlappingPartialSlicesBeneficial) {
  const char* kModuleStr = R"( HloModule m test { p0 = f32[8,9] parameter(0) p1 = f32[8,9] parameter(1) s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]} s01 = f32[5,9] slice(f32[8,9] p0), slice={[3:8], [0:9]} s02 = f32[8,4] slice(f32[8,9] p0), slice={[0:8], [0:4]} s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]} s11 = f32[5,9] slice(f32[8,9] p1), slice={[3:8], [0:9]} s12 = f32[8,4] slice(f32[8,9] p1), slice={[0:8], [0:4]} add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] s10) add1 = f32[5,9] add(f32[5,9] s01, f32[5,9] s11) add2 = f32[8,4] add(f32[8,4] s02, f32[8,4] s12) ROOT tuple = (f32[2,9], f32[5,9], f32[8,4]) tuple(add0, add1, add2) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  SliceSinker slice_sinker;
  TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
  EXPECT_TRUE(result);
  HloInstruction* inst = module->entry_computation()->root_instruction();
  const HloInstruction* slice0;
  const HloInstruction* slice1;
  const HloInstruction* slice2;
  EXPECT_THAT(
      inst,
      GmockMatch(m::Tuple(
          m::Slice(&slice0, m::Add(m::Parameter(0), m::Parameter(1))),
          m::Slice(&slice1, m::Add(m::Parameter(0), m::Parameter(1))),
          m::Slice(&slice2, m::Add(m::Parameter(0), m::Parameter(1))))));
  EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
  EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9));
  EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1));
  EXPECT_THAT(slice1->slice_starts(), ElementsAre(3, 0));
  EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
  EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1));
  EXPECT_THAT(slice2->slice_starts(), ElementsAre(0, 0));
  EXPECT_THAT(slice2->slice_limits(), ElementsAre(8, 4));
  EXPECT_THAT(slice2->slice_strides(), ElementsAre(1, 1));
}

// Two independent peer groups (adds and multiplies) sharing the same slice
// sources: each group is sunk separately.
TEST_F(SliceSinkerTest, SameSliceSourcesTwoPeerGroups) {
  const char* kModuleStr = R"( HloModule m test { p0 = f32[8,9] parameter(0) p1 = f32[8,9] parameter(1) s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]} s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]} s02 = f32[8,2] slice(f32[8,9] p0), slice={[0:8], [0:2]} s03 = f32[8,7] slice(f32[8,9] p0), slice={[0:8], [2:9]} s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]} s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]} s12 = f32[8,2] slice(f32[8,9] p1), slice={[0:8], [0:2]} s13 = f32[8,7] slice(f32[8,9] p1), slice={[0:8], [2:9]} add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] s10) add1 = f32[6,9] add(f32[6,9] s01, f32[6,9] s11) mul0 = f32[8,2] multiply(f32[8,2] s02, f32[8,2] s12) mul1 = f32[8,7] multiply(f32[8,7] s03, f32[8,7] s13) ROOT tuple = (f32[2,9], f32[6,9], f32[8,2], f32[8,7]) tuple(add0, add1, mul0, mul1) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  SliceSinker slice_sinker;
  TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
  EXPECT_TRUE(result);
  HloInstruction* inst = module->entry_computation()->root_instruction();
  const HloInstruction* slice0;
  const HloInstruction* slice1;
  const HloInstruction* slice2;
  const HloInstruction* slice3;
  EXPECT_THAT(
      inst,
      GmockMatch(m::Tuple(
          m::Slice(&slice0, m::Add(m::Parameter(0), m::Parameter(1))),
          m::Slice(&slice1, m::Add(m::Parameter(0), m::Parameter(1))),
          m::Slice(&slice2, m::Multiply(m::Parameter(0), m::Parameter(1))),
          m::Slice(&slice3, m::Multiply(m::Parameter(0), m::Parameter(1))))));
  EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
  EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9));
  EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1));
  EXPECT_THAT(slice1->slice_starts(), ElementsAre(2, 0));
  EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
  EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1));
  EXPECT_THAT(slice2->slice_starts(), ElementsAre(0, 0));
  EXPECT_THAT(slice2->slice_limits(), ElementsAre(8, 2));
  EXPECT_THAT(slice2->slice_strides(), ElementsAre(1, 1));
  EXPECT_THAT(slice3->slice_starts(), ElementsAre(0, 2));
  EXPECT_THAT(slice3->slice_limits(), ElementsAre(8, 9));
  EXPECT_THAT(slice3->slice_strides(), ElementsAre(1, 1));
}

// Three overlapping slices whose total element count exceeds the source:
// still profitable, all three adds are replaced.
TEST_F(SliceSinkerTest, OverlappingMultipleSlices) {
  const char* kModuleStr = R"( HloModule m test { p0 = f32[8,9] parameter(0) p1 = f32[8,9] parameter(1) s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]} s01 = f32[5,9] slice(f32[8,9] p0), slice={[3:8], [0:9]} s02 = f32[3,9] slice(f32[8,9] p0), slice={[2:5], [0:9]} s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]} s11 = f32[5,9] slice(f32[8,9] p1), slice={[3:8], [0:9]} s12 = f32[3,9] slice(f32[8,9] p1), slice={[2:5], [0:9]} add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] s10) add1 = f32[5,9] add(f32[5,9] s01, f32[5,9] s11) add2 = f32[3,9] add(f32[3,9] s02, f32[3,9] s12) ROOT tuple = (f32[2,9], f32[5,9], f32[3,9]) tuple(add0, add1, add2) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  SliceSinker slice_sinker;
  TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
  EXPECT_TRUE(result);
  HloInstruction* inst = module->entry_computation()->root_instruction();
  const HloInstruction* slice0;
  const HloInstruction* slice1;
  const HloInstruction* slice2;
  EXPECT_THAT(
      inst,
      GmockMatch(m::Tuple(
          m::Slice(&slice0, m::Add(m::Parameter(0), m::Parameter(1))),
          m::Slice(&slice1, m::Add(m::Parameter(0), m::Parameter(1))),
          m::Slice(&slice2, m::Add(m::Parameter(0), m::Parameter(1))))));
  EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0));
  EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9));
  EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1));
  EXPECT_THAT(slice1->slice_starts(), ElementsAre(3, 0));
  EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9));
  EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1));
  EXPECT_THAT(slice2->slice_starts(), ElementsAre(2, 0));
  EXPECT_THAT(slice2->slice_limits(), ElementsAre(5, 9));
  EXPECT_THAT(slice2->slice_strides(), ElementsAre(1, 1));
}

// Disjoint slices covering only 7 of 8 source rows: below the profitability
// threshold, so no change is expected.
TEST_F(SliceSinkerTest, DisjointedPartialSlices) {
  const char* kModuleStr = R"( HloModule m test { p0 = f32[8,9] parameter(0) p1 = f32[8,9] parameter(1) s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]} s01 = f32[5,9] slice(f32[8,9] p0), slice={[2:7], [0:9]} s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]} s11 = f32[5,9] slice(f32[8,9] p1), slice={[2:7], [0:9]} add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] s10) add1 = f32[5,9] add(f32[5,9] s01, f32[5,9] s11) ROOT tuple = (f32[2,9], f32[5,9]) tuple(add0, add1) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  SliceSinker slice_sinker;
  TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
  EXPECT_FALSE(result);
}

// Slices omit 2 of 9 columns, so the group's elements don't reach the
// source's element count: not beneficial, no change.
TEST_F(SliceSinkerTest, OverlappingPartialSlicesNotBeneficial) {
  const char* kModuleStr = R"( HloModule m test { p0 = f32[8,9] parameter(0) p1 = f32[8,9] parameter(1) s00 = f32[2,7] slice(f32[8,9] p0), slice={[0:2], [0:7]} s01 = f32[6,7] slice(f32[8,9] p0), slice={[2:8], [0:7]} s10 = f32[2,7] slice(f32[8,9] p1), slice={[0:2], [0:7]} s11 = f32[6,7] slice(f32[8,9] p1), slice={[2:8], [0:7]} add0 = f32[2,7] add(f32[2,7] s00, f32[2,7] s10) add1 = f32[6,7] add(f32[6,7] s01, f32[6,7] s11) ROOT tuple = (f32[2,7], f32[6,7]) tuple(add0, add1) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  SliceSinker slice_sinker;
  TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
  EXPECT_FALSE(result);
}

// add1 takes its slices in swapped source order (p1's slice first): operands
// don't line up positionally with add0's, so no transformation.
TEST_F(SliceSinkerTest, DifferentOrderingOfSliceSources) {
  const char* kModuleStr = R"( HloModule m test { p0 = f32[8,7] parameter(0) p1 = f32[8,7] parameter(1) s00 = f32[2,7] slice(f32[8,7] p0), slice={[0:2], [0:7]} s01 = f32[6,7] slice(f32[8,7] p0), slice={[2:8], [0:7]} s10
= f32[2,7] slice(f32[8,7] p1), slice={[0:2], [0:7]} s11 = f32[6,7] slice(f32[8,7] p1), slice={[2:8], [0:7]} add0 = f32[2,7] add(f32[2,7] s00, f32[2,7] s10) add1 = f32[6,7] add(f32[6,7] s11, f32[6,7] s01) ROOT tuple = (f32[2,7], f32[6,7]) tuple(add0, add1) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  SliceSinker slice_sinker;
  TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
  EXPECT_FALSE(result);
}

// Each add mixes slices taken at different offsets of the two sources, so the
// per-operation slice configurations disagree: no transformation.
TEST_F(SliceSinkerTest, SlicesFromDifferentIndices) {
  const char* kModuleStr = R"( HloModule m test { p0 = f32[8,9] parameter(0) p1 = f32[8,9] parameter(1) s00 = f32[4,9] slice(f32[8,9] p0), slice={[0:4], [0:9]} s01 = f32[4,9] slice(f32[8,9] p0), slice={[4:8], [0:9]} s10 = f32[4,9] slice(f32[8,9] p1), slice={[0:4], [0:9]} s11 = f32[4,9] slice(f32[8,9] p1), slice={[4:8], [0:9]} add0 = f32[4,9] add(f32[4,9] s01, f32[4,9] s10) add1 = f32[4,9] add(f32[4,9] s00, f32[4,9] s11) ROOT tuple = (f32[4,9], f32[4,9]) tuple(add0, add1) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  SliceSinker slice_sinker;
  TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
  EXPECT_FALSE(result);
}

// multiply vs add on the two slice groups: different opcodes form no peer
// group, so no transformation.
TEST_F(SliceSinkerTest, DifferentOperator) {
  const char* kModuleStr = R"( HloModule m test { p0 = f32[8,9] parameter(0) p1 = f32[8,9] parameter(1) s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]} s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]} s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]} s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]} mul = f32[2,9] multiply(f32[2,9] s00, f32[2,9] s10) add = f32[6,9] add(f32[6,9] s01, f32[6,9] s11) ROOT tuple = (f32[2,9], f32[6,9]) tuple(mul, add) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  SliceSinker slice_sinker;
  TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
  EXPECT_FALSE(result);
}

// Same opcode (compare) but different attributes (direction=GT vs LT): the
// operations are not "the same op", so no transformation.
TEST_F(SliceSinkerTest,
       SameOperatorDifferentAttributes) {
  const char* kModuleStr = R"( HloModule m test { p0 = f32[8,9] parameter(0) p1 = f32[8,9] parameter(1) s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]} s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]} s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]} s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]} cmp1 = pred[2,9] compare(f32[2,9] s00, f32[2,9] s10), direction=GT cmp2 = pred[6,9] compare(f32[6,9] s01, f32[6,9] s11), direction=LT ROOT tuple = (pred[2,9], pred[6,9]) tuple(cmp1, cmp2) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  SliceSinker slice_sinker;
  TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
  EXPECT_FALSE(result);
}

// Slices feeding two distinct peer groups (adds and multiplies): both groups
// are sunk, and the root becomes slices of the two full-shape operations.
TEST_F(SliceSinkerTest, SlicesWithMultiUsers) {
  const char* kModuleStr = R"( HloModule m test { p0 = f32[8,9] parameter(0) p1 = f32[8,9] parameter(1) s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]} s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]} s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]} s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]} add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] s10) add1 = f32[6,9] add(f32[6,9] s01, f32[6,9] s11) mul0 = f32[2,9] multiply(f32[2,9] s00, f32[2,9] s10) mul1 = f32[6,9] multiply(f32[6,9] s01, f32[6,9] s11) ROOT tuple = (f32[2,9], f32[6,9], f32[2,9], f32[6,9]) tuple(add0, add1, mul0, mul1) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  SliceSinker slice_sinker;
  TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get()));
  EXPECT_TRUE(result);
  HloInstruction* inst = module->entry_computation()->root_instruction();
  const HloInstruction* slice0;
  const HloInstruction* slice1;
  const HloInstruction* slice2;
  const HloInstruction* slice3;
  EXPECT_THAT(
      inst,
      GmockMatch(m::Tuple(
          m::Slice(&slice0, m::Add(m::Parameter(0), m::Parameter(1))),
          m::Slice(&slice1, m::Add(m::Parameter(0), m::Parameter(1))),
m::Slice(&slice2, m::Multiply(m::Parameter(0), m::Parameter(1))), m::Slice(&slice3, m::Multiply(m::Parameter(0), m::Parameter(1)))))); EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0)); EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9)); EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1)); EXPECT_THAT(slice1->slice_starts(), ElementsAre(2, 0)); EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9)); EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1)); EXPECT_THAT(slice2->slice_starts(), ElementsAre(0, 0)); EXPECT_THAT(slice2->slice_limits(), ElementsAre(2, 9)); EXPECT_THAT(slice2->slice_strides(), ElementsAre(1, 1)); EXPECT_THAT(slice3->slice_starts(), ElementsAre(2, 0)); EXPECT_THAT(slice3->slice_limits(), ElementsAre(8, 9)); EXPECT_THAT(slice3->slice_strides(), ElementsAre(1, 1)); } TEST_F(SliceSinkerTest, NonElementWise) { const char* kModuleStr = R"( HloModule m test { p0 = f32[8] parameter(0) s00 = f32[2] slice(f32[8] p0), slice={[0:2]} s01 = f32[6] slice(f32[8] p0), slice={[2:8]} bc0 = f32[2,9] broadcast(f32[2] s00), dimensions={0} bc1 = f32[6,9] broadcast(f32[6] s01), dimensions={0} ROOT tuple = (f32[2,9], f32[6,9]) tuple(bc0, bc1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); SliceSinker slice_sinker; TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get())); EXPECT_FALSE(result); } TEST_F(SliceSinkerTest, SlicesWithNontrivialStrides) { const char* kModuleStr = R"( HloModule m test { p0 = f32[8,9] parameter(0) p1 = f32[8,9] parameter(1) s00 = f32[4,9] slice(f32[8,9] p0), slice={[0:7:2], [0:9]} s01 = f32[4,9] slice(f32[8,9] p0), slice={[1:8:2], [0:9]} s10 = f32[4,9] slice(f32[8,9] p1), slice={[0:7:2], [0:9]} s11 = f32[4,9] slice(f32[8,9] p1), slice={[1:8:2], [0:9]} add0 = f32[4,9] add(f32[4,9] s00, f32[4,9] s10) add1 = f32[4,9] add(f32[4,9] s01, f32[4,9] s11) ROOT tuple = (f32[4,9], f32[4,9]) tuple(add0, add1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, 
ParseAndReturnVerifiedModule(kModuleStr)); SliceSinker slice_sinker; TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get())); EXPECT_TRUE(result); HloInstruction* inst = module->entry_computation()->root_instruction(); const HloInstruction* slice0; const HloInstruction* slice1; EXPECT_THAT( inst, GmockMatch(m::Tuple( m::Slice(&slice0, m::Add(m::Parameter(0), m::Parameter(1))), m::Slice(&slice1, m::Add(m::Parameter(0), m::Parameter(1)))))); EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0)); EXPECT_THAT(slice0->slice_limits(), ElementsAre(7, 9)); EXPECT_THAT(slice0->slice_strides(), ElementsAre(2, 1)); EXPECT_THAT(slice1->slice_starts(), ElementsAre(1, 0)); EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9)); EXPECT_THAT(slice1->slice_strides(), ElementsAre(2, 1)); } TEST_F(SliceSinkerTest, NotAllSliceOperand) { const char* kModuleStr = R"( HloModule m test { p0 = f32[8,9] parameter(0) p1 = f32[2,9] parameter(1) p2 = f32[6,9] parameter(2) s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]} s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]} abs0 = f32[2,9] abs(f32[2,9] p1) abs1 = f32[6,9] abs(f32[6,9] p2) add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] abs0) add1 = f32[6,9] add(f32[6,9] s01, f32[6,9] abs1) ROOT tuple = (f32[2,9], f32[6,9]) tuple(add0, add1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); SliceSinker slice_sinker; TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get())); EXPECT_FALSE(result); } TEST_F(SliceSinkerTest, Cascade) { const char* kModuleStr = R"( HloModule m test { p0 = f32[8,9] parameter(0) p1 = f32[8,9] parameter(1) s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]} s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]} s10 = f32[2,9] slice(f32[8,9] p1), slice={[0:2], [0:9]} s11 = f32[6,9] slice(f32[8,9] p1), slice={[2:8], [0:9]} abs0 = f32[2,9] abs(f32[2,9] s10) abs1 = f32[6,9] abs(f32[6,9] s11) add0 = f32[2,9] add(f32[2,9] s00, f32[2,9] 
abs0) add1 = f32[6,9] add(f32[6,9] s01, f32[6,9] abs1) ROOT tuple = (f32[2,9], f32[6,9]) tuple(add0, add1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); SliceSinker slice_sinker; TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get())); EXPECT_TRUE(result); HloInstruction* inst = module->entry_computation()->root_instruction(); const HloInstruction* slice0; const HloInstruction* slice1; EXPECT_THAT( inst, GmockMatch(m::Tuple( m::Slice(&slice0, m::Add(m::Parameter(0), m::Abs(m::Parameter(1)))), m::Slice(&slice1, m::Add(m::Parameter(0), m::Abs(m::Parameter(1))))))); EXPECT_THAT(slice0->slice_starts(), ElementsAre(0, 0)); EXPECT_THAT(slice0->slice_limits(), ElementsAre(2, 9)); EXPECT_THAT(slice0->slice_strides(), ElementsAre(1, 1)); EXPECT_THAT(slice1->slice_starts(), ElementsAre(2, 0)); EXPECT_THAT(slice1->slice_limits(), ElementsAre(8, 9)); EXPECT_THAT(slice1->slice_strides(), ElementsAre(1, 1)); } TEST_F(SliceSinkerTest, SameOpcodeDifferentResultElementTypes) { const char* kModuleStr = R"( HloModule m test { p0 = f32[8,9] parameter(0) s00 = f32[2,9] slice(f32[8,9] p0), slice={[0:2], [0:9]} s01 = f32[6,9] slice(f32[8,9] p0), slice={[2:8], [0:9]} convert0 = s32[2,9] convert(f32[2,9] s00) convert1 = s64[6,9] convert(f32[6,9] s01) ROOT tuple = (s32[2,9], s64[6,9]) tuple(convert0, convert1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); SliceSinker slice_sinker; TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&slice_sinker, module.get())); EXPECT_FALSE(result); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/slice_sinker.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/slice_sinker_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
3d74cae6-8e08-4595-b6bf-2d5ca27cc59f
cpp
tensorflow/tensorflow
convert_mover
third_party/xla/xla/service/convert_mover.cc
third_party/xla/xla/service/convert_mover_test.cc
#include "xla/service/convert_mover.h"

// FIX: std::deque and std::vector are used below (the move-up work queue and
// the post-order snapshot) but neither header was included; add them so the
// file does not rely on transitive includes.
#include <deque>
#include <vector>

#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace {

// Returns true iff `literal` survives a round trip through `dst_ty`:
// converting to `dst_ty` and back to the original element type must succeed
// and reproduce the literal exactly.  Used to decide whether a constant
// operand may be rewritten in a narrower type without changing semantics.
static bool IsLosslesslyConvertibleTo(const Literal& literal,
                                      PrimitiveType dst_ty) {
  PrimitiveType orig_ty = literal.shape().element_type();
  absl::StatusOr<Literal> converted1 = literal.Convert(dst_ty);
  if (!converted1.ok()) {
    // Conversion to dst_ty is not even possible (e.g. complex -> real).
    return false;
  }
  absl::StatusOr<Literal> converted2 = converted1->Convert(orig_ty);
  if (!converted2.ok()) {
    return false;
  }
  return literal == *converted2;
}

// Opcodes through which a convert can be commuted: these ops only move or
// replicate element values, so convert(op(x)) == op(convert(x)).
bool OpCommutesWithConvert(HloOpcode opcode) {
  switch (opcode) {
    case HloOpcode::kConcatenate:
    case HloOpcode::kPad:
    case HloOpcode::kReshape:
    case HloOpcode::kSlice:
    case HloOpcode::kTranspose:
      return true;
    default:
      return false;
  }
}

// Runs both rewrites on one computation and reports whether anything changed:
//  - phase 1 sinks precision-INCREASING converts (src bit width < dst bit
//    width) below ops that commute with convert, so the op runs in the
//    narrower type;
//  - phase 2 hoists precision-DECREASING converts (src bit width > dst bit
//    width) above such ops, again so the op runs in the narrower type.
absl::StatusOr<bool> MoveConvertPrecisionOps(HloComputation* comp) {
  bool changed = false;
  // Phase 1: move increase-precision converts down the graph.
  for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
    // Candidate ops must commute with convert and have only convert
    // (single-user) or constant operands.
    if (!OpCommutesWithConvert(instr->opcode()) ||
        instr->operand_count() == 0 ||
        !absl::c_all_of(instr->operands(), [](const HloInstruction* operand) {
          return (operand->opcode() == HloOpcode::kConvert &&
                  operand->user_count() == 1) ||
                 operand->opcode() == HloOpcode::kConstant;
        })) {
      continue;
    }
    auto convert_op_it = absl::c_find_if(
        instr->operands(), HloPredicateIsOp<HloOpcode::kConvert>);
    if (convert_op_it == instr->operands().end()) {
      // All operands are constants; nothing to sink.
      continue;
    }
    const HloInstruction* convert_op = *convert_op_it;
    // Every convert operand must originate from the same source element type.
    if (!absl::c_all_of(instr->operands(), [&](const HloInstruction* operand) {
          return operand->opcode() != HloOpcode::kConvert ||
                 operand->operand(0)->shape().element_type() ==
                     convert_op->operand(0)->shape().element_type();
        })) {
      continue;
    }
    PrimitiveType src_ty = convert_op->operand(0)->shape().element_type();
    PrimitiveType dst_ty = convert_op->shape().element_type();
    // Only sink converts that INCREASE precision; otherwise phase 2 applies.
    if (primitive_util::BitWidth(src_ty) >= primitive_util::BitWidth(dst_ty)) {
      continue;
    }
    // Constants get rewritten into src_ty, which is only sound when lossless.
    if (absl::c_any_of(instr->operands(), [&](const HloInstruction* operand) {
          return operand->opcode() == HloOpcode::kConstant &&
                 !IsLosslesslyConvertibleTo(operand->literal(), src_ty);
        })) {
      continue;
    }
    // Skip sub-byte source types (e.g. 4-bit ints).
    if (primitive_util::IsSubByteNonPredType(src_ty)) {
      continue;
    }
    VLOG(2) << "Moving increase-precision convert op "
            << convert_op->ToString()
            << " down the graph: " << instr->ToString();
    absl::InlinedVector<HloInstruction*, 8> new_operands;
    new_operands.reserve(instr->operand_count());
    for (HloInstruction* operand : instr->operands()) {
      switch (operand->opcode()) {
        case HloOpcode::kConvert:
          // Bypass the convert: feed its input directly.
          new_operands.push_back(operand->mutable_operand(0));
          break;
        case HloOpcode::kConstant:
          // Lossless by the check above.
          new_operands.push_back(MakeConvertToHlo(operand, src_ty));
          break;
        default:
          LOG(FATAL) << "Unexpected opcode in " << operand->ToString();
      }
    }
    // Clone the op in the narrow type, then convert the result once.
    Shape new_shape = instr->shape();
    new_shape.set_element_type(src_ty);
    HloInstruction* new_instr = comp->AddInstruction(
        instr->CloneWithNewOperands(new_shape, new_operands));
    TF_RETURN_IF_ERROR(comp->ReplaceWithNewInstruction(
        instr, HloInstruction::CreateConvert(instr->shape(), new_instr)));
    changed = true;
  }

  // Phase 2: move decrease-precision converts up the graph.  Processed as a
  // worklist in reverse post order; newly created converts are pushed onto the
  // front so chains of commuting ops are hoisted through transitively.
  std::deque<HloInstruction*> work_queue;
  std::vector<HloInstruction*> instrs = comp->MakeInstructionPostOrder();
  work_queue.insert(work_queue.end(), instrs.rbegin(), instrs.rend());
  while (!work_queue.empty()) {
    HloInstruction* instr = work_queue.front();
    work_queue.pop_front();
    // Only hoist a convert whose sole-user producer commutes with convert.
    if (instr->opcode() != HloOpcode::kConvert ||
        instr->operand(0)->user_count() != 1 ||
        !OpCommutesWithConvert(instr->operand(0)->opcode())) {
      continue;
    }
    PrimitiveType src_ty = instr->operand(0)->shape().element_type();
    PrimitiveType dst_ty = instr->shape().element_type();
    // Only hoist converts that DECREASE precision.
    if (primitive_util::BitWidth(src_ty) <= primitive_util::BitWidth(dst_ty)) {
      continue;
    }
    if (primitive_util::IsSubByteNonPredType(dst_ty)) {
      continue;
    }
    VLOG(2) << "Moving decrease-precision convert up the graph: "
            << instr->ToString();
    HloInstruction* to_convert = instr->mutable_operand(0);
    // Convert each operand of the producer to the narrow type instead.
    absl::InlinedVector<HloInstruction*, 8> new_operands;
    new_operands.reserve(to_convert->operand_count());
    for (HloInstruction* operand : to_convert->operands()) {
      // Re-enqueue the fresh convert so it can be hoisted further.
      work_queue.push_front(MakeConvertToHlo(operand, dst_ty));
      new_operands.push_back(work_queue.front());
    }
    Shape new_shape = to_convert->shape();
    new_shape.set_element_type(dst_ty);
    TF_RETURN_IF_ERROR(comp->ReplaceWithNewInstruction(
        instr, to_convert->CloneWithNewOperands(new_shape, new_operands)));
    changed = true;
  }
  return changed;
}

}  // namespace

// Pass entry point: applies MoveConvertPrecisionOps to every non-fusion
// computation in `module` and returns whether any computation changed.
absl::StatusOr<bool> ConvertMover::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool changed = false;
  for (HloComputation* comp :
       module->MakeNonfusionComputations(execution_threads)) {
    TF_ASSIGN_OR_RETURN(bool changed_computation,
                        MoveConvertPrecisionOps(comp));
    changed |= changed_computation;
  }
  return changed;
}

}  // namespace xla
// Unit tests for the ConvertMover pass: checks that precision-increasing
// converts are sunk below (and precision-decreasing converts hoisted above)
// ops that commute with convert, and that blocked cases stay unchanged.
#include "xla/service/convert_mover.h"

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace {

namespace m = ::xla::match;

class ConvertMoverTest : public HloTestBase {
 public:
  // NOTE(review): the two bools are positional HloTestBase constructor
  // arguments — confirm their meaning against HloTestBase before relying
  // on them.
  ConvertMoverTest() : HloTestBase(false, false) {}
};

// Matcher helpers: match a convert whose RESULT element type is the given
// primitive type, wrapping an arbitrary operand matcher.
template <typename T>
auto MatchConvertToS8(T&& operand) {
  return m::Convert(operand).WithShape(m::Shape().WithElementType(S8));
}
template <typename T>
auto MatchConvertToF16(T&& operand) {
  return m::Convert(operand).WithShape(m::Shape().WithElementType(F16));
}
template <typename T>
auto MatchConvertToF32(T&& operand) {
  return m::Convert(operand).WithShape(m::Shape().WithElementType(F32));
}
template <typename T>
auto MatchConvertToC64(T&& operand) {
  return m::Convert(operand).WithShape(m::Shape().WithElementType(C64));
}

// f16->f32 converts feeding a concat are sunk: expect concat in f16 followed
// by a single convert to f32.
TEST_F(ConvertMoverTest, MoveDownThroughConcat) {
  absl::string_view module_string = R"( HloModule module ENTRY main { ROOT root = concatenate(f32[10] convert(f16[10] parameter(0)), f32[10] convert(f16[10] parameter(1))), dimensions={0} })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  ConvertMover pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
  SCOPED_TRACE(module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(MatchConvertToF32(
                  m::Concatenate(m::Parameter(0), m::Parameter(1)))));
}

// Converts with DIFFERENT source types (bf16 vs f16) must not be sunk.
TEST_F(ConvertMoverTest, NoMoveDownThroughConcatWithDifferentSrcTypes) {
  absl::string_view module_string = R"( HloModule module ENTRY main { ROOT root = concatenate(f32[10] convert(bf16[10] parameter(0)), f32[10] convert(f16[10] parameter(1))), dimensions={0} })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  ConvertMover pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
  SCOPED_TRACE(module->ToString());
  EXPECT_FALSE(changed);
}

// A precision-decreasing convert (f32->f16) after a reshape is hoisted above
// the reshape.
TEST_F(ConvertMoverTest, MoveUpReshape) {
  absl::string_view module_string = R"( HloModule module ENTRY main { ROOT root = f16[10,10] convert(f32[10,10] reshape(f32[100] parameter(0))) })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  ConvertMover pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
  SCOPED_TRACE(module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Reshape(MatchConvertToF16(m::Parameter(0)))));
}

// The hoist is transitive: the convert moves up through BOTH transposes.
TEST_F(ConvertMoverTest, MoveUpTwoTransposes) {
  absl::string_view module_string = R"( HloModule module ENTRY main { t1 = transpose(f32[3,4] parameter(0)), dimensions={1,0} t2 = transpose(t1), dimensions={1,0} ROOT root = f16[3,4] convert(t2) })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  ConvertMover pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
  SCOPED_TRACE(module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Transpose(
                  m::Transpose(MatchConvertToF16(m::Parameter(0))))));
}

// The sink is transitive: the f16->f32 convert moves below BOTH slices.
TEST_F(ConvertMoverTest, MoveDownTwoSlices) {
  absl::string_view module_string = R"( HloModule module ENTRY main { slice1 = f32[9] slice(f32[10] convert(f16[10] parameter(0))), slice={[0:9]} ROOT slice2 = f32[8] slice(slice1), slice={[0:8]} })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  ConvertMover pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
  SCOPED_TRACE(module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(MatchConvertToF32(m::Slice(m::Slice(m::Parameter(0))))));
}

// f32->c64 is precision-increasing, so it is sunk below the concat.
TEST_F(ConvertMoverTest, MoveDownC64) {
  absl::string_view module_string = R"( HloModule module ENTRY main { ROOT root = concatenate(c64[10] convert(f32[10] parameter(0)), c64[10] convert(f32[10] parameter(1))), dimensions={0} })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  ConvertMover pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
  SCOPED_TRACE(module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(MatchConvertToC64(
                  m::Concatenate(m::Parameter(0), m::Parameter(1)))));
}

// A c64 constant with nonzero imaginary parts blocks the sink — presumably
// because it cannot be losslessly rewritten as f32; no change expected.
TEST_F(ConvertMoverTest, MoveDownC64Constant) {
  absl::string_view module_string = R"( HloModule module ENTRY main { ROOT root = concatenate(c64[2] convert(f32[2] parameter(0)), c64[2] convert(f32[2] parameter(1)), c64[2] constant({(1,1), (-1,-1)})), dimensions={0} })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  ConvertMover pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
  SCOPED_TRACE(module->ToString());
  EXPECT_FALSE(changed);
}

// Hoisting f32->f16 above a pad converts both the data operand and the
// scalar padding-value constant.
TEST_F(ConvertMoverTest, MoveUpPad) {
  absl::string_view module_string = R"( HloModule module ENTRY main { pad = f32[10] pad(f32[8] parameter(0), f32[] constant(0)), padding=1_1 ROOT root = f16[10] convert(pad) })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  ConvertMover pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
  SCOPED_TRACE(module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(m::Pad(MatchConvertToF16(m::Parameter(0)),
                        MatchConvertToF16(m::ConstantEffectiveScalar(0)))));
}

// The hoist (move-up) path still fires even when the constant (1000) is out
// of range for the narrow type (s8) — only the sink path checks losslessness.
TEST_F(ConvertMoverTest, MoveUpPadWithOutOfRangeConstant) {
  absl::string_view module_string = R"( HloModule module ENTRY main { pad = s32[10] pad(s32[8] parameter(0), s32[] constant(1000)), padding=1_1 ROOT root = s8[10] convert(pad) })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  ConvertMover pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
  SCOPED_TRACE(module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(m::Pad(MatchConvertToS8(m::Parameter(0)),
                        MatchConvertToS8(m::ConstantEffectiveScalar(1000)))));
}

// Sinking below a pad: the f32 padding constant 0 is losslessly rewritten as
// f16, and a single convert to f32 is placed after the pad.
TEST_F(ConvertMoverTest, MoveDownPad) {
  absl::string_view module_string = R"( HloModule module ENTRY main { ROOT pad = f32[10] pad(f32[8] convert(f16[8] parameter(0)), f32[] constant(0)), padding=1_1 })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  ConvertMover pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
  SCOPED_TRACE(module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(MatchConvertToF32(m::Pad(
          m::Parameter(0), MatchConvertToF16(m::ConstantEffectiveScalar(0))))));
}

// 1e9 is not representable in f16, so the sink below the pad is blocked.
TEST_F(ConvertMoverTest, NoMoveDownPadBecauseConstantIsOutOfRange) {
  absl::string_view module_string = R"( HloModule module ENTRY main { ROOT pad = f32[10] pad(f32[8] convert(f16[8] parameter(0)), f32[] constant(1e9)), padding=1_1 })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  ConvertMover pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
  SCOPED_TRACE(module->ToString());
  EXPECT_FALSE(changed);
}

}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convert_mover.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convert_mover_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
2b440a48-9e51-478c-ae66-5556f1655335
cpp
tensorflow/tensorflow
hlo_cost_analysis
third_party/xla/xla/service/hlo_cost_analysis.cc
third_party/xla/xla/service/hlo_cost_analysis_test.cc
#include "xla/service/hlo_cost_analysis.h" #include <algorithm> #include <cmath> #include <cstdint> #include <functional> #include <iterator> #include <memory> #include <optional> #include <string> #include <utility> #include "absl/algorithm/container.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/tsl/lib/gtl/map_util.h" #include "xla/util.h" #include "xla/window_util.h" #include "tsl/platform/errors.h" namespace xla { HloCostAnalysis::HloCostAnalysis(const Options& options) : options_(options) {} HloCostAnalysis::HloCostAnalysis(ShapeSizeFunction shape_size, const Properties& per_second_rates, const Properties& min_latencies_seconds) : HloCostAnalysis( Options{shape_size, per_second_rates, min_latencies_seconds}) {} absl::Status HloCostAnalysis::Preprocess(const HloInstruction* hlo) { current_properties_ = Properties(); current_should_compute_bottleneck_time_ = true; float bytes_accessed = GetShapeSize(hlo->shape()); current_properties_.set_output_bytes_accessed(GetShapeSize(hlo->shape())); for (int64_t i = 0; i < hlo->operand_count(); ++i) { const HloInstruction* operand = hlo->operand(i); bytes_accessed += GetShapeSize(operand->shape()); current_properties_.set_operand_bytes_accessed( i, GetShapeSize(operand->shape())); current_properties_.set_operand_utilization(i, 1.0); } current_properties_[kBytesAccessedKey] = bytes_accessed; return absl::OkStatus(); } absl::Status HloCostAnalysis::Postprocess(const HloInstruction* hlo) { if (current_should_compute_bottleneck_time_) { float optimal_seconds = 0.0f; current_properties_.ForEach([&](absl::string_view key, float val) { if (key == kOptimalSecondsKey) { return; } float per_second_rate = 
options_.per_second_rate(key); if (per_second_rate != 0) { float time_for_key = std::max(val / per_second_rate, options_.min_latency_seconds(key)); optimal_seconds = std::max(optimal_seconds, time_for_key); } }); current_properties_[kOptimalSecondsKey] = optimal_seconds; } current_properties_.ForEach( [&](absl::string_view key, float val) { properties_sum_[key] += val; }); auto [it_ignored, inserted] = hlo_properties_.emplace(hlo, std::move(current_properties_)); current_properties_ = Properties(); TF_RET_CHECK(inserted); return absl::OkStatus(); } absl::Status HloCostAnalysis::RemoveInstruction(HloInstruction* instruction) { auto it = hlo_properties_.find(instruction); if (it != hlo_properties_.end()) { current_properties_ = it->second; current_properties_.ForEach( [&](absl::string_view key, float val) { properties_sum_[key] -= val; }); hlo_properties_.erase(instruction); } return absl::OkStatus(); } absl::Status HloCostAnalysis::RevisitInstruction(HloInstruction* instruction) { TF_RETURN_IF_ERROR(RemoveInstruction(instruction)); TF_RETURN_IF_ERROR(Preprocess(instruction)); TF_RETURN_IF_ERROR(instruction->Visit(this)); TF_RETURN_IF_ERROR(Postprocess(instruction)); return absl::OkStatus(); } absl::Status HloCostAnalysis::HandleElementwiseOp( const HloInstruction* hlo_instruction) { const auto& shape = hlo_instruction->shape(); auto computation_count = ShapeUtil::ElementsIn(shape); auto opcode = hlo_instruction->opcode(); if (opcode == HloOpcode::kErf || opcode == HloOpcode::kExp || opcode == HloOpcode::kLog || opcode == HloOpcode::kLogistic || opcode == HloOpcode::kPower || opcode == HloOpcode::kSqrt || opcode == HloOpcode::kCbrt || opcode == HloOpcode::kRsqrt || opcode == HloOpcode::kTanh || opcode == HloOpcode::kSin || opcode == HloOpcode::kCos || opcode == HloOpcode::kExpm1 || opcode == HloOpcode::kLog1p || opcode == HloOpcode::kAtan2 || opcode == HloOpcode::kTan) { current_properties_[kTranscendentalsKey] = computation_count; } else { 
current_properties_[kFlopsKey] = computation_count; } return absl::OkStatus(); } float HloCostAnalysis::GetPropertyForHlo( const HloInstruction& hlo, absl::string_view key, const HloToProperties& hlo_to_properties) { auto it = hlo_to_properties.find(&hlo); if (it == hlo_to_properties.end()) { return 0.0f; } return it->second[key]; } int64_t HloCostAnalysis::GetShapeSize(const Shape& shape) const { if (!LayoutUtil::HasLayout(shape)) { return 0; } if (LayoutUtil::IsSparseArray(shape)) { return 0; } return options_.shape_size(shape); } int64_t HloCostAnalysis::FusionParameterReadBytes( const HloInstruction* hlo) const { CHECK(hlo->IsFused() && (hlo->opcode() == HloOpcode::kParameter || hlo->opcode() == HloOpcode::kGetTupleElement)); auto handle_slice = [this](const HloInstruction* hlo, const HloInstruction* user) -> int64_t { return GetShapeSize(user->shape()); }; auto handle_dynamic_slice = [this](const HloInstruction* hlo, const HloInstruction* user, bool& seen_trivial_user) -> int64_t { if (hlo == user->operand(0)) { return GetShapeSize(user->shape()); } if (!seen_trivial_user) { seen_trivial_user = true; return GetShapeSize(hlo->shape()); } return 0; }; auto handle_dynamic_update_slice = [this](const HloInstruction* hlo, const HloInstruction* user, bool& seen_trivial_user) -> int64_t { if (hlo != user->operand(0) && !seen_trivial_user) { seen_trivial_user = true; return GetShapeSize(hlo->shape()); } return 0; }; int64_t size = 0; bool seen_trivial_user = false; for (const HloInstruction* user : hlo->users()) { switch (user->opcode()) { case HloOpcode::kFusion: { for (int64_t idx : user->OperandIndices(hlo)) { bool nested_seen_trivial_user = false; const auto& fusion_users = user->users(); const HloInstruction* root_instruction = user->fused_instructions_computation()->root_instruction(); const bool fusion_is_simple = user->fused_parameter(idx) == root_instruction->operand(0); for (const HloInstruction* fusion_user : fusion_users) { if (fusion_is_simple && 
fusion_user->opcode() == HloOpcode::kSlice) { size += handle_slice(user, fusion_user); } else if (fusion_is_simple && fusion_user->opcode() == HloOpcode::kDynamicSlice) { size += handle_dynamic_slice(user, fusion_user, nested_seen_trivial_user); } else if (fusion_is_simple && fusion_user->opcode() == HloOpcode::kDynamicUpdateSlice) { size += handle_dynamic_update_slice(user, fusion_user, nested_seen_trivial_user); } else if (!nested_seen_trivial_user) { nested_seen_trivial_user = true; size += FusionParameterReadBytes(user->fused_parameter(idx)); } } } break; } case HloOpcode::kSlice: size += handle_slice(hlo, user); break; case HloOpcode::kDynamicSlice: size += handle_dynamic_slice(hlo, user, seen_trivial_user); break; case HloOpcode::kDynamicUpdateSlice: size += handle_dynamic_update_slice(hlo, user, seen_trivial_user); break; case HloOpcode::kBroadcast: case HloOpcode::kReshape: size += GetShapeSize(hlo->shape()); break; default: if (!seen_trivial_user) { seen_trivial_user = true; size += GetShapeSize(hlo->shape()); } } } return size; } absl::Status HloCostAnalysis::FusionCalculateUtilizations( const HloInstruction* fusion) { for (const HloInstruction* instr : fusion->fused_instructions_computation()->instructions()) { if (ShouldFilterFusionInstruction(fusion, instr)) { hlo_properties_[instr][kUtilizationKey] = 0.f; } else { hlo_properties_[instr][kUtilizationKey] = 1.f; } } return absl::OkStatus(); } absl::Status HloCostAnalysis::HandleElementwiseUnary( const HloInstruction* hlo) { return HandleElementwiseOp(hlo); } absl::Status HloCostAnalysis::HandleElementwiseBinary( const HloInstruction* hlo) { return HandleElementwiseOp(hlo); } absl::Status HloCostAnalysis::HandleCompare(const HloInstruction* compare) { return HandleElementwiseOp(compare); } absl::Status HloCostAnalysis::HandleClamp(const HloInstruction* clamp) { return HandleElementwiseOp(clamp); } absl::Status HloCostAnalysis::HandleReducePrecision(const HloInstruction* hlo) { return 
HandleElementwiseOp(hlo); } absl::Status HloCostAnalysis::HandleParameter(const HloInstruction*) { current_should_compute_bottleneck_time_ = false; current_properties_[kBytesAccessedKey] = 0; current_properties_.set_output_bytes_accessed(0); current_properties_[kOptimalSecondsKey] = 0; return absl::OkStatus(); } absl::Status HloCostAnalysis::HandleConstant(const HloInstruction*) { current_should_compute_bottleneck_time_ = false; current_properties_[kBytesAccessedKey] = 0; current_properties_.set_output_bytes_accessed(0); current_properties_[kOptimalSecondsKey] = 0; return absl::OkStatus(); } absl::Status HloCostAnalysis::HandleIota(const HloInstruction*) { return absl::OkStatus(); } absl::Status HloCostAnalysis::HandleGetTupleElement( const HloInstruction* get_tuple_element) { current_should_compute_bottleneck_time_ = false; current_properties_[kBytesAccessedKey] = 0; current_properties_.set_output_bytes_accessed(0); current_properties_.set_operand_bytes_accessed(0, 0); current_properties_[kOptimalSecondsKey] = 0; return absl::OkStatus(); } absl::Status HloCostAnalysis::HandleSelect(const HloInstruction* hlo) { return HandleElementwiseOp(hlo); } absl::Status HloCostAnalysis::HandleReverse(const HloInstruction*) { return absl::OkStatus(); } absl::Status HloCostAnalysis::HandleSlice(const HloInstruction* slice) { const int64_t output_shape_size = GetShapeSize(slice->shape()); const int64_t num_input_elements = ShapeUtil::ElementsIn(slice->operand(0)->shape()); const int64_t num_output_elements = ShapeUtil::ElementsIn(slice->shape()); current_properties_[kBytesAccessedKey] = output_shape_size * 2; current_properties_.set_output_bytes_accessed(output_shape_size); current_properties_.set_operand_bytes_accessed(0, output_shape_size); current_properties_.set_operand_utilization( 0, 1.0 * num_output_elements / num_input_elements); return absl::OkStatus(); } absl::Status HloCostAnalysis::HandleDynamicSlice( const HloInstruction* dynamic_slice) { const int64_t 
      output_shape_size = GetShapeSize(dynamic_slice->shape());
  const int64_t start_indices_shape_size =
      GetShapeSize(dynamic_slice->operand(1)->shape());
  const int64_t num_input_elements =
      ShapeUtil::ElementsIn(dynamic_slice->operand(0)->shape());
  const int64_t num_output_elements =
      ShapeUtil::ElementsIn(dynamic_slice->shape());
  // Read the sliced window + the start indices, write the output window.
  current_properties_[kBytesAccessedKey] =
      output_shape_size * 2 + start_indices_shape_size;
  current_properties_.set_output_bytes_accessed(output_shape_size);
  current_properties_.set_operand_bytes_accessed(0, output_shape_size);
  current_properties_.set_operand_bytes_accessed(1, start_indices_shape_size);
  current_properties_.set_operand_utilization(
      0, 1.0 * num_output_elements / num_input_elements);
  return absl::OkStatus();
}

// DUS is modeled as an in-place update: only the update window (operand 1)
// and the start indices (operand 2) are read, and only the update window is
// written. Operand 0 is charged zero bytes; its utilization is the fraction
// of output elements NOT overwritten by the update.
absl::Status HloCostAnalysis::HandleDynamicUpdateSlice(
    const HloInstruction* dynamic_update_slice) {
  const int64_t update_shape_size =
      GetShapeSize(dynamic_update_slice->operand(1)->shape());
  const int64_t start_indices_shape_size =
      GetShapeSize(dynamic_update_slice->operand(2)->shape());
  const int64_t num_update_elements =
      ShapeUtil::ElementsIn(dynamic_update_slice->operand(1)->shape());
  const int64_t num_output_elements =
      ShapeUtil::ElementsIn(dynamic_update_slice->shape());
  current_properties_[kBytesAccessedKey] =
      update_shape_size * 2 + start_indices_shape_size;
  current_properties_.set_output_bytes_accessed(update_shape_size);
  current_properties_.set_operand_bytes_accessed(0, 0);
  current_properties_.set_operand_bytes_accessed(1, update_shape_size);
  current_properties_.set_operand_bytes_accessed(2, start_indices_shape_size);
  current_properties_.set_operand_utilization(
      0,
      1.0 * (num_output_elements - num_update_elements) / num_output_elements);
  return absl::OkStatus();
}

// Tuple only writes out pointers to its elements; the operands themselves
// are charged zero bytes.
absl::Status HloCostAnalysis::HandleTuple(const HloInstruction* tuple) {
  current_properties_[kBytesAccessedKey] = GetShapeSize(tuple->shape());
  current_properties_.set_output_bytes_accessed(GetShapeSize(tuple->shape()));
  for (int i = 0; i <
       tuple->operand_count(); ++i) {
    current_properties_.set_operand_bytes_accessed(i, 0);
  }
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleConcatenate(const HloInstruction*) {
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleConvert(const HloInstruction* convert) {
  return HandleElementwiseOp(convert);
}

absl::Status HloCostAnalysis::HandleCopy(const HloInstruction*) {
  return absl::OkStatus();
}

// Domain markers are metadata only: no bytes accessed, no time attributed.
absl::Status HloCostAnalysis::HandleDomain(const HloInstruction* domain) {
  current_should_compute_bottleneck_time_ = false;
  current_properties_[kBytesAccessedKey] = 0;
  current_properties_.set_output_bytes_accessed(0);
  for (int i = 0; i < domain->operand_count(); ++i) {
    current_properties_.set_operand_bytes_accessed(i, 0);
  }
  current_properties_[kOptimalSecondsKey] = 0;
  return absl::OkStatus();
}

// FLOPs for a dot: one FMA per output element per step along the contracted
// (reduction) dimensions of the LHS.
int64_t HloCostAnalysis::GetDotFlops(const Shape& lhs_shape,
                                     const Shape& result_shape,
                                     const DotDimensionNumbers& dnums) {
  int64_t reduction_width = 1;
  for (auto dim : dnums.lhs_contracting_dimensions()) {
    reduction_width *= lhs_shape.dimensions(dim);
  }
  return kFmaFlops * ShapeUtil::ElementsIn(result_shape) * reduction_width;
}

absl::Status HloCostAnalysis::HandleDot(const HloInstruction* dot) {
  current_properties_[kFlopsKey] = GetDotFlops(
      dot->operand(0)->shape(), dot->shape(), dot->dot_dimension_numbers());
  return absl::OkStatus();
}

// Infeed writes each leaf of its output shape; record per-leaf and total
// output bytes.
absl::Status HloCostAnalysis::HandleInfeed(const HloInstruction* infeed) {
  int64_t size = 0;
  ShapeUtil::ForEachLeafShape(
      infeed->shape(), [&](const Shape& sub_shape, const ShapeIndex& index) {
        size += GetShapeSize(sub_shape);
        current_properties_.set_output_bytes_accessed(index,
                                                      GetShapeSize(sub_shape));
      });
  current_properties_.set_output_bytes_accessed(size);
  current_properties_[kBytesAccessedKey] = size;
  return absl::OkStatus();
}

// Outfeed reads each leaf of every operand; record per-leaf and per-operand
// bytes and accumulate the total.
absl::Status HloCostAnalysis::HandleOutfeed(const HloInstruction* outfeed) {
  current_properties_[kBytesAccessedKey] = 0;
  for (int64_t i = 0; i < outfeed->operand_count(); ++i) {
    const
        HloInstruction* operand = outfeed->operand(i);
    int64_t size = 0;
    ShapeUtil::ForEachLeafShape(
        operand->shape(), [&](const Shape& sub_shape, const ShapeIndex& index) {
          size += GetShapeSize(sub_shape);
          current_properties_.set_operand_bytes_accessed(
              i, index, GetShapeSize(sub_shape));
        });
    current_properties_.set_operand_bytes_accessed(i, size);
    current_properties_[kBytesAccessedKey] += size;
  }
  return absl::OkStatus();
}

// Map applies its subcomputation once per output element: scale every
// copyable sub-property by the element count.
absl::Status HloCostAnalysis::HandleMap(const HloInstruction* map) {
  TF_ASSIGN_OR_RETURN(const Properties sub_properties,
                      ProcessSubcomputation(map->to_apply()));
  const int64_t element_count = ShapeUtil::ElementsIn(map->shape());
  sub_properties.ForEach([&](absl::string_view key, float val) {
    if (KeyToCopyFromSubcomputation(key)) {
      current_properties_[key] = val * element_count;
    }
  });
  return absl::OkStatus();
}

// Reduce applies its subcomputation once per eliminated input element
// (inputs minus outputs); scale the sub-properties accordingly. For
// variadic reduces the output shape is taken from tuple element 0.
absl::Status HloCostAnalysis::HandleReduce(const HloInstruction* reduce) {
  HloComputation* function = reduce->to_apply();
  TF_ASSIGN_OR_RETURN(const Properties sub_properties,
                      ProcessSubcomputation(function));
  auto arg = reduce->operand(0);
  auto output_shape = reduce->shape().IsArray() ?
      reduce->shape() : reduce->shape().tuple_shapes(0);
  int64_t reduction_count = ShapeUtil::ElementsIn(arg->shape()) -
                            ShapeUtil::ElementsIn(output_shape);
  sub_properties.ForEach([&](absl::string_view key, float val) {
    if (KeyToCopyFromSubcomputation(key)) {
      current_properties_[key] = val * reduction_count;
    }
  });
  return absl::OkStatus();
}

// ReduceWindow cost: by default, (window_size - 1) applications of the
// subcomputation per output element. A special "optimized" pattern — a single
// reduced dimension whose symmetric padding makes the window span the whole
// dimension — is detected below and charged a much smaller count, mirroring
// how backends implement it.
absl::Status HloCostAnalysis::HandleReduceWindow(
    const HloInstruction* reduce_window) {
  const Window& window = reduce_window->window();
  auto function = reduce_window->to_apply();
  TF_ASSIGN_OR_RETURN(Properties sub_properties,
                      ProcessSubcomputation(function));
  int64_t window_element_count = 1;
  for (const auto& dimension : window.dimensions()) {
    window_element_count *= dimension.size();
  }
  const int64_t input_element_count =
      ShapeUtil::ElementsIn(reduce_window->operand(0)->shape());
  const int64_t output_element_count =
      ShapeUtil::ElementsIn(reduce_window->shape().IsArray()
                                ? reduce_window->shape()
                                : reduce_window->shape().tuple_shapes(0));
  // Default: each output element folds window_element_count inputs,
  // i.e. (window - 1) binary applications.
  int64_t reduction_count = (window_element_count - 1) * output_element_count;

  bool optimized_rw = false;
  int64_t logical_reduction_dim = -1;
  int64_t num_reduction_dimensions = absl::c_count_if(
      window.dimensions(),
      [](const WindowDimension& dim) { return (dim.size() != 1); });
  int64_t num_padded_dimensions =
      absl::c_count_if(window.dimensions(), [](const WindowDimension& dim) {
        return (dim.padding_low() != 0 || dim.padding_high() != 0);
      });
  if (num_reduction_dimensions == 1 && num_padded_dimensions == 1 &&
      reduce_window->shape().IsArray()) {
    // Candidate dimension: window covers 2*pad+1 elements with equal
    // low/high padding.
    auto reduction_dim =
        absl::c_find_if(window.dimensions(), [](const WindowDimension& dim) {
          return (dim.size() != 1 && dim.padding_low() != 0 &&
                  dim.padding_high() != 0 &&
                  dim.padding_low() == dim.padding_high() &&
                  dim.size() == 2 * dim.padding_low() + 1);
        });
    if (reduction_dim != window.dimensions().end()) {
      logical_reduction_dim = reduction_dim - window.dimensions().begin();
      optimized_rw =
          reduction_dim->padding_low() ==
          reduce_window->shape().dimensions(logical_reduction_dim) - 1;
    }
  }

  if (optimized_rw) {
    // Optimized pattern: one pass over the dimension plus a final fold.
    window_element_count =
        reduce_window->shape().dimensions(logical_reduction_dim);
    reduction_count = (output_element_count / window_element_count) +
                      (window_element_count - 1);
    VLOG(3) << "Reduction count: " << reduction_count
            << " reported for reduce-window:\n"
            << reduce_window->ToString();
  }
  if (options_.count_multiple_input_accesses) {
    // Overlapping windows re-read input elements; account for that in the
    // operand's utilization and bytes.
    current_properties_.set_operand_utilization(0,
                                                1.0 * output_element_count *
                                                    window_element_count /
                                                    input_element_count);
    current_properties_.set_operand_bytes_accessed(
        0, output_element_count * window_element_count *
               ShapeUtil::ByteSizeOfPrimitiveType(
                   reduce_window->operand(0)->shape().element_type()));
  }
  sub_properties.ForEach([&](absl::string_view key, float val) {
    if (KeyToCopyFromSubcomputation(key)) {
      current_properties_[key] = val * reduction_count;
    }
  });
  return absl::OkStatus();
}

// SelectAndScatter: the select computation runs (window - 1) times per
// source element; the scatter computation runs once per source element.
absl::Status HloCostAnalysis::HandleSelectAndScatter(
    const HloInstruction* instruction) {
  TF_ASSIGN_OR_RETURN(Properties select_properties,
                      ProcessSubcomputation(instruction->select()));
  TF_ASSIGN_OR_RETURN(Properties scatter_properties,
                      ProcessSubcomputation(instruction->scatter()));
  const auto source = instruction->operand(1);
  const auto source_element_count = ShapeUtil::ElementsIn(source->shape());
  int64_t window_element_count = 1;
  for (const auto& dimension : instruction->window().dimensions()) {
    window_element_count *= dimension.size();
  }
  const int64_t select_count =
      source_element_count * (window_element_count - 1);
  select_properties.ForEach([&](absl::string_view key, float val) {
    if (KeyToCopyFromSubcomputation(key)) {
      current_properties_[key] += val * select_count;
    }
  });
  scatter_properties.ForEach([&](absl::string_view key, float val) {
    if (KeyToCopyFromSubcomputation(key)) {
      current_properties_[key] += val * source_element_count;
    }
  });
  return absl::OkStatus();
}

// Bitcast is free: it is a type-level reinterpretation with no data movement.
absl::Status HloCostAnalysis::HandleBitcast(const HloInstruction*) {
    current_properties_[kBytesAccessedKey] = 0;
  current_properties_.set_output_bytes_accessed(0);
  current_properties_.set_operand_bytes_accessed(0, 0);
  current_properties_[kOptimalSecondsKey] = 0;
  return absl::OkStatus();
}

// Broadcast: when counting multiple input accesses, the (smaller) operand is
// effectively read once per output element, so its bytes/utilization are
// scaled up to the output size.
absl::Status HloCostAnalysis::HandleBroadcast(const HloInstruction* broadcast) {
  if (options_.count_multiple_input_accesses) {
    current_properties_.set_operand_bytes_accessed(
        0, GetShapeSize(broadcast->shape()));
    current_properties_.set_operand_utilization(
        0, 1.0 * ShapeUtil::ElementsIn(broadcast->shape()) /
               ShapeUtil::ElementsIn(broadcast->operand(0)->shape()));
  }
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandlePad(const HloInstruction*) {
  return absl::OkStatus();
}

// Async-start inherits the full cost of its wrapped computation; the
// matching update/done ops are free.
absl::Status HloCostAnalysis::HandleAsyncStart(
    const HloInstruction* async_start) {
  TF_ASSIGN_OR_RETURN(
      current_properties_,
      ProcessSubcomputation(async_start->called_computations()[0]));
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleAsyncUpdate(const HloInstruction*) {
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleAsyncDone(const HloInstruction*) {
  return absl::OkStatus();
}

// Copy/send/recv pairs and reshape-like ops are modeled as free.
absl::Status HloCostAnalysis::HandleCopyStart(const HloInstruction*) {
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleCopyDone(const HloInstruction*) {
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleSend(const HloInstruction*) {
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleSendDone(const HloInstruction*) {
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleRecv(const HloInstruction*) {
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleRecvDone(const HloInstruction*) {
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleReshape(const HloInstruction*) {
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleDynamicReshape(const HloInstruction*) {
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleBatchNormTraining(const HloInstruction*) {
  return
      absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleBatchNormInference(const HloInstruction*) {
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleBatchNormGrad(const HloInstruction*) {
  return absl::OkStatus();
}

// A transpose that is effectively a bitcast costs nothing.
absl::Status HloCostAnalysis::HandleTranspose(const HloInstruction* transpose) {
  if (transpose->IsEffectiveBitcast()) {
    return HandleBitcast(transpose);
  }
  return absl::OkStatus();
}

// Token-only ops: free, no bytes, no time.
absl::Status HloCostAnalysis::HandleAfterAll(const HloInstruction* token) {
  current_should_compute_bottleneck_time_ = false;
  current_properties_[kBytesAccessedKey] = 0;
  current_properties_.set_output_bytes_accessed(0);
  for (int i = 0; i < token->operand_count(); ++i) {
    current_properties_.set_operand_bytes_accessed(i, 0);
  }
  current_properties_[kOptimalSecondsKey] = 0;
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleAddDependency(
    const HloInstruction* add_dependency) {
  current_should_compute_bottleneck_time_ = false;
  current_properties_[kBytesAccessedKey] = 0;
  current_properties_.set_output_bytes_accessed(0);
  for (int i = 0; i < add_dependency->operand_count(); ++i) {
    current_properties_.set_operand_bytes_accessed(i, 0);
  }
  current_properties_[kOptimalSecondsKey] = 0;
  return absl::OkStatus();
}

// Convenience overload: pull the shapes off the instruction and delegate.
int64_t HloCostAnalysis::GetConvolutionFlops(
    const HloInstruction* convolution) {
  auto lhs = convolution->operand(0);
  auto rhs = convolution->operand(1);
  const Shape& lhs_shape = lhs->shape();
  const Shape& rhs_shape = rhs->shape();
  const Shape& result_shape = convolution->shape();
  return GetConvolutionFlops(convolution, lhs_shape, rhs_shape, result_shape);
}

// Counts convolution FLOPs as one FMA per (valid kernel/output position pair)
// per feature/batch combination. Per spatial dimension it computes how many
// kernel-position x output-position pairs actually read an in-bounds input
// element, handling padding, stride, window dilation and base dilation.
int64_t HloCostAnalysis::GetConvolutionFlops(const HloInstruction* convolution,
                                             const Shape& lhs_shape,
                                             const Shape& rhs_shape,
                                             const Shape& result_shape) {
  Window window = convolution->window();
  const auto& dnums = convolution->convolution_dimension_numbers();
  const int64_t input_batch_dim = dnums.input_batch_dimension();
  const int64_t input_feature_dim = dnums.input_feature_dimension();
  const int64_t output_feature_dim = dnums.output_feature_dimension();
  const int64_t input_feature =
      ShapeUtil::GetDimension(lhs_shape, input_feature_dim);
  const int64_t output_feature =
      ShapeUtil::GetDimension(result_shape, output_feature_dim);
  const int64_t batch = ShapeUtil::GetDimension(lhs_shape, input_batch_dim);

  DimensionVector kernel_limits;
  DimensionVector output_limits;
  DimensionVector input_limits;
  if (window.dimensions().empty()) {
    // Degenerate 0-d convolution: treat as a single 1-element dimension.
    window = window_util::MakeWindow({1});
    kernel_limits.push_back(1);
    output_limits.push_back(1);
    input_limits.push_back(1);
  } else {
    // Collect the spatial extents of kernel, output, and input.
    for (int64_t spatial_dimension = 0;
         spatial_dimension < window.dimensions_size(); ++spatial_dimension) {
      const int64_t kernel_spatial_dim =
          dnums.kernel_spatial_dimensions(spatial_dimension);
      const int64_t kernel_limit = rhs_shape.dimensions(kernel_spatial_dim);
      kernel_limits.push_back(kernel_limit);
      const int64_t output_spatial_dim =
          dnums.output_spatial_dimensions(spatial_dimension);
      const int64_t output_limit = result_shape.dimensions(output_spatial_dim);
      output_limits.push_back(output_limit);
      const int64_t input_spatial_dim =
          dnums.input_spatial_dimensions(spatial_dimension);
      const int64_t input_limit = lhs_shape.dimensions(input_spatial_dim);
      input_limits.push_back(input_limit);
    }
  }

  DimensionVector valid_position_counts;
  for (int64_t spatial_dimension = 0;
       spatial_dimension < window.dimensions_size(); ++spatial_dimension) {
    const auto& window_dim = window.dimensions(spatial_dimension);
    // Fast path 1: a configuration that reads each input element exactly
    // once (matching extents, base dilation equal to the input extent,
    // specific stride, no padding).
    if (input_limits[spatial_dimension] == output_limits[spatial_dimension] &&
        kernel_limits[spatial_dimension] == output_limits[spatial_dimension] &&
        input_limits[spatial_dimension] == window_dim.base_dilation() &&
        window_dim.window_dilation() == 1 &&
        std::max<int64_t>(1, input_limits[spatial_dimension] - 1) ==
            window_dim.stride() &&
        window_dim.padding_low() == 0 && window_dim.padding_high() == 0) {
      valid_position_counts.push_back(input_limits[spatial_dimension]);
      continue;
    }
    // Fast path 2: input of extent 1 fully padded on both sides.
    if (input_limits[spatial_dimension] == 1 &&
        kernel_limits[spatial_dimension] == output_limits[spatial_dimension] &&
        window_dim.window_dilation() == 1 && window_dim.base_dilation() == 1 &&
        window_dim.stride() == 1 &&
        window_dim.padding_high() == output_limits[spatial_dimension] - 1 &&
        window_dim.padding_low() == output_limits[spatial_dimension] - 1) {
      valid_position_counts.push_back(output_limits[spatial_dimension]);
      continue;
    }
    int64_t valid_position_count = 0;
    for (int64_t kernel_idx = 0; kernel_idx < kernel_limits[spatial_dimension];
         ++kernel_idx) {
      if (window_dim.stride() == 1 && window_dim.base_dilation() == 1) {
        // Closed-form count of in-bounds output positions for this kernel
        // index when stride and base dilation are trivial.
        const int64_t undilated_index_base =
            window_dim.padding_low() -
            kernel_idx * window_dim.window_dilation();
        valid_position_count += std::max<int64_t>(
            std::min<int64_t>(
                input_limits[spatial_dimension] + undilated_index_base,
                output_limits[spatial_dimension]) -
                std::max<int64_t>(undilated_index_base, int64_t{0}),
            int64_t{0});
        continue;
      }
      // Slow path: enumerate every output position and test validity.
      for (int64_t output_idx = 0;
           output_idx < output_limits[spatial_dimension]; ++output_idx) {
        const int64_t undilated_index =
            output_idx * window_dim.stride() - window_dim.padding_low() +
            kernel_idx * window_dim.window_dilation();
        const int64_t lhs_spatial_index =
            window_dim.base_dilation() > 1 ?
undilated_index / window_dim.base_dilation() : undilated_index; if (undilated_index != lhs_spatial_index * window_dim.base_dilation()) { continue; } if (lhs_spatial_index < 0 || lhs_spatial_index >= input_limits[spatial_dimension]) { continue; } valid_position_count += 1; } } valid_position_counts.push_back(valid_position_count); } const int64_t fma_count = (input_feature / convolution->feature_group_count()) * output_feature * (batch / convolution->batch_group_count()) * Product(valid_position_counts); return fma_count * kFmaFlops; } absl::Status HloCostAnalysis::HandleConvolution( const HloInstruction* convolution) { current_properties_[kFlopsKey] = GetConvolutionFlops(convolution); return absl::OkStatus(); } absl::Status HloCostAnalysis::HandleFft(const HloInstruction* fft) { auto real_shape = fft->operand(0)->shape().IsTuple() ? ShapeUtil::GetTupleElementShape(fft->operand(0)->shape(), 0) : fft->operand(0)->shape(); constexpr int kFmaPerComplexMul = 4; int64_t log_factors = 1; for (int64_t dim : fft->fft_length()) { log_factors *= Log2Floor<uint64_t>(dim); } current_properties_[kFlopsKey] = kFmaFlops * kFmaPerComplexMul * log_factors * ShapeUtil::ElementsIn(real_shape); return absl::OkStatus(); } absl::Status HloCostAnalysis::HandleTriangularSolve(const HloInstruction* hlo) { float bytes_accessed = GetShapeSize(hlo->shape()); current_properties_.set_output_bytes_accessed(GetShapeSize(hlo->shape())); bytes_accessed += GetShapeSize(hlo->operand(0)->shape()) / 2.0f; current_properties_.set_operand_bytes_accessed( 0, GetShapeSize(hlo->operand(0)->shape()) / 2.0f); bytes_accessed += GetShapeSize(hlo->operand(1)->shape()); current_properties_.set_operand_bytes_accessed( 0, GetShapeSize(hlo->operand(1)->shape())); current_properties_[kBytesAccessedKey] = bytes_accessed; const Shape& a_shape = hlo->operand(0)->shape(); const Shape& b_shape = hlo->operand(1)->shape(); int64_t elems = a_shape.dimensions(a_shape.dimensions_size() - 1); elems *= 
ShapeUtil::ElementsIn(b_shape); current_properties_[kFlopsKey] = kFmaFlops * elems; return absl::OkStatus(); } absl::Status HloCostAnalysis::HandleCholesky(const HloInstruction* hlo) { float bytes_accessed = GetShapeSize(hlo->operand(0)->shape()) / 2.0f; current_properties_.set_output_bytes_accessed( GetShapeSize(hlo->operand(0)->shape()) / 2.0f); bytes_accessed += GetShapeSize(hlo->operand(0)->shape()) / 2.0f; current_properties_.set_operand_bytes_accessed( 0, GetShapeSize(hlo->operand(0)->shape()) / 2.0f); current_properties_[kBytesAccessedKey] = bytes_accessed; const Shape& a_shape = hlo->operand(0)->shape(); int64_t elems = a_shape.dimensions(a_shape.dimensions_size() - 1); elems *= ShapeUtil::ElementsIn(a_shape); current_properties_[kFlopsKey] = elems / 3; return absl::OkStatus(); } absl::Status HloCostAnalysis::HandleOptimizationBarrier( const HloInstruction* ) { return absl::OkStatus(); } absl::Status HloCostAnalysis::HandleAllGather(const HloInstruction* ) { return absl::OkStatus(); } absl::Status HloCostAnalysis::HandleAllGatherStart(const HloInstruction* hlo) { return HandleAllGather(hlo); } absl::Status HloCostAnalysis::HandleAllGatherDone( const HloInstruction* ) { return absl::OkStatus(); } absl::Status HloCostAnalysis::HandleAllReduce(const HloInstruction* crs) { double flops = 0.0; int64_t output_bytes_accessed = 0; ShapeUtil::ForEachSubshape( crs->shape(), [&](const Shape& subshape, const ShapeIndex&) { if (subshape.IsArray()) { flops += ShapeUtil::ElementsIn(subshape); output_bytes_accessed += GetShapeSize(subshape); } }); int64_t bytes_accessed = output_bytes_accessed; for (const HloInstruction* operand : crs->operands()) { bytes_accessed += GetShapeSize(operand->shape()); } current_properties_[kFlopsKey] = flops; current_properties_.set_output_bytes_accessed(output_bytes_accessed); current_properties_[kBytesAccessedKey] = bytes_accessed; return absl::OkStatus(); } absl::Status HloCostAnalysis::HandleReduceScatter(const HloInstruction* hlo) { 
    return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleAllReduceStart(const HloInstruction* hlo) {
  return HandleAllReduce(hlo);
}

absl::Status HloCostAnalysis::HandleAllReduceDone(const HloInstruction*) {
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleAllToAll(const HloInstruction* hlo) {
  return absl::OkStatus();
}

// Collective/topology ops below are modeled as free at this level.
absl::Status HloCostAnalysis::HandleCollectiveBroadcast(
    const HloInstruction*) {
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleCollectivePermute(const HloInstruction*) {
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleCollectivePermuteStart(
    const HloInstruction*) {
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleCollectivePermuteDone(
    const HloInstruction*) {
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandlePartitionId(const HloInstruction*) {
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleReplicaId(const HloInstruction*) {
  return absl::OkStatus();
}

// RNG: one transcendental per output element.
absl::Status HloCostAnalysis::HandleRng(const HloInstruction* random) {
  current_properties_[kTranscendentalsKey] =
      ShapeUtil::ElementsIn(random->shape());
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleRngBitGenerator(
    const HloInstruction* random) {
  current_properties_[kTranscendentalsKey] =
      ShapeUtil::ElementsInRecursive(random->shape());
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleRngGetAndUpdateState(
    const HloInstruction* random) {
  return absl::OkStatus();
}

// Charges output bytes for a fusion. Filtered output indices and outputs
// produced by an in-place dynamic-update-slice root get special handling.
absl::Status HloCostAnalysis::FusionProcessOutputBytesAccessed(
    const HloInstruction* fusion) {
  ShapeUtil::ForEachSubshape(
      fusion->shape(),
      [this, fusion](const Shape& subshape, const ShapeIndex& shape_index) {
        if (!subshape.IsArray()) {
          return;
        }
        const HloInstruction* root = fusion->fused_expression_root();
        // A one-deep index into a tuple root selects a specific fused output.
        auto further_examine_index =
            shape_index.size() == 1 && root->opcode() == HloOpcode::kTuple;
        if (further_examine_index &&
            ShouldFilterFusionOutputIndex(fusion, shape_index)) {
          // Filtered output: costs nothing, and the producing operand's
          // utilization is zeroed out.
          current_properties_.set_output_bytes_accessed(shape_index, 0);
          hlo_properties_[root->operand(shape_index[0])]
                         [GetOperandUtilizationKey(0)] = 0;
          return;
        }
        if (further_examine_index) {
          root = root->operand(shape_index[0]);
        }
        if (root->opcode() == HloOpcode::kDynamicUpdateSlice) {
          // In-place DUS root: only the update window is written.
          int64_t size = GetShapeSize(root->operand(1)->shape());
          current_properties_[kBytesAccessedKey] += size;
          current_properties_.set_output_bytes_accessed(shape_index, size);
          hlo_properties_[root][GetOperandUtilizationKey(0)] = 0;
          return;
        }
        current_properties_[kBytesAccessedKey] += GetShapeSize(subshape);
        current_properties_.set_output_bytes_accessed(shape_index,
                                                      GetShapeSize(subshape));
      });

  if (fusion->shape().IsTuple()) {
    // Propagate per-leaf output bytes up to interior tuple nodes, so every
    // prefix index has a total recorded.
    std::function<float(const Shape&, const ShapeIndex&)>
        propagate_output_size_to_parent;
    propagate_output_size_to_parent = [&](const Shape& shape,
                                          const ShapeIndex& shape_index) {
      float& bytes_accessed =
          current_properties_[GetOutputBytesAccessedKey(shape_index)];
      if (bytes_accessed != 0) {
        return bytes_accessed;
      }
      for (int i = 0; i < shape.tuple_shapes_size(); ++i) {
        const Shape& subshape = shape.tuple_shapes(i);
        if (!subshape.IsTuple() && ShouldFilterFusionOutputIndex(fusion, {i})) {
          continue;
        }
        ShapeIndex subshape_index(shape_index);
        subshape_index.push_back(i);
        bytes_accessed +=
            propagate_output_size_to_parent(subshape, subshape_index);
      }
      return bytes_accessed;
    };
    current_properties_[GetOutputBytesAccessedKey()] = 0;
    propagate_output_size_to_parent(fusion->shape(), {});
  }
  return absl::OkStatus();
}

// Charges operand-read bytes for a fusion, per fused parameter; filtered
// inputs are charged zero bytes but keep their computed utilization.
absl::Status HloCostAnalysis::FusionProcessOperandBytesRead(
    const HloInstruction* fusion) {
  for (int64_t i = 0; i < fusion->fused_parameters().size(); ++i) {
    const HloInstruction* operand = fusion->fused_parameter(i);
    int64_t operand_size = 0;
    if (ShouldFilterFusionInput(fusion, i)) {
      current_properties_.set_operand_bytes_accessed(i, operand_size);
      current_properties_.set_operand_utilization(
          i, hlo_properties_[operand][kUtilizationKey]);
      continue;
    }
    if
        (!operand->shape().IsTuple()) {
      operand_size = FusionParameterReadBytes(operand);
    } else {
      // Tuple parameter: walk each leaf, find the GTE chain that extracts
      // it, and charge the bytes that GTE's users actually read.
      ShapeUtil::ForEachLeafShape(
          operand->shape(), [&](const Shape&, const ShapeIndex& index) {
            const HloInstruction* gte = operand;
            for (int64_t sub_index : index) {
              for (const HloInstruction* user : gte->users()) {
                if (user->opcode() == HloOpcode::kGetTupleElement &&
                    user->tuple_index() == sub_index) {
                  gte = user;
                  break;
                }
              }
            }
            int64_t size = FusionParameterReadBytes(gte);
            operand_size += size;
            current_properties_.set_operand_bytes_accessed(i, index, size);
          });
    }
    current_properties_[kBytesAccessedKey] += operand_size;
    current_properties_.set_operand_bytes_accessed(i, operand_size);
    current_properties_.set_operand_utilization(
        i, hlo_properties_[operand][kUtilizationKey]);
  }
  return absl::OkStatus();
}

// Large fused constants behave like memory reads; small ones are assumed to
// be materialized as immediates and cost nothing.
absl::Status HloCostAnalysis::FusionCountConstantsMemoryAccess(
    const HloInstruction* fusion) {
  for (const HloInstruction* instr :
       fusion->fused_instructions_computation()->instructions()) {
    if (instr->opcode() == HloOpcode::kConstant &&
        ShapeUtil::ElementsIn(instr->shape()) >
            immediate_constant_max_elements()) {
      float utilization = hlo_properties_[instr][kUtilizationKey];
      if (!options_.count_multiple_input_accesses) {
        // Cap at one full read when repeat accesses are not counted.
        utilization = fmin(utilization, 1.0);
      }
      current_properties_[kBytesAccessedKey] +=
          GetShapeSize(instr->shape()) * utilization;
    }
  }
  return absl::OkStatus();
}

// Fusion cost = cost of the fused computation, with bytes-accessed rebuilt
// from the fusion's actual inputs/outputs. Custom fusions wrapping a
// gather/scatter fall back to those handlers directly.
absl::Status HloCostAnalysis::HandleFusion(const HloInstruction* fusion) {
  VLOG(8) << "Processing fusion " << fusion->ToString();

  if (fusion->IsCustomFusion()) {
    for (const HloInstruction* hlo :
         fusion->fused_instructions_computation()->instructions()) {
      if (hlo->opcode() == HloOpcode::kGather) {
        return HandleGather(hlo);
      }
      if (hlo->opcode() == HloOpcode::kScatter) {
        return HandleScatter(hlo);
      }
    }
  }
  TF_ASSIGN_OR_RETURN(
      current_properties_,
      ProcessSubcomputation(fusion->fused_instructions_computation()));

  // Reset bytes accessed; the fusion-specific passes below recompute it.
  current_properties_[kBytesAccessedKey] = 0;
  TF_RETURN_IF_ERROR(FusionProcessOutputBytesAccessed(fusion));
    TF_RETURN_IF_ERROR(FusionCalculateUtilizations(fusion));
  TF_RETURN_IF_ERROR(FusionCountConstantsMemoryAccess(fusion));
  TF_RETURN_IF_ERROR(FusionProcessOperandBytesRead(fusion));
  return absl::OkStatus();
}

// Call inherits the full cost of the called computation.
absl::Status HloCostAnalysis::HandleCall(const HloInstruction* call) {
  TF_ASSIGN_OR_RETURN(current_properties_,
                      ProcessSubcomputation(call->to_apply()));
  current_should_compute_bottleneck_time_ = false;
  return absl::OkStatus();
}

// Custom calls are opaque: every property is set to the sentinel -1 to mark
// the cost as unknown.
absl::Status HloCostAnalysis::HandleCustomCall(
    const HloInstruction* custom_call) {
  current_properties_[kOptimalSecondsKey] = -1;
  current_properties_[kBytesAccessedKey] = -1;
  current_properties_.set_output_bytes_accessed(-1);
  for (int i = 0; i < custom_call->operand_count(); ++i) {
    current_properties_.set_operand_bytes_accessed(i, -1);
  }
  current_properties_[kFlopsKey] = -1;
  current_should_compute_bottleneck_time_ = false;
  return absl::OkStatus();
}

// Sort: modeled as n * ceil(log2(n)) comparisons.
absl::Status HloCostAnalysis::HandleSort(const HloInstruction* sort) {
  int64_t elements = ShapeUtil::ElementsIn(sort->operand(0)->shape());
  current_properties_[kFlopsKey] = elements * Log2Ceiling<uint64_t>(elements);
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::HandleTopK(const HloInstruction* topk) {
  return absl::OkStatus();
}

// While: cost of one body iteration plus one condition evaluation
// (trip count is unknown here).
absl::Status HloCostAnalysis::HandleWhile(const HloInstruction* xla_while) {
  TF_ASSIGN_OR_RETURN(const Properties body_properties,
                      ProcessSubcomputation(xla_while->while_body()));
  TF_ASSIGN_OR_RETURN(const Properties condition_properties,
                      ProcessSubcomputation(xla_while->while_condition()));
  current_properties_ = Properties();
  body_properties.ForEach([&](absl::string_view key, float val) {
    current_properties_[key] += val;
  });
  condition_properties.ForEach([&](absl::string_view key, float val) {
    current_properties_[key] += val;
  });
  current_should_compute_bottleneck_time_ = false;
  return absl::OkStatus();
}

// Conditional: per-property maximum over all branches (worst case).
absl::Status HloCostAnalysis::HandleConditional(
    const HloInstruction* conditional) {
  TF_ASSIGN_OR_RETURN(
      const Properties branch0_computation_properties,
      ProcessSubcomputation(conditional->branch_computation(0)));
  current_properties_ = branch0_computation_properties;
  for (int j = 1; j < conditional->branch_count(); ++j) {
    TF_ASSIGN_OR_RETURN(
        const Properties branch_computation_properties,
        ProcessSubcomputation(conditional->branch_computation(j)));
    branch_computation_properties.ForEach(
        [&](absl::string_view key, float val) {
          auto& current_property = current_properties_[key];
          current_property = std::max(current_property, val);
        });
  }
  current_should_compute_bottleneck_time_ = false;
  return absl::OkStatus();
}

// Gather reads only as many input bytes as it produces output bytes, plus
// the indices operand; operand 0's utilization is the output/input element
// ratio.
absl::Status HloCostAnalysis::HandleGather(const HloInstruction* gather) {
  int64_t output_size = GetShapeSize(gather->shape());
  current_properties_[kBytesAccessedKey] =
      output_size * 2 + GetShapeSize(gather->operand(1)->shape());
  current_properties_.set_operand_bytes_accessed(0, output_size);
  current_properties_.set_operand_bytes_accessed(
      1, GetShapeSize(gather->operand(1)->shape()));
  current_properties_.set_operand_utilization(
      0, 1.0 * ShapeUtil::ElementsIn(gather->shape()) /
             ShapeUtil::ElementsIn(gather->operand(0)->shape()));
  current_properties_.set_output_bytes_accessed(output_size);
  return absl::OkStatus();
}

// Scatter: each scattered window is read from the operand, read from the
// updates, and written back (hence the factor 3), plus the indices read.
// Operand layout: [operands..., indices, updates...], so updates start at
// index n + 1.
absl::Status HloCostAnalysis::HandleScatter(const HloInstruction* hlo) {
  auto* scatter = Cast<HloScatterInstruction>(hlo);
  int64_t total_update_size = 0;
  for (int i = 0, n = scatter->scatter_operand_count(); i < n; ++i) {
    int64_t update_size = GetShapeSize(scatter->scatter_updates()[i]->shape());
    current_properties_.set_operand_bytes_accessed(i, update_size);
    current_properties_.set_operand_bytes_accessed(n + 1 + i, update_size);
    total_update_size += update_size;
  }
  int64_t scatter_indices_size =
      GetShapeSize(scatter->scatter_indices()->shape());
  current_properties_.set_operand_bytes_accessed(
      scatter->scatter_operand_count(), scatter_indices_size);
  current_properties_[kBytesAccessedKey] =
      total_update_size * 3 + scatter_indices_size;
  current_properties_.set_output_bytes_accessed(total_update_size);
  // The update computation runs once per update element; scale its compute
  // properties accordingly (bytes/utilization keys are excluded — see
  // KeyToCopyFromSubcomputation).
  const int64_t element_count =
      ShapeUtil::ElementsIn(scatter->scatter_updates()[0]->shape());
  TF_ASSIGN_OR_RETURN(const Properties sub_properties,
                      ProcessSubcomputation(scatter->to_apply()));
  sub_properties.ForEach([&](absl::string_view key, float val) {
    if (KeyToCopyFromSubcomputation(key)) {
      current_properties_[key] = val * element_count;
    }
  });
  return absl::OkStatus();
}

// get-dimension-size is metadata-only: free.
absl::Status HloCostAnalysis::HandleGetDimensionSize(
    const HloInstruction* ) {
  return absl::OkStatus();
}

// set-dimension-size is metadata-only: free.
absl::Status HloCostAnalysis::HandleSetDimensionSize(
    const HloInstruction* ) {
  return absl::OkStatus();
}

absl::Status HloCostAnalysis::FinishVisit(const HloInstruction*) {
  return absl::OkStatus();
}

// Whole-computation totals, accumulated in properties_sum_.
float HloCostAnalysis::flop_count() const {
  return properties_sum_[kFlopsKey];
}

float HloCostAnalysis::transcendental_count() const {
  return properties_sum_[kTranscendentalsKey];
}

float HloCostAnalysis::bytes_accessed() const {
  return properties_sum_[kBytesAccessedKey];
}

float HloCostAnalysis::optimal_seconds() const {
  return properties_sum_[kOptimalSecondsKey];
}

// Per-instruction property map; returns an empty Properties if `hlo` was
// never visited by this analysis.
HloCostAnalysis::Properties HloCostAnalysis::properties(
    const HloInstruction& hlo) const {
  auto it = hlo_properties_.find(&hlo);
  if (it == hlo_properties_.end()) {
    return Properties();
  }
  return it->second;
}

int64_t HloCostAnalysis::flop_count(const HloInstruction& hlo) const {
  return GetPropertyForHlo(hlo, kFlopsKey, hlo_properties_);
}

int64_t HloCostAnalysis::transcendental_count(const HloInstruction& hlo) const {
  return GetPropertyForHlo(hlo, kTranscendentalsKey, hlo_properties_);
}

int64_t HloCostAnalysis::bytes_accessed(const HloInstruction& hlo) const {
  return GetPropertyForHlo(hlo, kBytesAccessedKey, hlo_properties_);
}

int64_t HloCostAnalysis::operand_bytes_accessed(const HloInstruction& hlo,
                                                int64_t operand_num,
                                                ShapeIndex index) const {
  return GetPropertyForHlo(hlo, GetOperandBytesAccessedKey(operand_num, index),
                           hlo_properties_);
}

float HloCostAnalysis::operand_utilization(const HloInstruction& hlo, int64_t
    operand_num, ShapeIndex index) const {
  return GetPropertyForHlo(hlo, GetOperandUtilizationKey(operand_num, index),
                           hlo_properties_);
}

int64_t HloCostAnalysis::output_bytes_accessed(const HloInstruction& hlo,
                                               ShapeIndex index) const {
  return GetPropertyForHlo(hlo, GetOutputBytesAccessedKey(index),
                           hlo_properties_);
}

float HloCostAnalysis::optimal_seconds(const HloInstruction& hlo) const {
  return GetPropertyForHlo(hlo, kOptimalSecondsKey, hlo_properties_);
}

// Sums bytes read from `hlo`'s operands, optionally restricted to leaf
// subshapes whose layout lives in `memory_space`.
int64_t HloCostAnalysis::GetBytesRead(
    const HloInstruction& hlo, std::optional<int64_t> memory_space) const {
  int64_t bytes_read = 0;
  for (int operand_number = 0; operand_number < hlo.operand_count();
       ++operand_number) {
    const Shape& shape = hlo.operand(operand_number)->shape();
    ShapeUtil::ForEachSubshape(
        shape, [&](const Shape& sub_shape, const ShapeIndex& index) {
          // Only leaf subshapes carry byte-access properties.
          if (ShapeUtil::IsLeafIndex(shape, index)) {
            std::optional<int64_t> index_memory_space;
            if (sub_shape.has_layout()) {
              index_memory_space = sub_shape.layout().memory_space();
            }
            if (!memory_space || memory_space == index_memory_space) {
              bytes_read += operand_bytes_accessed(hlo, operand_number, index);
            }
          }
        });
  }
  return bytes_read;
}

// Sums bytes written to `hlo`'s output, with the same optional memory-space
// filter as GetBytesRead.
int64_t HloCostAnalysis::GetBytesWritten(
    const HloInstruction& hlo, std::optional<int64_t> memory_space) const {
  int64_t bytes_written = 0;
  ShapeUtil::ForEachLeafShape(
      hlo.shape(), [&](const Shape& sub_shape, const ShapeIndex& index) {
        std::optional<int64_t> index_memory_space;
        if (sub_shape.has_layout()) {
          index_memory_space = sub_shape.layout().memory_space();
        }
        if (!memory_space || memory_space == index_memory_space) {
          bytes_written += output_bytes_accessed(hlo, index);
        }
      });
  return bytes_written;
}

// Runs a nested cost analysis over `computation` and folds its
// per-instruction results into this analysis' map.
absl::StatusOr<HloCostAnalysis::Properties>
HloCostAnalysis::ProcessSubcomputation(HloComputation* computation) {
  auto visitor = CreateNestedCostAnalysis();
  visitor->ReserveVisitStates(computation->instruction_count());
  TF_RETURN_IF_ERROR(computation->Accept(visitor.get()));
  for (auto& entry : visitor->hlo_properties_) {
    // Adopt the nested visitor's per-instruction properties.
    hlo_properties_[entry.first] = std::move(entry.second);
  }
  return visitor->properties();
}

std::unique_ptr<HloCostAnalysis> HloCostAnalysis::CreateNestedCostAnalysis() {
  return std::make_unique<HloCostAnalysis>(options_);
}

// Property-map key for bytes accessed on operand `operand_num` at `index`.
std::string HloCostAnalysis::GetOperandBytesAccessedKey(
    int64_t operand_num, const ShapeIndex& index) {
  return absl::StrCat(kBytesAccessedKey, operand_num, index.ToString());
}

// Property-map key for utilization of operand `operand_num` at `index`.
std::string HloCostAnalysis::GetOperandUtilizationKey(
    int64_t operand_num, const ShapeIndex& index) {
  return absl::StrCat(kUtilizationKey, operand_num, index.ToString());
}

// Property-map key for bytes accessed on the output at `index`.
std::string HloCostAnalysis::GetOutputBytesAccessedKey(
    const ShapeIndex& index) {
  return absl::StrCat(kBytesAccessedKey, "out", index.ToString());
}

// Only non-bytes, non-utilization properties (e.g. flops, transcendentals)
// are scaled and copied out of a subcomputation (see HandleScatter).
bool HloCostAnalysis::KeyToCopyFromSubcomputation(absl::string_view key) const {
  return !absl::StartsWith(key, kBytesAccessedKey) &&
         !absl::StartsWith(key, kUtilizationKey);
}

// Closes the enclosing namespace (opened before this chunk; presumably xla).
}
#include "xla/service/hlo_cost_analysis.h"

#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "absl/status/statusor.h"
#include "xla/client/client.h"
#include "xla/client/client_library.h"
#include "xla/client/local_client.h"
#include "xla/hlo/builder/padding.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/local_service.h"
#include "xla/service/service.h"
#include "xla/shape_util.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"

namespace xla {
namespace {

// Pointer size handed to ShapeUtil::ByteSizeOf (sizes tuple index tables).
constexpr int64_t kPointerSize = 8;

// Shape-size function used by every analysis in these tests.
int64_t ShapeSize(const Shape& shape) {
  return ShapeUtil::ByteSizeOf(shape, kPointerSize);
}

// Fixture that pre-builds small scalar XlaComputations (add, add+exp,
// sigmoid, max, gt) reused as sub-computations throughout the tests.
class HloCostAnalysisTest : public ::testing::Test {
 protected:
  HloCostAnalysisTest()
      : client_(ClientLibrary::LocalClientOrDie()),
        service_(static_cast<Service*>(ClientLibrary::GetXlaService(
            static_cast<LocalClient*>(client_)->platform()))) {
    // exp(x + 0.5): one add (flop) and one exp (transcendental).
    {
      XlaBuilder builder("add_and_exp");
      auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
      auto half = ConstantR0<float>(&builder, 0.5);
      Exp(Add(x, half));
      auto computation_status = builder.Build();
      TF_CHECK_OK(computation_status.status());
      add_and_exp_ = std::move(computation_status).value();
    }
    // x + y.
    {
      XlaBuilder builder("add");
      auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
      auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {}), "y");
      Add(x, y);
      auto computation_status = builder.Build();
      TF_CHECK_OK(computation_status.status());
      add_ = std::move(computation_status).value();
    }
    // 1 / (1 + exp(-x)).
    {
      XlaBuilder builder("sigmoid");
      auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
      auto one = ConstantR0<float>(&builder, 1.0);
      Div(one, Add(one, Exp(Neg(x))));
      auto computation_status = builder.Build();
      TF_CHECK_OK(computation_status.status());
      sigmoid_ = std::move(computation_status).value();
    }
    // max(x, y).
    {
      XlaBuilder builder("max");
      auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
      auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {}), "y");
      Max(x, y);
      auto computation_status = builder.Build();
      TF_CHECK_OK(computation_status.status());
      max_ = std::move(computation_status).value();
    }
    // x > y.
    {
      XlaBuilder builder("gt");
      auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x");
      auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {}), "y");
      Gt(x, y);
      auto computation_status = builder.Build();
      TF_CHECK_OK(computation_status.status());
      gt_ = std::move(computation_status).value();
    }
  }

  // Builds an HloModule from the contents of `builder`.
  std::unique_ptr<HloModule> BuildHloGraph(XlaBuilder* builder) {
    auto computation_status = builder->Build();
    TF_CHECK_OK(computation_status.status());
    auto computation = std::move(computation_status).value();
    auto config = HloModule::CreateModuleConfigFromProto(computation.proto(),
                                                         DebugOptions())
                      .value();
    return HloModule::CreateFromProto(computation.proto(), config).value();
  }

  Client* client_;
  Service* service_;

  XlaComputation add_;
  XlaComputation add_and_exp_;
  XlaComputation sigmoid_;
  XlaComputation max_;
  XlaComputation gt_;
};

// Dot of [10,5] x [5,30]: flops = 2*M*N*K; bytes = both inputs + output.
TEST_F(HloCostAnalysisTest, MatrixMultiply) {
  XlaBuilder builder("matrix_multiply");
  auto lhs = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 5}), "lhs");
  auto rhs = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {5, 30}), "rhs");
  Dot(lhs, rhs);

  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));

  EXPECT_EQ(analysis.flop_count(), 2 * 10 * 30 * 5);
  EXPECT_EQ(analysis.transcendental_count(), 0);
  EXPECT_EQ(analysis.bytes_accessed(),
            sizeof(float) * (10 * 5 + 5 * 30 + 10 * 30));

  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float)
                                                       * 10 * 5);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 5 * 30);
  EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 10 * 30);
}

// DotGeneral with two contracting dims on each side.
TEST_F(HloCostAnalysisTest, DotGeneral) {
  XlaBuilder builder("matrix_multiply");
  auto lhs =
      Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 5, 5}), "lhs");
  auto rhs =
      Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {5, 5, 30}), "rhs");
  DotDimensionNumbers dnums;
  dnums.add_lhs_contracting_dimensions(1);
  dnums.add_lhs_contracting_dimensions(2);
  dnums.add_rhs_contracting_dimensions(0);
  dnums.add_rhs_contracting_dimensions(1);
  DotGeneral(lhs, rhs, dnums);

  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));

  EXPECT_EQ(analysis.flop_count(), 2 * 10 * 30 * 5 * 5);
  EXPECT_EQ(analysis.transcendental_count(), 0);
  EXPECT_EQ(analysis.bytes_accessed(),
            sizeof(float) * (10 * 5 * 5 + 5 * 5 * 30 + 10 * 30));

  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0),
            sizeof(float) * 10 * 5 * 5);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1),
            sizeof(float) * 5 * 5 * 30);
  EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 10 * 30);
}

// DotGeneral with one batch and one contracting dim per side.
TEST_F(HloCostAnalysisTest, DotGeneral2) {
  XlaBuilder builder("matrix_multiply");
  auto lhs =
      Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 5, 5}), "lhs");
  auto rhs =
      Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {5, 5, 30}), "rhs");
  DotDimensionNumbers dnums;
  dnums.add_lhs_contracting_dimensions(1);
  dnums.add_lhs_batch_dimensions(2);
  dnums.add_rhs_contracting_dimensions(0);
  dnums.add_rhs_batch_dimensions(1);
  DotGeneral(lhs, rhs, dnums);

  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));

  EXPECT_EQ(analysis.flop_count(), 2 * 10 * 30 * 5 * 5);
  EXPECT_EQ(analysis.transcendental_count(), 0);
  EXPECT_EQ(analysis.bytes_accessed(),
            sizeof(float) * (10 * 5 * 5 + 5 * 5 * 30 + 5 * 10 * 30));

  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0),
            sizeof(float) * 10 * 5 * 5);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1),
            sizeof(float) * 5 * 5 * 30);
  EXPECT_EQ(analysis.output_bytes_accessed(*root),
            sizeof(float) * 5 * 10 * 30);
}

// DotGeneral with no contracting/batch dims: full outer product.
TEST_F(HloCostAnalysisTest, DotGeneral3) {
  XlaBuilder builder("matrix_multiply");
  auto lhs = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 5}), "lhs");
  auto rhs = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {5, 30}), "rhs");
  DotDimensionNumbers dnums;
  DotGeneral(lhs, rhs, dnums);

  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));

  EXPECT_EQ(analysis.flop_count(), 2 * 10 * 30 * 5 * 5);
  EXPECT_EQ(analysis.transcendental_count(), 0);
  EXPECT_EQ(analysis.bytes_accessed(),
            sizeof(float) * (10 * 5 + 5 * 30 + 5 * 5 * 10 * 30));

  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 10 * 5);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 5 * 30);
  EXPECT_EQ(analysis.output_bytes_accessed(*root),
            sizeof(float) * 5 * 5 * 10 * 30);
}

// Map of add_and_exp over 10 elements: 10 flops + 10 transcendentals.
TEST_F(HloCostAnalysisTest, Map) {
  XlaBuilder builder("map");
  auto input = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10}), "in");
  Map(&builder, {input}, add_and_exp_, {0});

  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));

  EXPECT_EQ(analysis.flop_count(), 10);
  EXPECT_EQ(analysis.transcendental_count(), 10);
  EXPECT_EQ(analysis.bytes_accessed(), 80);

  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 10);
  EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 10);
}

// Valid-padded 2D conv: flops = 2 * output elements * kernel elements.
TEST_F(HloCostAnalysisTest, Convolution) {
  XlaBuilder builder("convolution");
  auto input = Parameter(
      &builder, 0, ShapeUtil::MakeShape(F32, {1, 1, 10, 20}), "input");
  auto kernel = Parameter(
      &builder, 1, ShapeUtil::MakeShape(F32, {1, 1, 3, 3}), "kernel");
  Conv(input, kernel, {1, 1}, Padding::kValid);

  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));

  EXPECT_EQ(analysis.flop_count(), 8 * 18 * 2 * 3 * 3);
  EXPECT_EQ(analysis.bytes_accessed(),
            sizeof(float) * (10 * 20 + 3 * 3 + 8 * 18));

  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0),
            sizeof(float) * 10 * 20);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 3 * 3);
  EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 8 * 18);
}

// Same-padded 3x3 conv over 3x3 input: flops counted per valid tap only.
TEST_F(HloCostAnalysisTest, ConvolutionSame) {
  XlaBuilder builder("convolution_same");
  const int iw = 3;
  const int ih = 3;
  const int kw = 3;
  const int kh = 3;
  const int ow = iw;
  const int oh = ih;
  const int sx = 1;
  const int sy = 1;
  auto input = Parameter(
      &builder, 0, ShapeUtil::MakeShape(F32, {1, 1, ih, iw}), "input");
  auto kernel = Parameter(
      &builder, 1, ShapeUtil::MakeShape(F32, {1, 1, kh, kw}), "kernel");
  Conv(input, kernel, {sx, sy}, Padding::kSame);

  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));

  // Per-output-pixel tap counts: corners 4, edges 6, center 9.
  EXPECT_EQ(analysis.flop_count(), 2 * (4 + 6 + 4 + 6 + 9 + 6 + 4 + 6 + 4));
  EXPECT_EQ(analysis.bytes_accessed(),
            sizeof(float) * (iw * ih + kw * kh + ow * oh));

  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * iw *
                                                           ih);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * kw * kh);
  EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * ow * oh);
}

// Huge stride: only a couple of output positions actually compute.
TEST_F(HloCostAnalysisTest, ConvolutionExtreme) {
  XlaBuilder builder("convolution");
  constexpr int64_t kLarge = 512 * 1024;
  auto input = Parameter(
      &builder, 0, ShapeUtil::MakeShape(F32, {1, 1, kLarge}), "input");
  auto kernel = Parameter(
      &builder, 1, ShapeUtil::MakeShape(F32, {1, 1, kLarge}), "kernel");
  ConvGeneralDilated(input, kernel, {kLarge - 1}, {{0, 0}}, {kLarge}, {1},
                     XlaBuilder::CreateDefaultConvDimensionNumbers(1));

  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));

  EXPECT_EQ(analysis.flop_count(), 2 * kLarge);
}

// Huge padding with a one-element input: still only kLarge taps total.
TEST_F(HloCostAnalysisTest, ConvolutionExtreme2) {
  XlaBuilder builder("convolution");
  constexpr int64_t kLarge = 512 * 1024;
  auto input = Parameter(
      &builder, 0, ShapeUtil::MakeShape(F32, {1, 1, 1}), "input");
  auto kernel = Parameter(
      &builder, 1, ShapeUtil::MakeShape(F32, {1, 1, kLarge}), "kernel");
  ConvGeneralDilated(input, kernel, {1}, {{kLarge - 1, kLarge - 1}}, {1}, {1},
                     XlaBuilder::CreateDefaultConvDimensionNumbers(1));

  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));

  EXPECT_EQ(analysis.flop_count(), 2 * kLarge);
}

// Depthwise conv (120 feature groups): costs scale with the group count.
TEST_F(HloCostAnalysisTest, ConvolutionWithFeatureGroup) {
  XlaBuilder builder("convolution");
  auto input = Parameter(
      &builder, 0, ShapeUtil::MakeShape(F32, {1, 120, 10, 20}), "input");
  auto kernel = Parameter(
      &builder, 1, ShapeUtil::MakeShape(F32, {120, 1, 3, 3}), "kernel");
  Conv(input, kernel, {1, 1}, Padding::kValid, 120);

  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));

  EXPECT_EQ(analysis.flop_count(), 120 * 8 * 18 * 2 *
                                       3 * 3);
  EXPECT_EQ(analysis.bytes_accessed(),
            sizeof(float) * (120 * 10 * 20 + 120 * 3 * 3 + 120 * 8 * 18));

  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0),
            sizeof(float) * 120 * 10 * 20);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1),
            sizeof(float) * 120 * 3 * 3);
  EXPECT_EQ(analysis.output_bytes_accessed(*root),
            sizeof(float) * 120 * 8 * 18);
}

// Row reduction of [10,20]: one add per element minus one per output row.
TEST_F(HloCostAnalysisTest, Reduce) {
  XlaBuilder builder("reduce");
  auto input =
      Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 20}), "input");
  Reduce(input, ConstantR0<float>(&builder, 0.0f), add_, {1});

  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));

  EXPECT_EQ(analysis.flop_count(), 10 * 20 - 10);
  EXPECT_EQ(analysis.bytes_accessed(), sizeof(float) * (10 * 20 + 1 + 10));

  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0),
            sizeof(float) * 10 * 20);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 1);
  EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 10);
}

// Non-overlapping 4x5 reduce-window: (window-1) adds per output element.
TEST_F(HloCostAnalysisTest, ReduceWindow) {
  XlaBuilder builder("reduce_window");
  auto input =
      Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 20}), "input");
  ReduceWindow(input, ConstantR0<float>(&builder, 0), add_, {4, 5}, {4, 5},
               Padding::kValid);

  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));

  EXPECT_EQ(analysis.flop_count(), 2 * 4 * (4 * 5 - 1));
  EXPECT_EQ(analysis.bytes_accessed(), sizeof(float) * (10 * 20 + 1 + 2 * 4));

  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0),
            sizeof(float) * 10 * 20);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1),
            sizeof(float) * 1);
  EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 2 * 4);
}

// Overlapping windows: flops scale with output count, not input count.
TEST_F(HloCostAnalysisTest, ReduceWindowWithOverlaps) {
  XlaBuilder builder("reduce_window");
  auto input =
      Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {8, 8}), "input");
  ReduceWindow(input, ConstantR0<float>(&builder, 0), add_, {4, 5}, {2, 1},
               Padding::kValid);

  auto hlo_module = BuildHloGraph(&builder);
  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  int n_output_elements = 3 * 4;

  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(root->Accept(&analysis));

  EXPECT_EQ(analysis.flop_count(), n_output_elements * (4 * 5 - 1));
  EXPECT_EQ(analysis.bytes_accessed(),
            sizeof(float) * (8 * 8 + 1 + n_output_elements));

  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 8 * 8);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 1);
  EXPECT_EQ(analysis.output_bytes_accessed(*root),
            sizeof(float) * n_output_elements);
}

// A reduce-window that is effectively a single-dim reduce + broadcast.
TEST_F(HloCostAnalysisTest, ReduceWindowSingleDimReduceBroadcast) {
  absl::string_view hlo_text = R"(
 HloModule fusion.50

region_0.868 {
  Arg_1.870 = f32[] parameter(1)
  Arg_0.869 = f32[] parameter(0)
  ROOT maximum.871 = f32[] maximum(Arg_0.869, Arg_1.870)
}

ENTRY fusion.50 {
  constant.367 = f32[] constant(-inf)
  param0 = f32[2,3,1024,1024]{2,3,1,0} parameter(0)
  ROOT reduce-window.159 = f32[2,3,1024,1024]{2,3,1,0} reduce-window(param0, constant.367), window={size=1x1x1x2047 pad=0_0x0_0x0_0x1023_1023}, to_apply=region_0.868
}
)";
  auto hlo_module = ParseAndReturnUnverifiedModule(hlo_text).value();
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));

  EXPECT_EQ(analysis.flop_count(), (2 * 3 * 1024) + (1024 - 1));

  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0),
            sizeof(float) * 2 * 3 * 1024 * 1024);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) *
                                                           1);
  EXPECT_EQ(analysis.output_bytes_accessed(*root),
            sizeof(float) * 2 * 3 * 1024 * 1024);
}

// Variadic (two-result) reduce-window with a tuple-producing reducer.
TEST_F(HloCostAnalysisTest, ReduceWindowVariadic) {
  XlaBuilder builder("reduce_window_variadic");
  auto elem_shape = ShapeUtil::MakeShape(F32, {});
  auto p2 = Parameter(&builder, 0, elem_shape, "x0");
  auto p3 = Parameter(&builder, 1, elem_shape, "x1");
  auto p4 = Parameter(&builder, 2, elem_shape, "y0");
  auto p5 = Parameter(&builder, 3, elem_shape, "y1");
  absl::InlinedVector<XlaOp, 2> compute_vec = {Min(p2, p4), Min(p3, p5)};
  Tuple(&builder, compute_vec);
  TF_ASSERT_OK_AND_ASSIGN(auto compute_tuple, builder.Build());
  auto input1 =
      Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 20}), "input1");
  auto input2 =
      Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {10, 20}), "input2");
  auto init = ConstantR0<float>(&builder, 0);
  ReduceWindow({input1, input2}, {init, init}, compute_tuple, {4, 5}, {4, 5},
               Padding::kValid);

  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));

  EXPECT_EQ(analysis.flop_count(), 2 * 4 * 2 * (4 * 5 - 1));
  EXPECT_EQ(analysis.bytes_accessed(), sizeof(float) * (10 * 20 * 2 + 2 * 3));

  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1),
            sizeof(float) * 10 * 20);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0),
            sizeof(float) * 10 * 20);
  EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 4);
}

// Select-and-scatter: one select (gt_) per window element + one add per
// source element.
TEST_F(HloCostAnalysisTest, SelectAndScatter) {
  XlaBuilder builder("select_and_scatter");
  auto operand =
      Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 20}), "input");
  auto source =
      Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {2, 4}), "source");
  SelectAndScatter(operand, gt_, {4, 5}, {4, 5}, Padding::kValid, source,
                   ConstantR0<float>(&builder, 0), add_);

  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));

  EXPECT_EQ(analysis.flop_count(), 2 * 4 * (4 * 5 - 1 + 1));
  EXPECT_EQ(analysis.bytes_accessed(),
            sizeof(float) * (10 * 20 + 2 * 4 + 1 + 10 * 20));

  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0),
            sizeof(float) * 10 * 20);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float) * 2 * 4);
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 2), sizeof(float) * 1);
  EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 10 * 20);
}

// Broadcast reads its scalar input once by default.
TEST_F(HloCostAnalysisTest, Broadcast) {
  XlaBuilder b("broadcast");
  Broadcast(ConstantR0<float>(&b, 42), {10, 7});
  auto hlo_module = BuildHloGraph(&b);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
  EXPECT_EQ(analysis.flop_count(), 0);

  EXPECT_EQ(analysis.bytes_accessed(), sizeof(float) * (1 + 10 * 7));

  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 1);
  EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 10 * 7);
}

// With count_multiple_input_accesses, each output element charges an
// operand read.
TEST_F(HloCostAnalysisTest, BroadcastCountMultipleInputAccesses) {
  XlaBuilder b("broadcast");
  Broadcast(ConstantR0<float>(&b, 42), {10, 7});
  auto hlo_module = BuildHloGraph(&b);
  HloCostAnalysis analysis(HloCostAnalysis::Options{
      .shape_size = ShapeSize, .count_multiple_input_accesses = true});
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));
  EXPECT_EQ(analysis.flop_count(), 0);

  EXPECT_EQ(analysis.bytes_accessed(), sizeof(float) * (1 + 10 * 7));

  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0),
            sizeof(float) * 10 * 7);
  EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 10 * 7);
}

// sigmoid(input * weight + bias): dot + add + per-element sigmoid.
TEST_F(HloCostAnalysisTest, FullyConnectedForward) {
  XlaBuilder
      builder("fully_connected_forward");
  auto input =
      Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {10, 5}), "input");
  auto weight =
      Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {5, 20}), "weight");
  auto bias = Parameter(&builder, 2, ShapeUtil::MakeShape(F32, {20}), "bias");
  Map(&builder, {Add(Dot(input, weight), bias, {1})}, sigmoid_, {0, 1});

  auto hlo_module = BuildHloGraph(&builder);
  HloCostAnalysis analysis(ShapeSize);
  ASSERT_IS_OK(
      hlo_module->entry_computation()->root_instruction()->Accept(&analysis));

  // dot (2*1000) + bias add (200) + sigmoid's 3 flops per element (3*200).
  EXPECT_EQ(analysis.flop_count(), 2 * 1000 + 200 + 3 * 200);
  EXPECT_EQ(analysis.transcendental_count(), 200);
}

// A 1x1 conv over 64x64 and a 64x64 matmul should have identical flop counts.
TEST_F(HloCostAnalysisTest, MatmulAndConvolutionCanBeTheSameComputation) {
  HloCostAnalysis conv_analysis(ShapeSize);
  {
    XlaBuilder builder("conv_looking_matmul");
    auto lhs = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {64, 64, 1, 1}),
                         "input");
    auto rhs = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {64, 64, 1, 1}),
                         "weights");
    Conv(lhs, rhs, {1, 1}, Padding::kSame);
    auto hlo_module = BuildHloGraph(&builder);
    ASSERT_IS_OK(hlo_module->entry_computation()->root_instruction()->Accept(
        &conv_analysis));
  }

  HloCostAnalysis matmul_analysis(ShapeSize);
  {
    XlaBuilder builder("matmul");
    auto lhs =
        Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {64, 64}), "input");
    auto rhs =
        Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {64, 64}), "weights");
    Dot(lhs, rhs);
    auto hlo_module = BuildHloGraph(&builder);
    ASSERT_IS_OK(hlo_module->entry_computation()->root_instruction()->Accept(
        &matmul_analysis));
  }

  EXPECT_EQ(conv_analysis.flop_count(), matmul_analysis.flop_count());
}

// When per-op work is tiny, the configured minimum latency dominates.
TEST_F(HloCostAnalysisTest, LatencyBoundedOptimalTime) {
  absl::string_view hlo_string = R"(
  HloModule module, is_scheduled=true
  ENTRY Entry {
    param0 = f32[1,1] parameter(0)
    param1 = f32[1,1] parameter(1)
    ROOT add = f32[1,1] add(param0, param1)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  const HloInstruction* add =
      module->entry_computation()->root_instruction();
  HloCostAnalysis::Options options{ShapeSize};
  const float clock_cycle_seconds = 10.0f;
  options.set_flops_per_second(1024);
  options.set_bytes_per_second(1024);
  options.set_transcendentals_per_second(1024);
  options.set_flops_min_latency_second(clock_cycle_seconds);
  HloCostAnalysis cost_analysis(options);
  ASSERT_IS_OK(add->Accept(&cost_analysis));
  EXPECT_EQ(cost_analysis.optimal_seconds(), clock_cycle_seconds);
}

using FusionCostAnalysis = HloTestBase;

// A loop fusion whose root is an in-place dynamic-update-slice should cost
// the same as a bare DUS (notably: 0 bytes for the updated-in-place operand).
TEST_F(FusionCostAnalysis, LoopFusionDynUpdateSlice) {
  const char* hlo_fusion_module_str = R"(
  HloModule module

  _.1 {
    tmp_0 = bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)} parameter(0)
    tmp_1 = bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)} parameter(2)
    tmp_2 = s32[]{:T(128)} parameter(1)
    tmp_3 = s32[]{:T(128)} constant(0)
    tmp_4 = bf16[1,32,256,1152]{3,2,1,0:T(8,128)(2,1)S(3)} dynamic-slice(tmp_1, tmp_2, tmp_3, tmp_3, tmp_3), dynamic_slice_sizes={1,32,256,1152}
    tmp_11 = bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)} dynamic-update-slice(tmp_0, tmp_4, tmp_2, tmp_3, tmp_3, tmp_3)
    ROOT tmp_20 = (bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)}) tuple(tmp_11)
  }

  ENTRY _ {
    _0 = bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)} parameter(0)
    _1 = s32[]{:T(128)} parameter(1)
    _4 = bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)} parameter(2)
    ROOT _ = (bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)}) fusion(_0, _1, _4), kind=kLoop, calls=_.1
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_fusion_module_str));
  HloCostAnalysis fusion_analysis(ShapeSize);
  HloInstruction* fusion = module->entry_computation()->root_instruction();
  ASSERT_IS_OK(fusion->Accept(&fusion_analysis));

  const char* hlo_dus_module_str = R"(
  HloModule module

  ENTRY _ {
    _0 = bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)} parameter(0)
    _1 = s32[]{:T(128)} parameter(1)
    _2 = bf16[1,32,256,1152]{3,2,1,0:T(8,128)(2,1)} parameter(2)
    ROOT _ = bf16[50,32,256,1152]{3,2,1,0:T(8,128)(2,1)} dynamic-update-slice(_0, _2, _1, _1, _1, _1)
        module->AddEntryComputation(builder.Build());
    // Fuse the arithmetic chain into a single kLoop fusion.
    auto* fusion = computation->CreateFusionInstruction(
        {sub, mul, exp, clamp, add}, HloInstruction::FusionKind::kLoop);

    // On iteration i, slow down exactly one resource by the factor that makes
    // its cost 2^i seconds, so optimal_seconds tracks the bottleneck.
    HloCostAnalysis::Options options{ShapeSize};
    options.set_flops_per_second(16 * (i == 1 ? 1 / 2.0 : 1.0));
    options.set_transcendentals_per_second(4 * (i == 2 ? 1 / 4.0 : 1.0));
    options.set_bytes_per_second(64 * (i == 3 ? 1 / 8.0 : 1.0));
    HloCostAnalysis fusion_analysis(options);
    ASSERT_IS_OK(fusion->Accept(&fusion_analysis));

    EXPECT_EQ(fusion_analysis.flop_count(), 16);
    EXPECT_EQ(fusion_analysis.transcendental_count(), 4);
    constexpr int64_t bytes_accessed = sizeof(float) * 4 * 2 * 2;
    static_assert(bytes_accessed == 64, "");
    EXPECT_EQ(fusion_analysis.bytes_accessed(), bytes_accessed);

    EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 0),
              sizeof(float) * 2 * 2);
    EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 1),
              sizeof(float) * 2 * 2);
    EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 2),
              sizeof(float) * 2 * 2);
    EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion),
              sizeof(float) * 2 * 2);

    EXPECT_EQ(fusion_analysis.optimal_seconds(), 1 << i);
  }
}

// A copy fused inside a nested fusion must not change the outer fusion's
// byte accounting versus reading the copied layout directly.
TEST_F(FusionCostAnalysis, NestedCopyFusion) {
  absl::string_view nested_fusion_text = R"(
HloModule temp, is_scheduled=true

copy_fusion.1291.clone {
  input.1291 = s8[2,6144,2,256]{3,1,0,2:T(32,128)(4,1)S(1)} parameter(0)
  ROOT copy.74276 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} copy(input.1291)
}

fused_computation.4150.clone {
  param_0.185389 = s8[2,6144,2,256]{3,1,0,2:T(32,128)(4,1)} parameter(0)
  fusion.103344 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} fusion(param_0.185389), kind=kLoop, calls=copy_fusion.1291.clone
  constant.230138 = s32[]{:T(128)} constant(0)
  param_1.219146 = s32[]{:T(128)S(6)} parameter(1)
  ROOT dynamic-slice.40526 = s8[2,384,2,256]{3,1,0,2:T(8,128)(4,1)} dynamic-slice(fusion.103344, constant.230138, param_1.219146, constant.230138, constant.230138), dynamic_slice_sizes={2,384,2,256}
}

ENTRY temp {
  param_2.123719 =
      s8[2,6144,2,256]{3,1,0,2:T(32,128)(4,1)} parameter(0)
  param_3.66279 = s32[]{:T(128)S(6)} parameter(1)
  ROOT fusion.85943 = s8[2,384,2,256]{3,1,0,2:T(8,128)(4,1)} fusion(param_2.123719, param_3.66279), kind=kLoop, calls=fused_computation.4150.clone
}
)";
  absl::string_view fusion_text = R"(
HloModule temp, is_scheduled=true

fused_computation.4150.clone {
  param_0.185389 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} parameter(0)
  constant.230138 = s32[]{:T(128)} constant(0)
  param_1.219146 = s32[]{:T(128)S(6)} parameter(1)
  ROOT dynamic-slice.40526 = s8[2,384,2,256]{3,1,0,2:T(8,128)(4,1)} dynamic-slice(param_0.185389, constant.230138, param_1.219146, constant.230138, constant.230138), dynamic_slice_sizes={2,384,2,256}
}

ENTRY temp {
  param_2.123719 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} parameter(0)
  param_3.66279 = s32[]{:T(128)S(6)} parameter(1)
  ROOT fusion.85943 = s8[2,384,2,256]{3,1,0,2:T(8,128)(4,1)} fusion(param_2.123719, param_3.66279), kind=kLoop, calls=fused_computation.4150.clone
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto nested_fusion_module,
                          ParseAndReturnVerifiedModule(nested_fusion_text));
  HloCostAnalysis nested_analysis(ShapeSize);
  auto* nested_root =
      nested_fusion_module->entry_computation()->root_instruction();
  ASSERT_IS_OK(nested_root->Accept(&nested_analysis));
  TF_ASSERT_OK_AND_ASSIGN(auto fusion_module,
                          ParseAndReturnVerifiedModule(fusion_text));
  HloCostAnalysis fusion_analysis(ShapeSize);
  auto* fusion_root = fusion_module->entry_computation()->root_instruction();
  ASSERT_IS_OK(fusion_root->Accept(&fusion_analysis));

  EXPECT_EQ(nested_analysis.bytes_accessed(*nested_root),
            fusion_analysis.bytes_accessed(*fusion_root));
}

// Same nested-copy comparison, but with a dynamic-update-slice consumer.
// NOTE: this test's definition continues past the end of this chunk.
TEST_F(FusionCostAnalysis, NestedCopyFusionDUS) {
  absl::string_view nested_fusion_text = R"(
HloModule temp, is_scheduled=true

copy_fusion.1291.clone {
  input.1291 = s8[2,6144,2,256]{3,1,0,2:T(32,128)(4,1)} parameter(0)
  ROOT copy.74276 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} copy(input.1291)
}

fused_computation.4150.clone {
  param_0.185389 =
      s8[2,6144,2,256]{3,1,0,2:T(32,128)(4,1)} parameter(0)
  fusion.103344 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} fusion(param_0.185389), kind=kLoop, calls=copy_fusion.1291.clone
  param_1.185389 = s8[2,6144,1,256]{3,1,0,2:T(8,128)(4,1)} parameter(2)
  constant.230138 = s32[]{:T(128)} constant(0)
  param_1.219146 = s32[]{:T(128)S(6)} parameter(1)
  param_3.229 = pred[]{:T(512)} constant(false)
  broadcast.11499 = pred[2,6144,1,256]{3,1,0,2:T(8,128)(4,1)} broadcast(param_3.229), dimensions={}
  dynamic-slice.11241 = s8[2,6144,1,256]{3,1,0,2:T(8,128)(4,1)} dynamic-slice(fusion.103344, constant.230138, constant.230138, param_1.219146, constant.230138), dynamic_slice_sizes={2,6144,1,256}
  select.9063 = s8[2,6144,1,256]{3,1,0,2:T(8,128)(4,1)} select(broadcast.11499, param_1.185389, dynamic-slice.11241)
  ROOT dynamic-update-slice.40526 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} dynamic-update-slice(fusion.103344, select.9063, constant.230138, constant.230138, param_1.219146, constant.230138)
}

ENTRY temp {
  param_2.123719 = s8[2,6144,2,256]{3,1,0,2:T(32,128)(4,1)} parameter(0)
  param_3.66279 = s32[]{:T(128)S(6)} parameter(1)
  param_1.123719 = s8[2,6144,1,256]{3,1,0,2:T(8,128)(4,1)} parameter(2)
  ROOT fusion.85943 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} fusion(param_2.123719, param_3.66279, param_1.123719), kind=kLoop, calls=fused_computation.4150.clone
}
)";
  absl::string_view fusion_text = R"(
HloModule temp, is_scheduled=true

fused_computation.4150.clone {
  param_0.185389 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} parameter(0)
  param_1.185389 = s8[2,6144,1,256]{3,1,0,2:T(8,128)(4,1)} parameter(2)
  constant.230138 = s32[]{:T(128)} constant(0)
  param_1.219146 = s32[]{:T(128)S(6)} parameter(1)
  param_3.229 = pred[]{:T(512)} constant(false)
  broadcast.11499 = pred[2,6144,1,256]{3,1,0,2:T(8,128)(4,1)} broadcast(param_3.229), dimensions={}
  dynamic-slice.11241 = s8[2,6144,1,256]{3,1,0,2:T(8,128)(4,1)} dynamic-slice(param_0.185389, constant.230138, constant.230138, param_1.219146, constant.230138),
dynamic_slice_sizes={2,6144,1,256} select.9063 = s8[2,6144,1,256]{3,1,0,2:T(8,128)(4,1)} select(broadcast.11499, param_1.185389, dynamic-slice.11241) ROOT dynamic-update-slice.40526 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} dynamic-update-slice(param_0.185389, select.9063, constant.230138, constant.230138, param_1.219146, constant.230138) } ENTRY temp { param_2.123719 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} parameter(0) param_3.66279 = s32[]{:T(128)S(6)} parameter(1) param_1.123719 = s8[2,6144,1,256]{3,1,0,2:T(8,128)(4,1)} parameter(2) ROOT fusion.85943 = s8[2,6144,2,256]{3,1,0,2:T(8,128)(4,1)} fusion(param_2.123719, param_3.66279, param_1.123719), kind=kLoop, calls=fused_computation.4150.clone } )"; TF_ASSERT_OK_AND_ASSIGN(auto nested_fusion_module, ParseAndReturnVerifiedModule(nested_fusion_text)); HloCostAnalysis nested_analysis(ShapeSize); auto* nested_root = nested_fusion_module->entry_computation()->root_instruction(); ASSERT_IS_OK(nested_root->Accept(&nested_analysis)); TF_ASSERT_OK_AND_ASSIGN(auto fusion_module, ParseAndReturnVerifiedModule(fusion_text)); HloCostAnalysis fusion_analysis(ShapeSize); auto* fusion_root = fusion_module->entry_computation()->root_instruction(); ASSERT_IS_OK(fusion_root->Accept(&fusion_analysis)); EXPECT_EQ(nested_analysis.bytes_accessed(*nested_root), fusion_analysis.bytes_accessed(*fusion_root)); } TEST_F(FusionCostAnalysis, NestedFusionFeedsMultipleUsers) { absl::string_view hlo_text = R"( HloModule temp, is_scheduled=true fused_computation.1 { tmp_0 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} parameter(0) tmp_1 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} fusion(bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_0), kind=kLoop, calls= { tmp_0 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} parameter(0) ROOT tmp_4 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} add(bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_0, bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_0) } tmp_2 = bf16[]{:T(256)} constant(0) tmp_3 = 
bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} reduce-window(bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_1, bf16[]{:T(256)} tmp_2), window={size=1x1x1x1023 pad=0_0x0_0x0_0x511_511}, to_apply= { tmp_0 = bf16[]{:T(256)} parameter(0) tmp_1 = bf16[]{:T(256)} parameter(1) ROOT tmp_2 = bf16[]{:T(256)} add(bf16[]{:T(256)} tmp_0, bf16[]{:T(256)} tmp_1) } ROOT tmp_4 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} divide(bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_1, bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_3) } ENTRY temp { tmp_0 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} parameter(0) ROOT result = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} fusion(tmp_0), kind=kLoop, calls=fused_computation.1 } )"; TF_ASSERT_OK_AND_ASSIGN(auto fusion_module, ParseAndReturnVerifiedModule(hlo_text)); HloCostAnalysis fusion_analysis(ShapeSize); auto* fusion_root = fusion_module->entry_computation()->root_instruction(); ASSERT_IS_OK(fusion_root->Accept(&fusion_analysis)); EXPECT_EQ(1073741824, fusion_analysis.bytes_accessed(*fusion_root)); } TEST_F(FusionCostAnalysis, ParamFeedsNestedFusionAndTrivialUser) { absl::string_view hlo_text = R"( HloModule temp, is_scheduled=true fused_computation.1 { tmp_0 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} parameter(0) tmp_1 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} fusion(bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_0), kind=kLoop, calls= { tmp_0 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} parameter(0) ROOT tmp_4 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} add(bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_0, bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_0) } tmp_2 = bf16[]{:T(256)} constant(0) tmp_3 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} reduce-window(bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_1, bf16[]{:T(256)} tmp_2), window={size=1x1x1x1023 pad=0_0x0_0x0_0x511_511}, to_apply= { tmp_0 = bf16[]{:T(256)} parameter(0) tmp_1 = bf16[]{:T(256)} parameter(1) ROOT tmp_2 = bf16[]{:T(256)} add(bf16[]{:T(256)} tmp_0, bf16[]{:T(256)} 
tmp_1) } ROOT tmp_4 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} divide(bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_0, bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} tmp_3) } ENTRY temp { tmp_0 = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} parameter(0) ROOT result = bf16[64,16,512,512]{2,3,1,0:T(8,128)(2,1)} fusion(tmp_0), kind=kLoop, calls=fused_computation.1 } )"; TF_ASSERT_OK_AND_ASSIGN(auto fusion_module, ParseAndReturnVerifiedModule(hlo_text)); HloCostAnalysis fusion_analysis(ShapeSize); auto* fusion_root = fusion_module->entry_computation()->root_instruction(); ASSERT_IS_OK(fusion_root->Accept(&fusion_analysis)); EXPECT_EQ(1610612736, fusion_analysis.bytes_accessed(*fusion_root)); } TEST_F(FusionCostAnalysis, LoopFusionTupleOutput) { Shape r2f32 = ShapeUtil::MakeShape(F32, {2, 2}); HloComputation::Builder builder(TestName()); auto c1 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR2F32Linspace( 0.0f, 1.0f, 2, 2))); auto c2 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR2F32Linspace( 1.0f, 2.0f, 2, 2))); auto c3 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR2F32Linspace( 2.0f, 3.0f, 2, 2))); auto tuple1 = builder.AddInstruction(HloInstruction::CreateTuple({c1, c2})); auto add = builder.AddInstruction( HloInstruction::CreateBinary(r2f32, HloOpcode::kAdd, c1, c2)); auto clamp = builder.AddInstruction( HloInstruction::CreateTernary(r2f32, HloOpcode::kClamp, c2, add, add)); auto exp = builder.AddInstruction( HloInstruction::CreateUnary(r2f32, HloOpcode::kExp, add)); auto mul = builder.AddInstruction( HloInstruction::CreateBinary(r2f32, HloOpcode::kMultiply, exp, c3)); auto sub = builder.AddInstruction( HloInstruction::CreateBinary(r2f32, HloOpcode::kSubtract, mul, clamp)); auto tuple2 = builder.AddInstruction( HloInstruction::CreateTuple({sub, sub, mul, tuple1})); auto module = CreateNewVerifiedModule(); auto* computation = module->AddEntryComputation(builder.Build()); auto* 
fusion = computation->CreateFusionInstruction( {tuple2, sub, mul, exp, clamp, add}, HloInstruction::FusionKind::kLoop); HloCostAnalysis fusion_analysis(ShapeSize); ASSERT_IS_OK(fusion->Accept(&fusion_analysis)); EXPECT_EQ(fusion_analysis.flop_count(), 16); EXPECT_EQ(fusion_analysis.transcendental_count(), 4); EXPECT_EQ(fusion_analysis.bytes_accessed(*fusion), sizeof(float) * (5 + 5) * 2 * 2); EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 0), sizeof(float) * 2 * 2 * 2); EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 1), sizeof(float) * 2 * 2); EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 2), sizeof(float) * 2 * 2); EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 3), sizeof(float) * 2 * 2); EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion), sizeof(float) * 5 * 2 * 2); EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion, {0}), sizeof(float) * 2 * 2); EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion, {1}), sizeof(float) * 2 * 2); EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion, {2}), sizeof(float) * 2 * 2); EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion, {3}), sizeof(float) * 2 * 2 * 2); EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion, {3, 0}), sizeof(float) * 2 * 2); EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion, {3, 1}), sizeof(float) * 2 * 2); } TEST_F(FusionCostAnalysis, NoLayout) { Shape shape_with_layout = ShapeUtil::MakeShape(F32, {2, 3, 4, 5}); Shape shape_without_layout = shape_with_layout; shape_without_layout.clear_layout(); HloComputation::Builder builder(TestName()); auto c1 = builder.AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateR4FromArray4D(Array4D<float>(2, 3, 4, 5)))); auto c2 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({1, 2, 3}))); auto broadcast = builder.AddInstruction( HloInstruction::CreateBroadcast(shape_without_layout, c2, {1})); auto add = 
builder.AddInstruction(HloInstruction::CreateBinary( shape_with_layout, HloOpcode::kAdd, c1, broadcast)); auto module = CreateNewVerifiedModule(); auto* computation = module->AddEntryComputation(builder.Build()); auto* fusion = computation->CreateFusionInstruction( {add, broadcast}, HloInstruction::FusionKind::kLoop); HloCostAnalysis fusion_analysis(ShapeSize); ASSERT_IS_OK(fusion->Accept(&fusion_analysis)); EXPECT_EQ(fusion_analysis.flop_count(), 120); EXPECT_EQ(fusion_analysis.transcendental_count(), 0); EXPECT_EQ(fusion_analysis.bytes_accessed(), sizeof(float) * (2 * 3 * 4 * 5 + 3 + 2 * 3 * 4 * 5)); EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 0), sizeof(float) * 2 * 3 * 4 * 5); EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 1), sizeof(float) * 3); EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion), sizeof(float) * 2 * 3 * 4 * 5); } TEST_F(FusionCostAnalysis, NonTupleWithTupleParamBytesAccessed) { absl::string_view hlo_string = R"( HloModule module, is_scheduled=true fused_computation { param = (f32[3,2]{1,0}, f32[3,2]{1,0}) parameter(0) gte0 = f32[3,2]{1,0} get-tuple-element(param), index=0 gte1 = f32[3,2]{1,0} get-tuple-element(param), index=1 ROOT add = f32[3,2]{1,0} add(gte0, gte1) } ENTRY entry { param0 = f32[3,2]{1,0} parameter(0) param1 = f32[3,2]{1,0} parameter(1) tuple = (f32[3,2]{1,0}, f32[3,2]{1,0}) tuple(param0, param1) ROOT fusion = f32[3,2]{1,0} fusion(tuple), kind=kLoop, calls=fused_computation } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); HloInstruction* fusion = module->entry_computation()->root_instruction(); HloCostAnalysis fusion_analysis(ShapeSize); ASSERT_IS_OK(fusion->Accept(&fusion_analysis)); EXPECT_EQ(fusion_analysis.bytes_accessed(*fusion), sizeof(float) * 3 * 2 * 3); EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 0), sizeof(float) * 3 * 2 * 2); EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion), sizeof(float) * 3 * 2); } TEST_F(FusionCostAnalysis, 
TupleBytesAccessed) { absl::string_view hlo_string = R"( HloModule module, is_scheduled=true fused_computation { param = (f32[2,2]{1,0}, f32[2,2]{1,0}) parameter(0) gte0 = f32[2,2]{1,0} get-tuple-element(param), index=0 gte1 = f32[2,2]{1,0} get-tuple-element(param), index=1 add = f32[2,2]{1,0} add(gte0, gte1) mul = f32[2,2]{1,0} multiply(gte0, gte1) ROOT root = (f32[2,2]{1,0}, f32[2,2]{1,0}) tuple(add, mul) } ENTRY entry { param0 = f32[2,2]{1,0} parameter(0) param1 = f32[2,2]{1,0} parameter(1) tuple = (f32[2,2]{1,0}, f32[2,2]{1,0}) tuple(param0, param1) ROOT fusion = (f32[2,2]{1,0}, f32[2,2]{1,0}) fusion(tuple), kind=kLoop, calls=fused_computation } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); HloInstruction* fusion = module->entry_computation()->root_instruction(); HloCostAnalysis fusion_analysis(ShapeSize); ASSERT_IS_OK(fusion->Accept(&fusion_analysis)); EXPECT_EQ(fusion_analysis.bytes_accessed(*fusion), sizeof(float) * 2 * 2 * 4); EXPECT_EQ(fusion_analysis.operand_bytes_accessed(*fusion, 0), sizeof(float) * 2 * 2 * 2); EXPECT_EQ(fusion_analysis.output_bytes_accessed(*fusion), sizeof(float) * 2 * 2 * 2); } TEST_F(FusionCostAnalysis, IgnoreUnusedParameterShape) { absl::string_view hlo_string = R"( HloModule m f { p0 = (s8[3], s8[100]) parameter(0) gte0 = s8[3] get-tuple-element(p0), index=0 c1 = s8[3] constant(0) a1 = s8[3] add(gte0, c1) ROOT r1 = s8[3] add(a1, c1) } ENTRY e { param0 = (s8[3], s8[100]) parameter(0) ROOT r0 = s8[3] fusion(param0), kind=kInput, calls=f } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); HloInstruction* root = module->entry_computation()->root_instruction(); HloCostAnalysis analysis(ShapeSize); ASSERT_IS_OK(root->Accept(&analysis)); EXPECT_EQ(analysis.output_bytes_accessed(*root), 3); EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), 2 * kPointerSize + 3); EXPECT_EQ(analysis.bytes_accessed(*root), 2 * kPointerSize + 3 + 3 + 3); 
EXPECT_EQ(analysis.bytes_accessed(), 2 * kPointerSize + 3 + 3 + 3); } TEST_F(FusionCostAnalysis, InfeedOutfeed) { absl::string_view hlo_string = R"( HloModule module, is_scheduled=true ENTRY entry { after-all = token[] after-all() infeed = ((f32[2,3]{1,0}), token[]) infeed(after-all) gte0 = (f32[2,3]{1,0}) get-tuple-element(infeed), index=0 gte1 = f32[2,3]{1,0} get-tuple-element(gte0), index=0 add = f32[2,3]{1,0} add(gte1, gte1) tuple = (f32[2,3]{1,0}) tuple(add) tok = token[] get-tuple-element(infeed), index=1 ROOT outfeed = token[] outfeed(tuple, tok) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); HloInstruction* infeed = module->entry_computation()->GetInstructionWithName("infeed"); HloInstruction* outfeed = module->entry_computation()->GetInstructionWithName("outfeed"); HloCostAnalysis analysis(ShapeSize); ASSERT_IS_OK(infeed->Accept(&analysis)); ASSERT_IS_OK(outfeed->Accept(&analysis)); EXPECT_EQ(analysis.bytes_accessed(*infeed), sizeof(float) * 2 * 3); EXPECT_EQ(analysis.operand_bytes_accessed(*infeed, 0), 0); EXPECT_EQ(analysis.output_bytes_accessed(*infeed), sizeof(float) * 2 * 3); EXPECT_EQ(analysis.bytes_accessed(*outfeed), sizeof(float) * 2 * 3); EXPECT_EQ(analysis.operand_bytes_accessed(*outfeed, 0), sizeof(float) * 2 * 3); EXPECT_EQ(analysis.output_bytes_accessed(*outfeed), 0); } TEST_F(FusionCostAnalysis, AllReduceTupleBytesAccessed) { absl::string_view hlo_string = R"( HloModule module, is_scheduled=true sum { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } ENTRY entry { param0 = f32[2,2]{1,0} parameter(0) param1 = f32[2,2]{1,0} parameter(1) ROOT all-reduce = (f32[2,2]{1,0}, f32[2,2]{1,0}) all-reduce(param0, param1), replica_groups={{0,1}}, to_apply=sum } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); HloInstruction* all_reduce = module->entry_computation()->root_instruction(); HloCostAnalysis all_reduce_analysis(ShapeSize); 
ASSERT_IS_OK(all_reduce->Accept(&all_reduce_analysis)); EXPECT_EQ(all_reduce_analysis.bytes_accessed(*all_reduce), sizeof(float) * 2 * 2 * 4); EXPECT_EQ(all_reduce_analysis.operand_bytes_accessed(*all_reduce, 0), sizeof(float) * 2 * 2); EXPECT_EQ(all_reduce_analysis.operand_bytes_accessed(*all_reduce, 1), sizeof(float) * 2 * 2); EXPECT_EQ(all_reduce_analysis.output_bytes_accessed(*all_reduce), sizeof(float) * 2 * 2 * 2); } TEST_F(HloCostAnalysisTest, TupleCost) { HloCostAnalysis analysis(ShapeSize); XlaBuilder builder("tuple"); auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {123}), "x"); auto y = Parameter(&builder, 1, ShapeUtil::MakeShape(F32, {42}), "y"); Tuple(&builder, {x, y}); auto hlo_module = BuildHloGraph(&builder); ASSERT_IS_OK( hlo_module->entry_computation()->root_instruction()->Accept(&analysis)); EXPECT_EQ(analysis.flop_count(), 0); EXPECT_EQ(analysis.transcendental_count(), 0); EXPECT_EQ(analysis.bytes_accessed(), kPointerSize * 2); HloInstruction* root = hlo_module->entry_computation()->root_instruction(); EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), 0); EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), 0); EXPECT_EQ(analysis.output_bytes_accessed(*root), kPointerSize * 2); } using DomainCostAnalysis = HloTestBase; TEST_F(DomainCostAnalysis, DomainCost) { HloCostAnalysis analysis(ShapeSize); HloComputation::Builder builder("domain"); auto x = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(F32, {123}), "x")); auto y = builder.AddInstruction( HloInstruction::CreateParameter(1, ShapeUtil::MakeShape(F32, {42}), "y")); auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({x, y})); auto domain = builder.AddInstruction( HloInstruction::CreateDomain(tuple->shape(), tuple, nullptr, nullptr)); auto hlo_module = CreateNewVerifiedModule(); hlo_module->AddEntryComputation(builder.Build()); EXPECT_EQ(hlo_module->entry_computation()->root_instruction(), domain); 
ASSERT_IS_OK(domain->Accept(&analysis)); EXPECT_EQ(analysis.flop_count(*domain), 0); EXPECT_EQ(analysis.transcendental_count(*domain), 0); EXPECT_EQ(analysis.bytes_accessed(*domain), 0); } TEST_F(HloCostAnalysisTest, BaseDilatedConvolution) { XlaBuilder builder("BaseDilatedConvolution"); auto input = Parameter( &builder, 0, ShapeUtil::MakeShape(F32, {1, 1, 10, 20}), "input"); auto kernel = Parameter( &builder, 1, ShapeUtil::MakeShape(F32, {1, 1, 3, 3}), "kernel"); ConvGeneralDilated(input, kernel, {1, 1}, {{1, 1}, {1, 1}}, {3, 5}, {7, 11}, XlaBuilder::CreateDefaultConvDimensionNumbers(2)); auto hlo_module = BuildHloGraph(&builder); HloCostAnalysis analysis(ShapeSize); ASSERT_IS_OK( hlo_module->entry_computation()->root_instruction()->Accept(&analysis)); EXPECT_EQ(analysis.flop_count(), 1472); } TEST_F(HloCostAnalysisTest, Slice) { XlaBuilder builder("slice"); auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {2}), "x"); Slice(x, {0}, {1}, {1}); auto hlo_module = BuildHloGraph(&builder); HloCostAnalysis analysis(ShapeSize); ASSERT_IS_OK( hlo_module->entry_computation()->root_instruction()->Accept(&analysis)); EXPECT_EQ(analysis.bytes_accessed(), 8); HloInstruction* root = hlo_module->entry_computation()->root_instruction(); EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float)); EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float)); } TEST_F(HloCostAnalysisTest, DynamicSlice) { XlaBuilder builder("dynamic-slice"); auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {2}), "x"); DynamicSlice(x, absl::Span<const XlaOp>({ConstantR0<int32_t>(&builder, 1)}), {1}); auto hlo_module = BuildHloGraph(&builder); HloCostAnalysis analysis(ShapeSize); ASSERT_IS_OK( hlo_module->entry_computation()->root_instruction()->Accept(&analysis)); EXPECT_EQ(analysis.bytes_accessed(), 8 + 4); HloInstruction* root = hlo_module->entry_computation()->root_instruction(); EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float)); 
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(int32_t)); EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float)); } TEST_F(HloCostAnalysisTest, DynamicUpdateSlice) { XlaBuilder builder("dynamic-update-slice"); auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {2}), "x"); DynamicUpdateSlice( x, ConstantR1<float>(&builder, {1.0}), absl::Span<const XlaOp>({ConstantR0<int32_t>(&builder, 1)})); auto hlo_module = BuildHloGraph(&builder); HloCostAnalysis analysis(ShapeSize); ASSERT_IS_OK( hlo_module->entry_computation()->root_instruction()->Accept(&analysis)); EXPECT_EQ(analysis.bytes_accessed(), 8 + 4); HloInstruction* root = hlo_module->entry_computation()->root_instruction(); EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), 0); EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(float)); EXPECT_EQ(analysis.operand_bytes_accessed(*root, 2), sizeof(int32_t)); EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float)); } TEST_F(HloCostAnalysisTest, Gather) { XlaBuilder builder("gather"); Shape operand_shape = ShapeUtil::MakeShape(S32, {3, 3}); Shape indices_shape = ShapeUtil::MakeShape(S32, {2}); auto operand = Parameter(&builder, 0, operand_shape, "operand"); auto indices = Parameter(&builder, 1, indices_shape, "indices"); GatherDimensionNumbers dim_numbers; dim_numbers.add_offset_dims(1); dim_numbers.add_collapsed_slice_dims(0); dim_numbers.add_start_index_map(0); dim_numbers.set_index_vector_dim(1); Gather(operand, indices, dim_numbers, {1, 3}); auto hlo_module = BuildHloGraph(&builder); HloCostAnalysis analysis(ShapeSize); ASSERT_IS_OK( hlo_module->entry_computation()->root_instruction()->Accept(&analysis)); EXPECT_EQ(analysis.bytes_accessed(), 56); HloInstruction* root = hlo_module->entry_computation()->root_instruction(); EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 2 * 3); EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(int32_t) * 2); EXPECT_EQ(analysis.output_bytes_accessed(*root), 
sizeof(float) * 2 * 3); } TEST_F(HloCostAnalysisTest, Scatter) { XlaBuilder builder("scatter"); Shape operand_shape = ShapeUtil::MakeShape(F32, {3, 3}); Shape indices_shape = ShapeUtil::MakeShape(S32, {2}); Shape values_shape = ShapeUtil::MakeShape(F32, {2, 3}); auto operand = Parameter(&builder, 0, operand_shape, "operand"); auto indices = Parameter(&builder, 1, indices_shape, "indices"); auto values = Parameter(&builder, 2, values_shape, "values"); ScatterDimensionNumbers dim_numbers; dim_numbers.set_index_vector_dim(1); dim_numbers.add_update_window_dims(1); dim_numbers.add_inserted_window_dims(0); dim_numbers.add_scatter_dims_to_operand_dims(0); Scatter(operand, indices, values, add_, dim_numbers); auto hlo_module = BuildHloGraph(&builder); HloCostAnalysis analysis(ShapeSize); ASSERT_IS_OK( hlo_module->entry_computation()->root_instruction()->Accept(&analysis)); EXPECT_EQ(analysis.bytes_accessed(), 4 * (2 + 3 * (2 * 3))); HloInstruction* root = hlo_module->entry_computation()->root_instruction(); EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 2 * 3); EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(int32_t) * 2); EXPECT_EQ(analysis.operand_bytes_accessed(*root, 2), sizeof(float) * 2 * 3); EXPECT_EQ(analysis.output_bytes_accessed(*root), sizeof(float) * 2 * 3); } TEST_F(HloCostAnalysisTest, MultioutputScatter) { XlaBuilder builder("scatter"); Shape operand0_shape = ShapeUtil::MakeShape(F32, {3, 3}); Shape operand1_shape = ShapeUtil::MakeShape(S32, {3, 3}); Shape indices_shape = ShapeUtil::MakeShape(S32, {2}); Shape values0_shape = ShapeUtil::MakeShape(F32, {2, 3}); Shape values1_shape = ShapeUtil::MakeShape(S32, {2, 3}); auto operand0 = Parameter(&builder, 0, operand0_shape, "operand0"); auto operand1 = Parameter(&builder, 1, operand1_shape, "operand1"); auto indices = Parameter(&builder, 2, indices_shape, "indices"); auto values0 = Parameter(&builder, 3, values0_shape, "values0"); auto values1 = Parameter(&builder, 4, 
values1_shape, "values1"); ScatterDimensionNumbers dim_numbers; dim_numbers.set_index_vector_dim(1); dim_numbers.add_update_window_dims(1); dim_numbers.add_inserted_window_dims(0); dim_numbers.add_scatter_dims_to_operand_dims(0); auto add = [] { XlaBuilder builder("add"); auto x0 = Parameter(&builder, 0, ShapeUtil::MakeShape(F32, {}), "x0"); auto x1 = Parameter(&builder, 1, ShapeUtil::MakeShape(S32, {}), "x1"); auto y0 = Parameter(&builder, 2, ShapeUtil::MakeShape(F32, {}), "y0"); auto y1 = Parameter(&builder, 3, ShapeUtil::MakeShape(S32, {}), "y1"); Tuple(&builder, {Add(x0, y0), Add(x1, y1)}); auto computation_status = builder.Build(); TF_CHECK_OK(computation_status.status()); return std::move(computation_status).value(); }(); Scatter({operand0, operand1}, indices, {values0, values1}, add, dim_numbers); auto hlo_module = BuildHloGraph(&builder); HloCostAnalysis analysis(ShapeSize); ASSERT_IS_OK( hlo_module->entry_computation()->root_instruction()->Accept(&analysis)); EXPECT_EQ(analysis.bytes_accessed(), 4 * (2 + 2 * 3 * (2 * 3))); HloInstruction* root = hlo_module->entry_computation()->root_instruction(); EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), sizeof(float) * 2 * 3); EXPECT_EQ(analysis.operand_bytes_accessed(*root, 1), sizeof(int32_t) * 2 * 3); EXPECT_EQ(analysis.operand_bytes_accessed(*root, 2), sizeof(int32_t) * 2); EXPECT_EQ(analysis.operand_bytes_accessed(*root, 3), sizeof(float) * 2 * 3); EXPECT_EQ(analysis.operand_bytes_accessed(*root, 4), sizeof(int32_t) * 2 * 3); EXPECT_EQ(analysis.output_bytes_accessed(*root), 2 * sizeof(float) * 2 * 3); } TEST_F(HloCostAnalysisTest, GetShapeSizeIgnoreUnsupportedShape) { Shape shape = ShapeUtil::MakeShape(F32, {2, 3}); *shape.mutable_layout() = LayoutUtil::MakeLayout({1, 0}, {DIM_DENSE, DIM_COMPRESSED}); HloCostAnalysis analysis(ShapeSize); EXPECT_TRUE(LayoutUtil::IsSparseArray(shape)); EXPECT_EQ(0, analysis.GetShapeSize(shape)); } TEST_F(FusionCostAnalysis, Broadcast) { absl::string_view hlo_string = R"( 
HloModule m f { p0 = s8[] parameter(0) c1 = s8[] constant(0) a1 = s8[] add(p0, c1) b1 = s8[10000] broadcast(a1), dimensions={} b2 = s8[10000] broadcast(c1), dimensions={} ROOT r1 = s8[10000] add(b1, b2) } ENTRY e { param0 = s8[] parameter(0) ROOT r0 = s8[10000] fusion(param0), kind=kInput, calls=f } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); HloInstruction* root = module->entry_computation()->root_instruction(); HloCostAnalysis analysis(ShapeSize); ASSERT_IS_OK(root->Accept(&analysis)); EXPECT_EQ(analysis.output_bytes_accessed(*root), 10000); EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), 1); EXPECT_EQ(analysis.bytes_accessed(*root), 10000 + 1); EXPECT_EQ(analysis.bytes_accessed(), 10000 + 1); } TEST_F(FusionCostAnalysis, RevisitModifiedFusion) { Shape r2f32 = ShapeUtil::MakeShape(F32, {2, 2}); HloComputation::Builder builder(TestName()); HloInstruction* c1 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR2F32Linspace( 0.0f, 1.0f, 2, 2))); HloInstruction* add = builder.AddInstruction( HloInstruction::CreateBinary(r2f32, HloOpcode::kAdd, c1, c1)); HloInstruction* mul = builder.AddInstruction( HloInstruction::CreateBinary(r2f32, HloOpcode::kMultiply, add, add)); HloInstruction* neg = builder.AddInstruction( HloInstruction::CreateUnary(r2f32, HloOpcode::kNegate, mul)); auto module = CreateNewVerifiedModule(); HloComputation* computation = module->AddEntryComputation(builder.Build()); HloInstruction* fusion = computation->CreateFusionInstruction( {neg, mul, add}, HloInstruction::FusionKind::kLoop); HloCostAnalysis::Options options{ShapeSize}; HloCostAnalysis analysis(options); ASSERT_IS_OK(fusion->Accept(&analysis)); constexpr int64_t bytes_accessed = sizeof(float) * 2 * 2 * 2; static_assert(bytes_accessed == 32, ""); EXPECT_EQ(analysis.flop_count(), 4 * 3); EXPECT_EQ(analysis.transcendental_count(), 0); EXPECT_EQ(analysis.bytes_accessed(), bytes_accessed); 
EXPECT_EQ(analysis.operand_bytes_accessed(*fusion, 0), sizeof(float) * 2 * 2); EXPECT_EQ(analysis.output_bytes_accessed(*fusion), sizeof(float) * 2 * 2); ASSERT_IS_OK(analysis.RevisitInstruction(fusion)); EXPECT_EQ(analysis.flop_count(), 4 * 3); EXPECT_EQ(analysis.transcendental_count(), 0); EXPECT_EQ(analysis.bytes_accessed(), bytes_accessed); EXPECT_EQ(analysis.operand_bytes_accessed(*fusion, 0), sizeof(float) * 2 * 2); EXPECT_EQ(analysis.output_bytes_accessed(*fusion), sizeof(float) * 2 * 2); HloComputation* fused_computation = fusion->fused_instructions_computation(); HloInstruction* to_replace = fused_computation->root_instruction(); HloInstruction* exp = fused_computation->AddInstruction(HloInstruction::CreateUnary( r2f32, HloOpcode::kExp, to_replace->mutable_operand(0))); ASSERT_IS_OK(fused_computation->ReplaceInstruction(to_replace, exp)); ASSERT_IS_OK(module->Verify()); ASSERT_IS_OK(analysis.RevisitInstruction(fusion)); EXPECT_EQ(analysis.flop_count(), 4 * 2); EXPECT_EQ(analysis.transcendental_count(), 4); EXPECT_EQ(analysis.bytes_accessed(), bytes_accessed); EXPECT_EQ(analysis.operand_bytes_accessed(*fusion, 0), sizeof(float) * 2 * 2); EXPECT_EQ(analysis.output_bytes_accessed(*fusion), sizeof(float) * 2 * 2); } TEST_F(FusionCostAnalysis, RevisitAlteredFusion) { absl::string_view hlo_string = R"( HloModule m f { fp0 = s8[10] parameter(0) ROOT fr = s8[1] slice(fp0), slice={[0:1]} } ENTRY e { p0 = s8[10] parameter(0) ROOT r = s8[1] fusion(p0), kind=kLoop, calls=f })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); HloInstruction* root = module->entry_computation()->root_instruction(); HloCostAnalysis modified_analysis(ShapeSize); ASSERT_IS_OK(root->Accept(&modified_analysis)); HloInstruction* fusion_root = root->called_computations()[0]->root_instruction(); EXPECT_FLOAT_EQ(modified_analysis.operand_utilization(*fusion_root, 0), 0.1); fusion_root->mutable_slice_limits()->at(0) = 2; 
fusion_root->mutable_shape()->mutable_dimensions()[0] = 2; root->mutable_shape()->mutable_dimensions()[0] = 2; module->mutable_config().SetDefaultComputationLayout( module->entry_computation()->ComputeProgramShape()); ASSERT_IS_OK(modified_analysis.RevisitInstruction(root)); HloCostAnalysis unmodified_analysis(ShapeSize); ASSERT_IS_OK(root->Accept(&unmodified_analysis)); EXPECT_FLOAT_EQ(modified_analysis.operand_utilization(*fusion_root, 0), 0.2); EXPECT_FLOAT_EQ(modified_analysis.operand_utilization(*fusion_root, 0), unmodified_analysis.operand_utilization(*fusion_root, 0)); } TEST_F(FusionCostAnalysis, RevisitWithSharedComputation) { absl::string_view hlo_string = R"( HloModule m add_computation { arg_0.1 = f32[] parameter(0) arg_1.1 = f32[] parameter(1) ROOT r = f32[] add(arg_0.1, arg_1.1) } ENTRY e { p0 = f32[127,125] parameter(0) p1 = f32[127,125] parameter(1) constant_zero = f32[] constant(0) r0 = f32[127] reduce(p0, constant_zero), dimensions={1}, to_apply=add_computation r1 = f32[127] reduce(p0, constant_zero), dimensions={1}, to_apply=add_computation ROOT _ = f32[127] add(r0, r1) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); HloInstruction* root = module->entry_computation()->root_instruction(); HloCostAnalysis analysis(ShapeSize); HloInstruction* add_root = root->operand(1)->called_computations()[0]->root_instruction(); ASSERT_IS_OK(root->Accept(&analysis)); EXPECT_EQ(analysis.operand_utilization(*add_root, 0), 1); ASSERT_IS_OK(analysis.RemoveInstruction(root->mutable_operand(0))); EXPECT_EQ(analysis.operand_utilization(*add_root, 0), 1); ASSERT_IS_OK(analysis.RevisitInstruction(root->mutable_operand(0))); EXPECT_EQ(analysis.operand_utilization(*add_root, 0), 1); } using Properties = HloCostAnalysis::Properties; constexpr auto kFlopsKey = HloCostAnalysis::kFlopsKey; constexpr auto kTranscendentalsKey = HloCostAnalysis::kTranscendentalsKey; constexpr auto kBytesAccessedKey = HloCostAnalysis::kBytesAccessedKey; 
constexpr auto kOptimalSecondsKey = HloCostAnalysis::kOptimalSecondsKey; constexpr auto kUtilizationKey = HloCostAnalysis::kUtilizationKey; constexpr auto kReserved0Key = HloCostAnalysis::kReserved0Key; constexpr auto kReserved1Key = HloCostAnalysis::kReserved1Key; TEST(HloCostAnalysisProperties, ZeroWhenInitialized) { Properties p; EXPECT_EQ(0, p[kFlopsKey]); EXPECT_EQ(0, p[kTranscendentalsKey]); EXPECT_EQ(0, p[kBytesAccessedKey]); EXPECT_EQ(0, p[kOptimalSecondsKey]); EXPECT_EQ(0, p[kUtilizationKey]); EXPECT_EQ(0, p[kReserved0Key]); EXPECT_EQ(0, p[kReserved1Key]); EXPECT_EQ(0, p.operand_utilization(0, {})); EXPECT_EQ(0, p.operand_utilization(1, {})); EXPECT_EQ(0, p.operand_utilization(2, {})); EXPECT_EQ(0, p.operand_utilization(0, {0})); EXPECT_EQ(0, p.operand_utilization(2, {0})); EXPECT_EQ(0, p[HloCostAnalysis::GetOperandUtilizationKey(0, {})]); EXPECT_EQ(0, p[HloCostAnalysis::GetOperandUtilizationKey(1, {})]); EXPECT_EQ(0, p[HloCostAnalysis::GetOperandUtilizationKey(2, {})]); EXPECT_EQ(0, p[HloCostAnalysis::GetOperandUtilizationKey(0, {0})]); EXPECT_EQ(0, p[HloCostAnalysis::GetOperandUtilizationKey(2, {0})]); EXPECT_EQ(0, p.operand_bytes_accessed(0, {})); EXPECT_EQ(0, p.operand_bytes_accessed(1, {})); EXPECT_EQ(0, p.operand_bytes_accessed(2, {})); EXPECT_EQ(0, p.operand_bytes_accessed(0, {0})); EXPECT_EQ(0, p.operand_bytes_accessed(2, {0})); EXPECT_EQ(0, p[HloCostAnalysis::GetOperandBytesAccessedKey(0, {})]); EXPECT_EQ(0, p[HloCostAnalysis::GetOperandBytesAccessedKey(1, {})]); EXPECT_EQ(0, p[HloCostAnalysis::GetOperandBytesAccessedKey(2, {})]); EXPECT_EQ(0, p[HloCostAnalysis::GetOperandBytesAccessedKey(0, {0})]); EXPECT_EQ(0, p[HloCostAnalysis::GetOperandBytesAccessedKey(2, {0})]); EXPECT_EQ(0, p.output_bytes_accessed({})); EXPECT_EQ(0, p.output_bytes_accessed({0})); EXPECT_EQ(0, p[HloCostAnalysis::GetOutputBytesAccessedKey({})]); EXPECT_EQ(0, p[HloCostAnalysis::GetOutputBytesAccessedKey({0})]); EXPECT_EQ(0, p["foobar"]); std::vector<std::pair<std::string, 
float>> vals; Properties().ForEach([&](absl::string_view key, float val) { vals.push_back({std::string(key), val}); }); EXPECT_THAT(vals, ::testing::IsEmpty()); } TEST(HloCostAnalysisProperties, SetValues) { Properties p; p[kFlopsKey] = 1; p[kTranscendentalsKey] = 2; p[kBytesAccessedKey] = 3; p[kOptimalSecondsKey] = 4; p[kUtilizationKey] = 5; p[kReserved0Key] = 6; p[kReserved1Key] = 7; EXPECT_EQ(1, p[kFlopsKey]); EXPECT_EQ(2, p[kTranscendentalsKey]); EXPECT_EQ(3, p[kBytesAccessedKey]); EXPECT_EQ(4, p[kOptimalSecondsKey]); EXPECT_EQ(5, p[kUtilizationKey]); EXPECT_EQ(6, p[kReserved0Key]); EXPECT_EQ(7, p[kReserved1Key]); p.set_operand_utilization(0, {}, 10); p.set_operand_utilization(1, {}, 11); p.set_operand_utilization(2, {}, 12); p.set_operand_utilization(0, {0}, 13); p.set_operand_utilization(2, {0}, 14); EXPECT_EQ(10, p.operand_utilization(0, {})); EXPECT_EQ(11, p.operand_utilization(1, {})); EXPECT_EQ(12, p.operand_utilization(2, {})); EXPECT_EQ(13, p.operand_utilization(0, {0})); EXPECT_EQ(14, p.operand_utilization(2, {0})); EXPECT_EQ(10, p[HloCostAnalysis::GetOperandUtilizationKey(0, {})]); EXPECT_EQ(11, p[HloCostAnalysis::GetOperandUtilizationKey(1, {})]); EXPECT_EQ(12, p[HloCostAnalysis::GetOperandUtilizationKey(2, {})]); EXPECT_EQ(13, p[HloCostAnalysis::GetOperandUtilizationKey(0, {0})]); EXPECT_EQ(14, p[HloCostAnalysis::GetOperandUtilizationKey(2, {0})]); p.set_operand_bytes_accessed(0, {}, 20); p.set_operand_bytes_accessed(1, {}, 21); p.set_operand_bytes_accessed(2, {}, 22); p.set_operand_bytes_accessed(0, {0}, 23); p.set_operand_bytes_accessed(2, {0}, 24); EXPECT_EQ(20, p.operand_bytes_accessed(0, {})); EXPECT_EQ(21, p.operand_bytes_accessed(1, {})); EXPECT_EQ(22, p.operand_bytes_accessed(2, {})); EXPECT_EQ(23, p.operand_bytes_accessed(0, {0})); EXPECT_EQ(24, p.operand_bytes_accessed(2, {0})); EXPECT_EQ(20, p[HloCostAnalysis::GetOperandBytesAccessedKey(0, {})]); EXPECT_EQ(21, p[HloCostAnalysis::GetOperandBytesAccessedKey(1, {})]); EXPECT_EQ(22, 
p[HloCostAnalysis::GetOperandBytesAccessedKey(2, {})]); EXPECT_EQ(23, p[HloCostAnalysis::GetOperandBytesAccessedKey(0, {0})]); EXPECT_EQ(24, p[HloCostAnalysis::GetOperandBytesAccessedKey(2, {0})]); p.set_output_bytes_accessed({}, 30); p.set_output_bytes_accessed({0}, 31); EXPECT_EQ(30, p.output_bytes_accessed({})); EXPECT_EQ(31, p.output_bytes_accessed({0})); EXPECT_EQ(30, p[HloCostAnalysis::GetOutputBytesAccessedKey({})]); EXPECT_EQ(31, p[HloCostAnalysis::GetOutputBytesAccessedKey({0})]); p["foo"] = 100; EXPECT_EQ(100, p["foo"]); p["bar"] += 101; EXPECT_EQ(101, p["bar"]); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_cost_analysis.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_cost_analysis_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
794dc36a-5c72-49a9-bb2c-dbc91bcd6fe0
cpp
tensorflow/tensorflow
hlo_computation_deduplicator
third_party/xla/xla/service/hlo_computation_deduplicator.cc
third_party/xla/xla/service/hlo_computation_deduplicator_test.cc
#include "xla/service/hlo_computation_deduplicator.h" #include <string> #include <utility> #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/shape_util.h" #include "tsl/platform/logging.h" namespace xla { bool HloComputationDeduplicator::ContainsLargeConstants(HloComputation* comp) { int total_size = 0; for (HloInstruction* instruction : comp->instructions()) { if (instruction->IsConstant()) { total_size += ShapeUtil::ArrayDataSize(instruction->literal().shape()); if (total_size > 1024) { return true; } } } return false; } absl::StatusOr<bool> HloComputationDeduplicator::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { absl::flat_hash_map<std::string, HloComputation*> unique_comps; absl::flat_hash_map<HloComputation*, HloComputation*> replacement; HloPrintOptions options = HloPrintOptions::Canonical(); options.set_print_subcomputation_mode( HloPrintOptions::PrintSubcomputationMode::kOff); options.set_print_infeed_outfeed_config(false); options.set_print_only_essential_constants(true); options.set_print_operand_shape(true); options.set_print_ids(false); options.set_canonicalize_computations(true); auto comp_eq = [&replacement](const HloComputation* a, const HloComputation* b) { if (a->unique_id() == b->unique_id()) return true; if (replacement.contains(a) && replacement.at(a)->unique_id() == b->unique_id()) { return true; } if (replacement.contains(b) && replacement.at(b)->unique_id() == a->unique_id()) { return true; } if (replacement.contains(a) && replacement.contains(b) && replacement.at(a)->unique_id() == replacement.at(b)->unique_id()) { return true; } return false; }; for (HloComputation* comp : module->MakeComputationPostOrder(execution_threads)) { if (comp->IsEntryComputation() || comp->instruction_count() > 128 || 
ContainsLargeConstants(comp) || comp->IsCollectiveCalledComputation()) { continue; } std::string comp_str = comp->ToString(options); auto poss_dup = unique_comps.find(comp_str); if (poss_dup != unique_comps.end() && poss_dup->second->Equal(*comp, true, comp_eq)) { VLOG(2) << "Replacing " << comp->name() << " with " << poss_dup->second->name(); replacement[comp] = poss_dup->second; } else { unique_comps[std::move(comp_str)] = comp; } } if (mark_fusion_duplications_) { module->MarkFusionDuplications(replacement); } else { module->ReplaceComputations(replacement); } return !replacement.empty(); } }
#include "xla/service/hlo_computation_deduplicator.h" #include <cstdint> #include <memory> #include <string> #include <string_view> #include <vector> #include <gtest/gtest.h> #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/literal_util.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/test.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" namespace xla { namespace { class HloComputationDeduplicatorTest : public HloTestBase { protected: std::vector<std::string> RunDeduplicatePass(const std::string_view text, bool expect_true) { std::unique_ptr<HloModule> module = ParseAndReturnVerifiedModule(text).value(); HloComputationDeduplicator dedup; bool changed = dedup.Run(module.get()).value(); EXPECT_EQ(changed, expect_true); std::vector<std::string> computation_names; for (auto comp : module->computations()) { computation_names.emplace_back(comp->name()); } return computation_names; } }; TEST_F(HloComputationDeduplicatorTest, RemoveRegionBandC) { const std::string_view text = R"( HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0}, s32[20]{0})->s32[]} region_A { Arg_0.6 = s32[] parameter(0) Arg_1.7 = s32[] parameter(1) ROOT add.8 = s32[] add(Arg_0.6, Arg_1.7) } region_B { Arg_0.11 = s32[] parameter(0) Arg_1.12 = s32[] parameter(1) ROOT add.13 = s32[] add(Arg_0.11, Arg_1.12) } region_C { Arg_0.17 = s32[] parameter(0) Arg_1.18 = s32[] parameter(1) ROOT add.19 = s32[] add(Arg_0.17, Arg_1.18) } ENTRY main.22 { Arg_0.1 = s32[10]{0} parameter(0) Arg_1.2 = s32[15]{0} parameter(1) Arg_2.3 = s32[20]{0} parameter(2) constant.4 = s32[] constant(0) reduce.9 = s32[] reduce(Arg_0.1, constant.4), dimensions={0}, to_apply=region_A reduce.14 = s32[] reduce(Arg_1.2, constant.4), dimensions={0}, to_apply=region_B reduce.20 = s32[] reduce(Arg_2.3, constant.4), dimensions={0}, to_apply=region_C multiply.15 = s32[] multiply(reduce.9, reduce.14) ROOT 
multiply.21 = s32[] multiply(multiply.15, reduce.20) } )"; auto computation_names = RunDeduplicatePass(text, true); for (auto name : computation_names) { EXPECT_NE(name, "region_B"); EXPECT_NE(name, "region_C"); } EXPECT_EQ(computation_names.size(), 2); } TEST_F(HloComputationDeduplicatorTest, RemoveRegionBExactCopy) { const std::string_view text = R"( HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]} region_A { Arg_0.5 = s32[] parameter(0) Arg_1.6 = s32[] parameter(1) ROOT add.7 = s32[] add(Arg_0.5, Arg_1.6) } region_B { Arg_0.5 = s32[] parameter(0) Arg_1.6 = s32[] parameter(1) ROOT add.7 = s32[] add(Arg_0.5, Arg_1.6) } ENTRY main.15 { Arg_0.1 = s32[10]{0} parameter(0) constant.3 = s32[] constant(0) rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A Arg_1.2 = s32[15]{0} parameter(1) rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B ROOT multiply.14 = s32[] multiply(rd1, rd2) } )"; auto computation_names = RunDeduplicatePass(text, true); for (auto name : computation_names) { EXPECT_NE(name, "region_B"); } EXPECT_EQ(computation_names.size(), 2); } TEST_F(HloComputationDeduplicatorTest, RemoveRegionsWithSameSubcomp) { const std::string_view text = R"( HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]} region_X { Ag_0 = s32[] parameter(0) Arg_1 = s32[] parameter(1) ROOT their_sum = s32[] add(Ag_0, Arg_1) } region_Y { Arg_0 = s32[] parameter(0) Arg_1 = s32[] parameter(1) ROOT the_sum = s32[] add(Arg_0, Arg_1) } region_A { Arg_0.5 = s32[] parameter(0) Arg_1.6 = s32[] parameter(1) ROOT add.7 = s32[] add(Arg_0.5, Arg_1.6) } region_B { Arg_0.5 = s32[] parameter(0) Ar_1.6 = s32[] parameter(1) ROOT add.7 = s32[] add(Arg_0.5, Ar_1.6) } main.15 { Arg_0.1 = s32[10]{0} parameter(0) constant.3 = s32[] constant(0) rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_X Arg_1.2 = s32[15]{0} parameter(1) rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, 
to_apply=region_Y ROOT multiply.14 = s32[] multiply(rd1, rd2) } main.16 { Arg_0.1 = s32[10]{0} parameter(0) constant.3 = s32[] constant(0) rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A Arg_1.2 = s32[15]{0} parameter(1) rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B ROOT multiply.14 = s32[] multiply(rd1, rd2) } main.17 { Arg_0 = s32[10]{0} parameter(0) Arg_1 = s32[15]{0} parameter(1) rd1 = s32[] call(Arg_0, Arg_1), to_apply=main.15 rd2 = s32[] call(Arg_0, Arg_1), to_apply=main.16 ROOT ret = add(rd1, rd2) } )"; auto computation_names = RunDeduplicatePass(text, true); for (auto name : computation_names) { EXPECT_NE(name, "region_B"); EXPECT_NE(name, "region_A"); EXPECT_NE(name, "region_Y"); EXPECT_NE(name, "main.16"); } EXPECT_EQ(computation_names.size(), 3); } TEST_F(HloComputationDeduplicatorTest, DontRemoveRegionsWithDifferentSubcomp) { const std::string_view text = R"( HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]} region_X { Ag_0 = s32[] parameter(0) Arg_1 = s32[] parameter(1) ROOT their_sum = s32[] multiply(Ag_0, Arg_1) } region_Y { Arg_0 = s32[] parameter(0) Arg_1 = s32[] parameter(1) ROOT the_sum = s32[] add(Arg_0, Arg_1) } region_A { Arg_0.5 = s32[] parameter(0) Arg_1.6 = s32[] parameter(1) ROOT add.7 = s32[] add(Arg_0.5, Arg_1.6) } region_B { Arg_0.5 = s32[] parameter(0) Ar_1.6 = s32[] parameter(1) ROOT add.7 = s32[] add(Arg_0.5, Ar_1.6) } main.15 { Arg_0.1 = s32[10]{0} parameter(0) constant.3 = s32[] constant(0) rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_X Arg_1.2 = s32[15]{0} parameter(1) rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_Y ROOT multiply.14 = s32[] multiply(rd1, rd2) } main.16 { Arg_0.1 = s32[10]{0} parameter(0) constant.3 = s32[] constant(0) rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A Arg_1.2 = s32[15]{0} parameter(1) rd2 = s32[] reduce(Arg_1.2, constant.3), 
dimensions={0}, to_apply=region_B ROOT multiply.14 = s32[] multiply(rd1, rd2) } main.17 { Arg_0 = s32[10]{0} parameter(0) Arg_1 = s32[15]{0} parameter(1) rd1 = s32[] call(Arg_0, Arg_1), to_apply=main.15 rd2 = s32[] call(Arg_0, Arg_1), to_apply=main.16 ROOT ret = add(rd1, rd2) } )"; auto computation_names = RunDeduplicatePass(text, true); int region_x_count = 0; int region_y_count = 0; int main_16_count = 0; int main_15_count = 0; int region_a_count = 0; int region_b_count = 0; for (auto name : computation_names) { region_x_count += (name == "region_X"); region_y_count += (name == "region_Y"); main_15_count += (name == "main.15"); main_16_count += (name == "main.16"); region_a_count += (name == "region_A"); region_b_count += (name == "region_B"); } EXPECT_EQ(region_a_count, 0); EXPECT_EQ(region_b_count, 0); EXPECT_EQ(main_15_count, 1); EXPECT_EQ(main_16_count, 1); EXPECT_EQ(region_x_count, 1); EXPECT_EQ(region_y_count, 1); } TEST_F(HloComputationDeduplicatorTest, RemoveRegionBVarDifferences) { const std::string_view text = R"( HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]} region_A { Arg_0.5 = s32[] parameter(0) Arg_1.6 = s32[] parameter(1) ROOT add.7 = s32[] add(Arg_0.5, Arg_1.6) } region_B { Arg_0.2 = s32[] parameter(0) Arg_1.3 = s32[] parameter(1) ROOT add.8 = s32[] add(Arg_0.2, Arg_1.3) } ENTRY main.15 { Arg_0.1 = s32[10]{0} parameter(0) constant.3 = s32[] constant(0) rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A Arg_1.2 = s32[15]{0} parameter(1) rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B ROOT multiply.14 = s32[] multiply(rd1, rd2) } )"; auto computation_names = RunDeduplicatePass(text, true); for (auto name : computation_names) { EXPECT_NE(name, "region_B"); } EXPECT_EQ(computation_names.size(), 2); } TEST_F(HloComputationDeduplicatorTest, DontRemoveRegionBCommutative) { const std::string_view text = R"( HloModule DeDupTest, 
entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]} region_A { Arg_0 = s32[] parameter(0) Arg_1 = s32[] parameter(1) ROOT add.7 = s32[] add(Arg_1, Arg_0) } region_B { Arg_0.2 = s32[] parameter(0) Arg_1.3 = s32[] parameter(1) ROOT add.8 = s32[] add(Arg_0.2, Arg_1.3) } ENTRY main.15 { Arg_0.1 = s32[10]{0} parameter(0) constant.3 = s32[] constant(0) rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A Arg_1.2 = s32[15]{0} parameter(1) rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B ROOT multiply.14 = s32[] multiply(rd1, rd2) } )"; auto computation_names = RunDeduplicatePass(text, false); int region_b_count = 0; for (auto name : computation_names) { region_b_count += (name == "region_B"); } EXPECT_EQ(region_b_count, 1); EXPECT_EQ(computation_names.size(), 3); } TEST_F(HloComputationDeduplicatorTest, DontRemoveRegionBDifferentExecutionThread) { const std::string_view text = R"( HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]} region_A { Arg_0 = s32[] parameter(0) Arg_1 = s32[] parameter(1) ROOT add = s32[] add(Arg_0, Arg_1) } region_B { Arg_0 = s32[] parameter(0) Arg_1 = s32[] parameter(1) ROOT add = s32[] add(Arg_0, Arg_1) } called_computation { Arg_0 = s32[15]{0} parameter(0) Cst = s32[] constant(0) ROOT rd2 = s32[] reduce(Arg_0, Cst), dimensions={0}, to_apply=region_B }, execution_thread="parallel_thread" ENTRY main.15 { Arg_0 = s32[10]{0} parameter(0) constant.3 = s32[] constant(0) rd1 = s32[] reduce(Arg_0, constant.3), dimensions={0}, to_apply=region_A Arg_1 = s32[15]{0} parameter(1) call-start = ((s32[15]{0}), s32[], s32[]) call-start(Arg_1), async_execution_thread="parallel_thread", to_apply=%called_computation call-done = s32[] call-done(call-start) ROOT multiply.14 = s32[] multiply(rd1, call-done) } )"; auto computation_names = RunDeduplicatePass(text, false); int region_b_count = 0; for (auto name : computation_names) { region_b_count += (name == "region_B"); } 
EXPECT_EQ(region_b_count, 1); EXPECT_EQ(computation_names.size(), 5); } TEST_F(HloComputationDeduplicatorTest, DontRemoveRegionLargeConstant) { const std::string_view text = R"( HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]} region_A { Arg_00 = s32[] parameter(0) Arg_1_1 = s32[] parameter(1) Arg_0 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}}) Arg_1 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}}) Arg_2 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}}) Arg_3 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}}) Arg_4 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}}) Arg_5 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}}) add1 = s32[10, 10] add(Arg_1, Arg_0) add2 = 
s32[10, 10] add(Arg_2, Arg_3) add3 = s32[10, 10] add(Arg_4, Arg_5) add8 = s32[10, 10] add(add1, add2) addv = s32[10, 10] add(add3, add8) ROOT ret = add(Arg_00, Arg_1_1) } region_B { Arg_00 = s32[] parameter(0) Arg_1_1 = s32[] parameter(1) Arg_0 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}}) Arg_1 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}}) Arg_2 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}}) Arg_3 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}}) Arg_4 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}}) Arg_5 = s32[10, 10] constant({{1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}, {1,2,3,4,5,6,7,8,9,10}}) add1 = s32[10, 10] add(Arg_1, Arg_0) add2 = s32[10, 10] add(Arg_2, Arg_3) add3 = s32[10, 10] add(Arg_4, Arg_5) add8 = s32[10, 10] 
add(add1, add2) addv = s32[10, 10] add(add3, add8) ROOT ret = add(Arg_00, Arg_1_1) } ENTRY main.15 { Arg_0.1 = s32[10]{0} parameter(0) constant.3 = s32[] constant(0) rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A Arg_1.2 = s32[15]{0} parameter(1) rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B ROOT multiply.14 = s32[] multiply(rd1, rd2) } )"; auto computation_names = RunDeduplicatePass(text, false); int region_b_count = 0; for (auto comp : computation_names) { region_b_count += (comp == "region_B"); } EXPECT_EQ(region_b_count, 1); EXPECT_EQ(computation_names.size(), 3); } TEST_F(HloComputationDeduplicatorTest, DontRemoveRegionBDifferentcomp) { const std::string_view text = R"( HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]} region_A { Arg_0.5 = s32[] parameter(0) Arg_1.6 = s32[] parameter(1) ROOT add.7 = s32[] multiply(Arg_0.5, Arg_1.6) } region_B { Arg_0.2 = s32[] parameter(0) Arg_1.3 = s32[] parameter(1) ROOT add.8 = s32[] add(Arg_0.2, Arg_1.3) } ENTRY main.15 { Arg_0.1 = s32[10]{0} parameter(0) constant.3 = s32[] constant(0) rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A Arg_1.2 = s32[15]{0} parameter(1) rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B ROOT multiply.14 = s32[] multiply(rd1, rd2) } )"; auto computation_names = RunDeduplicatePass(text, false); int region_b_count = 0; for (auto name : computation_names) { region_b_count += (name == "region_B"); } EXPECT_EQ(region_b_count, 1); EXPECT_EQ(computation_names.size(), 3); } TEST_F(HloComputationDeduplicatorTest, DontRemoveRegionBDifferentType) { const std::string_view text = R"( HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s16[15]{0})->s16[]} region_A { Arg_0.5 = s32[] parameter(0) Arg_1.6 = s32[] parameter(1) ROOT add.7 = s32[] multiply(Arg_0.5, Arg_1.6) } region_B { Arg_0.5 = s16[] parameter(0) Arg_1.6 = s16[] parameter(1) ROOT add.7 = s16[] 
multiply(Arg_0.5, Arg_1.6) } ENTRY main.15 { Arg_0.1 = s32[10]{0} parameter(0) constant.3 = s32[] constant(5) rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A Arg_1.2 = s16[15]{0} parameter(1) constant.4 = s16[] constant(5) rd2 = s16[] reduce(Arg_1.2, constant.4), dimensions={0}, to_apply=region_B } )"; auto computation_names = RunDeduplicatePass(text, false); int region_b_count = 0; for (auto comp : computation_names) { region_b_count += (comp == "region_B"); } EXPECT_EQ(region_b_count, 1); EXPECT_EQ(computation_names.size(), 3); } TEST_F(HloComputationDeduplicatorTest, DontRemoveRegionBEntryComp) { const std::string_view text = R"( HloModule DeDupTest, entry_computation_layout={(s32[10]{0},s32[15]{0})->s32[]} region_A1 { Arg_0.5 = s32[] parameter(0) Arg_1.6 = s32[] parameter(1) ROOT add.7 = s32[] multiply(Arg_0.5, Arg_1.6) } region_B1 { Arg_0.2 = s32[] parameter(0) Arg_1.3 = s32[] parameter(1) ROOT add.8 = s32[] add(Arg_0.2, Arg_1.3) } ENTRY region_B { Arg_0.1 = s32[10]{0} parameter(0) constant.3 = s32[] constant(0) rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A1 Arg_1.2 = s32[15]{0} parameter(1) rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B1 ROOT multiply.14 = s32[] multiply(rd1, rd2) } region_A { Arg_0.1 = s32[10]{0} parameter(0) constant.3 = s32[] constant(0) rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A1 Arg_1.2 = s32[15]{0} parameter(1) rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B1 ROOT multiply.14 = s32[] multiply(rd1, rd2) } )"; auto computation_names = RunDeduplicatePass(text, false); EXPECT_EQ(computation_names.size(), 4); } TEST_F(HloComputationDeduplicatorTest, LargeSubComputationTest) { const Shape shape = ShapeUtil::MakeScalarShape(S32); const int total_regions = 2; const int max_insns = 128; std::vector<HloComputation> comps; auto module = CreateNewVerifiedModule(); for (int region = 0; region < total_regions; 
region++) { HloComputation::Builder builder("region_" + std::to_string(region)); auto curr = builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "a0")); auto next = builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "a1")); for (int i = 0; i < max_insns; i++) { next = builder.AddInstruction( HloInstruction::CreateBinary(shape, HloOpcode::kAdd, curr, next)); } module->AddComputationAndUnifyNamesAndIds(builder.Build(), false); } HloComputation::Builder main("main_func"); std::vector<HloInstruction *> insns; std::vector<HloInstruction *> consts; for (int region = 0; region < total_regions; region++) { insns.push_back(main.AddInstruction( HloInstruction::CreateParameter(region, ShapeUtil::MakeShape(S32, {10}), "a" + std::to_string(region)))); consts.push_back(main.AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateR0<int32_t>(5 + region)))); } int region = 0; for (auto comp : module->computations()) { ASSERT_LT(region, total_regions); main.AddInstruction(HloInstruction::CreateReduce( ShapeUtil::MakeScalarShape(S32), insns[region], consts[region], {0}, comp)); } module->AddEntryComputation(main.Build()); HloComputationDeduplicator dedup; TF_ASSERT_OK_AND_ASSIGN(bool changed, dedup.Run(module.get())); EXPECT_FALSE(changed); std::vector<HloComputation *> computations = module->MakeComputationSorted(); EXPECT_EQ(computations.size(), (total_regions + 1)); } TEST_F(HloComputationDeduplicatorTest, DontDeduplicateReduceAllReduce) { const std::string_view text = R"( HloModule TestModule add.1 { Arg_0 = s32[] parameter(0) Arg_1 = s32[] parameter(1) ROOT add.2 = s32[] add(Arg_0, Arg_1) } add.2 { Arg_0 = s32[] parameter(0) Arg_1 = s32[] parameter(1) ROOT add.2 = s32[] add(Arg_0, Arg_1) } ENTRY main { Arg_0.1 = s32[10] parameter(0) constant.3 = s32[] constant(0) rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=add.1 Arg_1.1 = s32[] parameter(1) rd2 = s32[] all-reduce(Arg_1.1), to_apply=add.2 ROOT multiply.14 = s32[] 
multiply(rd1, rd2) } )"; auto computation_names = RunDeduplicatePass(text, false); EXPECT_EQ(computation_names.size(), 3); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_computation_deduplicator.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_computation_deduplicator_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
bdb04ae8-b394-4376-8869-cbfb0c452ed3
cpp
tensorflow/tensorflow
gather_simplifier
third_party/xla/xla/service/gather_simplifier.cc
third_party/xla/xla/service/gather_simplifier_test.cc
#include "xla/service/gather_simplifier.h" #include <iterator> #include <vector> #include "absl/algorithm/container.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/literal_util.h" #include "xla/permutation_util.h" #include "xla/service/gather_scatter_utils.h" #include "xla/service/hlo_creation_utils.h" #include "xla/shape_util.h" #include "tsl/platform/statusor.h" namespace xla { absl::StatusOr<HloInstruction*> GatherSimplifier::ExpandInstruction( HloInstruction* inst) { auto* gather = DynCast<HloGatherInstruction>(inst); if (absl::c_linear_search(gather->gather_slice_sizes(), 0)) { auto* zero = gather->AddInstruction(HloInstruction::CreateConstant( LiteralUtil::Zero(gather->shape().element_type()))); return gather->AddInstruction( HloInstruction::CreateBroadcast(gather->shape(), zero, {})); } const auto& dims = gather->gather_dimension_numbers(); int operand_rank = dims.collapsed_slice_dims().size() + dims.offset_dims().size(); auto [operand_permutation, operand_permutation_inverse] = MakeOperandStartIndexPermutations(dims.start_index_map(), operand_rank); auto* operand = gather->operands()[0]; auto* start_indices = gather->operands()[1]; TF_ASSIGN_OR_RETURN(operand, MaybeTranspose(operand, operand_permutation)); TF_ASSIGN_OR_RETURN( start_indices, TransformStartIndices(start_indices, dims.index_vector_dim())); auto slice_sizes = Permute(gather->gather_slice_sizes(), operand_permutation); std::vector<int64_t> output_dims = {start_indices->shape().dimensions(0)}; absl::c_copy(slice_sizes, std::back_inserter(output_dims)); Shape output_shape = ShapeUtil::MakeShape(operand->shape().element_type(), output_dims); std::vector<int64_t> offset_dims(operand_rank); absl::c_iota(offset_dims, 1); std::vector<int64_t> start_index_map(dims.start_index_map().size()); absl::c_iota(start_index_map, 0); auto* result = gather->AddInstruction(HloInstruction::CreateGather( output_shape, operand, start_indices, 
HloGatherInstruction::MakeGatherDimNumbers( offset_dims, {}, start_index_map, 1), slice_sizes, gather->indices_are_sorted())); std::vector<int64_t> output_permutation(1 + operand_rank); absl::c_transform(operand_permutation_inverse, output_permutation.begin() + 1, [](int64_t dim) { return dim + 1; }); TF_ASSIGN_OR_RETURN(result, MaybeTranspose(result, output_permutation)); if (!dims.collapsed_slice_dims().empty()) { std::vector<int64_t> collapsed_slice_dims( dims.collapsed_slice_dims().size()); absl::c_transform(dims.collapsed_slice_dims(), collapsed_slice_dims.begin(), [](int64_t dim) { return dim + 1; }); TF_ASSIGN_OR_RETURN(result, ElideDegenerateDims(result, collapsed_slice_dims)); } auto original_start_index_dims = gather->operands()[1]->shape().dimensions(); std::vector<int64_t> start_indices_dims; for (int i = 0; i < original_start_index_dims.size(); ++i) { if (i != dims.index_vector_dim()) { start_indices_dims.push_back(original_start_index_dims[i]); } } if (start_indices_dims.size() > 1) { TF_ASSIGN_OR_RETURN(result, ExpandFirstDimIntoNDims(result, start_indices_dims)); } else if (start_indices_dims.empty()) { TF_ASSIGN_OR_RETURN(result, ElideDegenerateDims(result, {0})); } std::vector<int64_t> output_perm; auto output_rank = static_cast<int64_t>(start_indices_dims.size() + dims.offset_dims().size()); output_perm.reserve(output_rank); auto offset_dim_index = static_cast<int64_t>(start_indices_dims.size()); int64_t start_index_dim_index = 0; for (int64_t i = 0; i < output_rank; ++i) { if (absl::c_linear_search(dims.offset_dims(), i)) { output_perm.push_back(offset_dim_index++); } else { output_perm.push_back(start_index_dim_index++); } } return MaybeTranspose(result, output_perm); } bool GatherSimplifier::IsSimplifiedGather(const HloGatherInstruction* gather) { auto* start_indices = gather->operands()[1]; const auto& dims = gather->gather_dimension_numbers(); return start_indices->shape().rank() == 2 && dims.index_vector_dim() == 1 && 
IsIdentityPermutation(dims.start_index_map()) && dims.collapsed_slice_dims().empty() && *dims.offset_dims().begin() == 1 && *dims.offset_dims().rbegin() == dims.offset_dims().size(); } bool GatherSimplifier::InstructionMatchesPattern(HloInstruction* inst) { auto* gather = DynCast<HloGatherInstruction>(inst); return gather && !IsSimplifiedGather(gather); } }
#include "xla/service/gather_simplifier.h" #include <optional> #include "xla/tests/hlo_test_base.h" namespace xla { namespace { class GatherSimplifierTest : public HloTestBase {}; TEST_F(GatherSimplifierTest, TransformsStartIndices) { constexpr absl::string_view kModuleStr = R"( HloModule gather_simplifier ENTRY kernel_entry { operand = f32[33,34] parameter(0) indices = s32[42,43] parameter(1) ROOT gather = f32[42,43,7,8] gather(operand, indices), offset_dims={2,3}, collapsed_slice_dims={}, start_index_map={0}, index_vector_dim=2, slice_sizes={7,8} })"; RunAndFilecheckHloRewrite(kModuleStr, GatherSimplifier(), R"( CHECK: %[[VECTOR_DIM:.*]] = s32[42,43,1]{2,1,0} reshape(%indices) CHECK: %[[INDICES_2D:.*]] = s32[1806,1]{1,0} reshape(%[[VECTOR_DIM]]) CHECK: %[[GATHER:.*]] = f32[1806,7,8]{{.*}} gather( CHECK-SAME: %operand, %[[INDICES_2D]]), CHECK-SAME: offset_dims={1,2}, CHECK-SAME: collapsed_slice_dims={}, CHECK-SAME: start_index_map={0}, CHECK-SAME: index_vector_dim=1, CHECK-SAME: slice_sizes={7,8} CHECK: ROOT %{{.*}} = f32[42,43,7,8]{3,2,1,0} reshape(%[[GATHER]]) )"); } TEST_F(GatherSimplifierTest, RemovesCollapsedSliceDims) { constexpr absl::string_view kModuleStr = R"( HloModule gather_simplifier ENTRY kernel_entry { operand = f32[33,34] parameter(0) indices = s32[42,1] parameter(1) ROOT gather = f32[42] gather(operand, indices), offset_dims={}, collapsed_slice_dims={0,1}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,1} })"; RunAndFilecheckHloRewrite(kModuleStr, GatherSimplifier(), R"( CHECK: %[[GATHER:.*]] = f32[42,1,1]{2,1,0} gather(%operand, %indices) CHECK-SAME: offset_dims={1,2}, CHECK-SAME: collapsed_slice_dims={}, CHECK: ROOT %{{.*}} = f32[42]{0} reshape(%[[GATHER]]) )"); } TEST_F(GatherSimplifierTest, MakesStartIndexMapIdentity) { constexpr absl::string_view kModuleStr = R"( HloModule gather_simplifier ENTRY kernel_entry { operand = f32[33,34,35] parameter(0) indices = s32[42,3] parameter(1) ROOT gather = f32[42,1,2,3] gather(operand, indices), 
offset_dims={1,2,3}, collapsed_slice_dims={}, start_index_map={2,0,1}, index_vector_dim=1, slice_sizes={1,2,3} })"; RunAndFilecheckHloRewrite(kModuleStr, GatherSimplifier(), R"( %operand = f32[33,34,35]{2,1,0} parameter(0) CHECK: %[[OPERAND:.*]] = f32[35,33,34]{2,1,0} transpose(%operand) CHECK: %[[GATHER:.*]] = f32[42,3,1,2]{{.*}} gather(%[[OPERAND]], CHECK-SAME: start_index_map={0,1,2}, CHECK: ROOT {{.*}} = f32[42,1,2,3]{{.*}} transpose(%[[GATHER]]) )"); } TEST_F(GatherSimplifierTest, CollapsesSomeDims) { constexpr absl::string_view kModuleStr = R"( HloModule gather_simplifier ENTRY kernel_entry { operand = f32[33,34,35] parameter(0) indices = s32[42,1] parameter(1) ROOT gather = f32[7,42] gather(operand, indices), offset_dims={0}, collapsed_slice_dims={0,2}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,7,1} })"; RunAndFilecheckHloRewrite(kModuleStr, GatherSimplifier(), R"( CHECK: %[[GATHER:.*]] = f32[42,1,7,1]{3,2,1,0} gather( CHECK: %[[COLLAPSED:.*]] = f32[42,7]{1,0} reshape(%[[GATHER]]) CHECK: ROOT {{.*}} = f32[7,42]{1,0} transpose(%[[COLLAPSED]]), CHECK-SAME: dimensions={1,0} )"); } TEST_F(GatherSimplifierTest, ZeroDimStartIndices) { constexpr absl::string_view kModuleStr = R"( HloModule gather_simplifier ENTRY kernel_entry { operand = f32[8,16] parameter(0) indices = s32[2] parameter(1) ROOT gather = f32[8,16] gather(f32[8,16] operand, s32[2] indices), offset_dims={0,1}, collapsed_slice_dims={}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={8,16} })"; RunAndFilecheckHloRewrite(kModuleStr, GatherSimplifier(), R"( CHECK: gather( )"); } TEST_F(GatherSimplifierTest, ZeroSizeSlice) { constexpr absl::string_view kModuleStr = R"( HloModule gather_simplifier ENTRY kernel_entry { operand = f32[0,2] parameter(0) indices = s32[3] parameter(1) ROOT gather = f32[3,2] gather(f32[0,2] operand, s32[3]{0} indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={0,2} })"; 
RunAndFilecheckHloRewrite(kModuleStr, GatherSimplifier(), R"( CHECK: %[[ZERO:.*]] = f32[] constant(0) CHECK: ROOT {{.*}} = f32[3,2]{1,0} broadcast(%[[ZERO]]), dimensions={} )"); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gather_simplifier.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gather_simplifier_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
1d363942-2352-4331-8226-9a635b5b362f
cpp
tensorflow/tensorflow
collective_quantizer
third_party/xla/xla/service/collective_quantizer.cc
third_party/xla/xla/service/collective_quantizer_test.cc
#include "xla/service/collective_quantizer.h" #include "xla/service/hlo_replication_analysis.h" #include "xla/service/pattern_matcher.h" #include "xla/shape_util.h" namespace xla { namespace { namespace m = match; struct ConversionSubgraph { HloInstruction* convert = nullptr; HloInstruction* binary = nullptr; HloInstruction* clamp = nullptr; HloInstruction* scale_bcast = nullptr; std::vector<HloInstruction*> unaries; }; template <typename... Args> auto ScalarBroadcast(Args... args) { return m::Broadcast(args...).WithPredicate([](const HloInstruction* instr) { return ShapeUtil::IsScalar(instr->operand(0)->shape()); }); } auto BitcastPreservesElementType() { return m::Bitcast().WithPredicate([](const HloInstruction* instr) { return ShapeUtil::SameElementType(instr->shape(), instr->operand(0)->shape()); }); } auto ConvertToNarrowerType() { auto converts_to_narrower_type = [](const HloInstruction* instr) -> bool { return ShapeUtil::ByteSizeOfPrimitiveType(instr->shape().element_type()) < ShapeUtil::ByteSizeOfPrimitiveType( instr->operand(0)->shape().element_type()); }; return m::Convert().WithPredicate(converts_to_narrower_type); } auto ConvertToWiderType() { auto converts_to_wider_type = [](const HloInstruction* instr) -> bool { return ShapeUtil::ByteSizeOfPrimitiveType(instr->shape().element_type()) > ShapeUtil::ByteSizeOfPrimitiveType( instr->operand(0)->shape().element_type()); }; return m::Convert().WithPredicate(converts_to_wider_type); } bool IsSupportedCollective(HloInstruction* instr) { return instr->operand_count() == 1 && (instr->opcode() == HloOpcode::kAllGather || instr->opcode() == HloOpcode::kAllToAll || instr->opcode() == HloOpcode::kCollectiveBroadcast || instr->opcode() == HloOpcode::kCollectivePermute); } HloInstruction* ApplyUnaries(HloInstruction* instr, const std::vector<HloInstruction*>& unaries) { for (HloInstruction* unary : unaries) { instr = instr->AddInstruction(unary->CloneWithNewOperands( ShapeUtil::MakeShapeWithDenseLayout( 
instr->shape().element_type(), unary->shape().dimensions(), unary->shape().layout().minor_to_major()), {instr})); } return instr; } absl::StatusOr<bool> InstrIsReplicated(HloModule* module, HloInstruction* instr) { if (module->config().replica_count() > 1) { return false; } TF_ASSIGN_OR_RETURN( auto replication_analysis, HloReplicationAnalysis::Run(module, true)); return replication_analysis->HloInstructionIsReplicatedAt(instr, {}); } std::vector<HloInstruction*> FindDequantizationSubgraphRecursive( HloInstruction* instr, absl::flat_hash_set<int>& visited_instrs, std::vector<HloInstruction*> subgraph) { if (!visited_instrs.emplace(instr->unique_id()).second) { return {}; } subgraph.emplace_back(instr); if (Match(instr, ConvertToWiderType())) { return subgraph; } if (instr->operand_count() == 1 || instr->opcode() == HloOpcode::kDivide) { return FindDequantizationSubgraphRecursive(instr->mutable_operand(0), visited_instrs, subgraph); } else if (instr->opcode() == HloOpcode::kMultiply) { for (HloInstruction* operand : instr->unique_operands()) { auto binary_subgraph = FindDequantizationSubgraphRecursive( operand, visited_instrs, subgraph); if (!binary_subgraph.empty()) { return binary_subgraph; } } } return {}; } std::optional<ConversionSubgraph> IsSupportedDequantization( HloInstruction* instr) { ConversionSubgraph subgraph; absl::flat_hash_set<int> visited_instrs; std::vector<HloInstruction*> candidate_subgraph = FindDequantizationSubgraphRecursive(instr, visited_instrs, std::vector<HloInstruction*>{}); std::reverse(candidate_subgraph.begin(), candidate_subgraph.end()); if (candidate_subgraph.size() > 1 && (Match( candidate_subgraph[1], m::MultiplyAnyOrder(&subgraph.binary, m::Convert(&subgraph.convert), ScalarBroadcast(&subgraph.scale_bcast))) || Match(candidate_subgraph[1], m::Divide(&subgraph.binary, m::Convert(&subgraph.convert), ScalarBroadcast(&subgraph.scale_bcast))))) { subgraph.unaries = {candidate_subgraph.begin() + 2, candidate_subgraph.end()}; } else if 
(candidate_subgraph.size() > 0 && Match(candidate_subgraph[0], m::Convert(&subgraph.convert))) { subgraph.unaries = {candidate_subgraph.begin() + 1, candidate_subgraph.end()}; } else { VLOG(5) << "Did not find type conversion or dequantization pattern."; return std::nullopt; } for (HloInstruction* unary : subgraph.unaries) { if (!Match(unary, m::AnyOf<HloInstruction>(m::Bitcast(), m::Copy(), m::Reshape(), m::Slice()))) { VLOG(5) << "Unexpected instruction in unary ops."; return std::nullopt; } } return std::make_optional<ConversionSubgraph>(std::move(subgraph)); } std::optional<ConversionSubgraph> IsSupportedQuantization( HloInstruction* instr) { ConversionSubgraph subgraph; std::vector<HloInstruction*> ops; while (instr->user_count() <= 1) { if (Match(instr, m::AnyOf<HloInstruction>( BitcastPreservesElementType(), m::Copy(), m::Reshape(), m::Slice(), m::Multiply(), m::Divide(), m::Clamp()))) { if (instr->user_count() > 0) { ops.emplace_back(instr); instr = instr->users()[0]; continue; } break; } if (Match(instr, ConvertToNarrowerType())) { ops.emplace_back(instr); break; } VLOG(5) << "Unsupported instruction."; return std::nullopt; } if (ops.size() > 2 && (Match( ops.back(), m::Convert(&subgraph.convert, m::Clamp(&subgraph.clamp, ScalarBroadcast(m::Constant()), m::MultiplyAnyOrder( &subgraph.binary, m::Op(), ScalarBroadcast(&subgraph.scale_bcast)), ScalarBroadcast(m::Constant())))) || Match(ops.back(), m::Convert( &subgraph.convert, m::Clamp(&subgraph.clamp, ScalarBroadcast(m::Constant()), m::Divide(&subgraph.binary, m::Op(), ScalarBroadcast(&subgraph.scale_bcast)), ScalarBroadcast(m::Constant())))))) { subgraph.unaries = {ops.begin(), ops.end() - 3}; } else if (ops.size() > 0 && Match(ops.back(), m::Convert(&subgraph.convert))) { subgraph.unaries = {ops.begin(), ops.end() - 1}; } else { VLOG(5) << "Did not find type conversion or quantization pattern."; return std::nullopt; } for (HloInstruction* unary : subgraph.unaries) { if (!Match(unary, 
m::AnyOf<HloInstruction>(m::Bitcast(), m::Copy(), m::Reshape(), m::Slice()))) { VLOG(5) << "Unexpected instruction in unary ops."; return std::nullopt; } } return std::make_optional<ConversionSubgraph>(std::move(subgraph)); } absl::Status MatchDequantization(HloInstruction* instr, bool* changed) { std::optional<ConversionSubgraph> subgraph = IsSupportedDequantization(instr->mutable_operand(0)); if (!subgraph.has_value()) { return absl::OkStatus(); } if (subgraph->scale_bcast) { TF_ASSIGN_OR_RETURN( bool scale_is_replicated, InstrIsReplicated(instr->parent()->parent(), subgraph->scale_bcast)); if (!scale_is_replicated) { return absl::OkStatus(); } } HloInstruction* new_coll_operand = subgraph->convert->mutable_operand(0); new_coll_operand = ApplyUnaries(new_coll_operand, subgraph->unaries); Shape new_coll_shape = ShapeUtil::ChangeElementType( instr->shape(), new_coll_operand->shape().element_type()); HloInstruction* new_collective = instr->AddInstruction( instr->CloneWithNewOperands(new_coll_shape, {new_coll_operand})); Shape new_convert_shape = ShapeUtil::ChangeElementType( new_collective->shape(), subgraph->convert->shape().element_type()); HloInstruction* new_convert = instr->AddInstruction(subgraph->convert->CloneWithNewOperands( new_convert_shape, {new_collective})); HloInstruction* new_binary; if (subgraph->binary) { HloInstruction* new_scale_bcast = instr->AddInstruction( subgraph->scale_bcast->CloneWithNewShape(new_convert->shape())); new_binary = instr->AddInstruction(subgraph->binary->CloneWithNewOperands( new_convert->shape(), {new_convert, new_scale_bcast})); } TF_RETURN_IF_ERROR( instr->ReplaceAllUsesWith(subgraph->binary ? 
new_binary : new_convert)); *changed = true; VLOG(5) << "Quantized collective " << new_collective->ToShortString(); return absl::OkStatus(); } absl::Status MatchQuantization(HloInstruction* instr, bool* changed) { std::optional<ConversionSubgraph> subgraph; if (instr->user_count() == 1) { subgraph = IsSupportedQuantization(instr->users()[0]); } if (!subgraph.has_value()) { return absl::OkStatus(); } if (subgraph->scale_bcast) { TF_ASSIGN_OR_RETURN( bool scale_is_replicated, InstrIsReplicated(instr->parent()->parent(), subgraph->scale_bcast)); if (!scale_is_replicated) { return absl::OkStatus(); } } HloInstruction* coll_operand = instr->mutable_operand(0); HloInstruction *new_binary, *new_clamp; if (subgraph->binary) { HloInstruction* new_scale_bcast = instr->AddInstruction( subgraph->scale_bcast->CloneWithNewShape(coll_operand->shape())); new_binary = instr->AddInstruction(subgraph->binary->CloneWithNewOperands( coll_operand->shape(), {coll_operand, new_scale_bcast})); HloInstruction* new_clamp_lower = instr->AddInstruction( subgraph->clamp->operand(0)->CloneWithNewShape(coll_operand->shape())); HloInstruction* new_clamp_upper = instr->AddInstruction( subgraph->clamp->operand(2)->CloneWithNewShape(coll_operand->shape())); new_clamp = instr->AddInstruction(subgraph->clamp->CloneWithNewOperands( coll_operand->shape(), {new_clamp_lower, new_binary, new_clamp_upper})); } Shape new_convert_shape = ShapeUtil::ChangeElementType( coll_operand->shape(), subgraph->convert->shape().element_type()); HloInstruction* new_convert = instr->AddInstruction(subgraph->convert->CloneWithNewOperands( new_convert_shape, {subgraph->binary ? 
new_clamp : coll_operand})); Shape new_collective_shape = ShapeUtil::ChangeElementType( instr->shape(), subgraph->convert->shape().element_type()); HloInstruction* new_collective = instr->AddInstruction( instr->CloneWithNewOperands(new_collective_shape, {new_convert})); new_collective = ApplyUnaries(new_collective, subgraph->unaries); TF_RETURN_IF_ERROR(subgraph->convert->ReplaceAllUsesWith(new_collective)); *changed = true; VLOG(5) << "Quantized collective " << new_collective->ToShortString(); return absl::OkStatus(); } } absl::StatusOr<bool> CollectiveQuantizer::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; for (HloComputation* comp : module->MakeComputationPostOrder()) { for (HloInstruction* instr : comp->MakeInstructionPostOrder()) { if (IsSupportedCollective(instr)) { TF_RETURN_IF_ERROR(MatchDequantization(instr, &changed)); TF_RETURN_IF_ERROR(MatchQuantization(instr, &changed)); } } } return changed; } }
#include "xla/service/collective_quantizer.h" #include <memory> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/service/hlo_verifier.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/statusor.h" namespace xla { namespace { namespace op = xla::testing::opcode_matchers; class CollectiveQuantizerTest : public HloTestBase { public: absl::StatusOr<bool> RunCollectiveQuantizer(HloModule* module) { CollectiveQuantizer collective_quantizer; return collective_quantizer.Run(module, {}); } }; TEST_F(CollectiveQuantizerTest, AllGatherConvert) { absl::string_view hlo_string = R"( HloModule module ENTRY entry { param = bf16[8,4,8,128] parameter(0) all-gather = bf16[8,32,8,128] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1 ROOT convert = f8e4m3fn[8,32,8,128] convert(all-gather) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get())); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::AllGather(op::Convert(op::Parameter()))); HloInstruction* all_gather = module->entry_computation()->root_instruction(); EXPECT_THAT(all_gather->shape().element_type(), F8E4M3FN); } TEST_F(CollectiveQuantizerTest, AllGatherConvertUnary) { absl::string_view hlo_string = R"( HloModule module ENTRY entry { param = bf16[8,4,8,128] parameter(0) all-gather = bf16[8,32,8,128] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1 reshape = bf16[8,32,1024] reshape(all-gather) slice = bf16[8,32,512] slice(reshape), slice={[0:8], [0:32], [256:768]} ROOT convert = f8e4m3fn[8,32,512] convert(slice) } )"; 
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get())); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Slice(op::Reshape(op::AllGather(op::Convert(op::Parameter()))))); HloInstruction* all_gather = module->entry_computation()->root_instruction(); EXPECT_THAT(all_gather->shape().element_type(), F8E4M3FN); } TEST_F(CollectiveQuantizerTest, AllGatherQuantize) { absl::string_view hlo_string = R"( HloModule module ENTRY entry { param = bf16[8,4,8,128] parameter(0) all-gather = bf16[8,32,8,128] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1 scale = bf16[] parameter(1), sharding={replicated} scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={} divide = bf16[8,32,8,128] divide(all-gather, scale_bcast) clamp_lower = bf16[] constant(-448.0) clamp_lower_bcast = bf16[8,32,8,128] broadcast(clamp_lower), dimensions={} clamp_upper = bf16[] constant(448.0) clamp_upper_bcast = bf16[8,32,8,128] broadcast(clamp_upper), dimensions={} clamp = bf16[8,32,8,128] clamp(clamp_lower_bcast, divide, clamp_upper_bcast) ROOT convert = f8e4m3fn[8,32,8,128] convert(clamp) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get())); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::AllGather(op::Convert(op::Clamp( op::Broadcast(), op::Divide(op::Parameter(), op::Broadcast()), op::Broadcast())))); HloInstruction* all_gather = module->entry_computation()->root_instruction(); EXPECT_THAT(all_gather->shape().element_type(), F8E4M3FN); } TEST_F(CollectiveQuantizerTest, AllToAllQuantize) { absl::string_view hlo_string = R"( HloModule module ENTRY entry { param = bf16[8,32,8,128] parameter(0) all-to-all = bf16[8,32,8,128] all-to-all(param), 
dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1 scale = bf16[] parameter(1), sharding={replicated} scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={} divide = bf16[8,32,8,128] divide(all-to-all, scale_bcast) clamp_lower = bf16[] constant(-448.0) clamp_lower_bcast = bf16[8,32,8,128] broadcast(clamp_lower), dimensions={} clamp_upper = bf16[] constant(448.0) clamp_upper_bcast = bf16[8,32,8,128] broadcast(clamp_upper), dimensions={} clamp = bf16[8,32,8,128] clamp(clamp_lower_bcast, divide, clamp_upper_bcast) ROOT convert = f8e4m3fn[8,32,8,128] convert(clamp) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get())); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::AllToAll(op::Convert(op::Clamp( op::Broadcast(), op::Divide(op::Parameter(), op::Broadcast()), op::Broadcast())))); HloInstruction* all_to_all = module->entry_computation()->root_instruction(); EXPECT_THAT(all_to_all->shape().element_type(), F8E4M3FN); } TEST_F(CollectiveQuantizerTest, CollectiveBroadcastQuantize) { absl::string_view hlo_string = R"( HloModule module ENTRY entry { param = bf16[8,32,8,128] parameter(0) collective-broadcast = bf16[8,32,8,128] collective-broadcast(param), replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1 scale = bf16[] parameter(1), sharding={replicated} scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={} divide = bf16[8,32,8,128] divide(collective-broadcast, scale_bcast) clamp_lower = bf16[] constant(-448.0) clamp_lower_bcast = bf16[8,32,8,128] broadcast(clamp_lower), dimensions={} clamp_upper = bf16[] constant(448.0) clamp_upper_bcast = bf16[8,32,8,128] broadcast(clamp_upper), dimensions={} clamp = bf16[8,32,8,128] clamp(clamp_lower_bcast, divide, clamp_upper_bcast) ROOT convert = f8e4m3fn[8,32,8,128] convert(clamp) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> 
module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get())); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::CollectiveBroadcast(op::Convert(op::Clamp( op::Broadcast(), op::Divide(op::Parameter(), op::Broadcast()), op::Broadcast())))); HloInstruction* collective_broadcast = module->entry_computation()->root_instruction(); EXPECT_THAT(collective_broadcast->shape().element_type(), F8E4M3FN); } TEST_F(CollectiveQuantizerTest, CollectivePermuteQuantize) { absl::string_view hlo_string = R"( HloModule module ENTRY entry { param = bf16[8,32,8,128] parameter(0) collective-permute = bf16[8,32,8,128] collective-permute(param), source_target_pairs={{0,1},{2,3},{4,5},{6,7}}, channel_id=1 scale = bf16[] parameter(1), sharding={replicated} scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={} divide = bf16[8,32,8,128] divide(collective-permute, scale_bcast) clamp_lower = bf16[] constant(-448.0) clamp_lower_bcast = bf16[8,32,8,128] broadcast(clamp_lower), dimensions={} clamp_upper = bf16[] constant(448.0) clamp_upper_bcast = bf16[8,32,8,128] broadcast(clamp_upper), dimensions={} clamp = bf16[8,32,8,128] clamp(clamp_lower_bcast, divide, clamp_upper_bcast) ROOT convert = f8e4m3fn[8,32,8,128] convert(clamp) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get())); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::CollectivePermute(op::Convert(op::Clamp( op::Broadcast(), op::Divide(op::Parameter(), op::Broadcast()), op::Broadcast())))); HloInstruction* collective_permute = module->entry_computation()->root_instruction(); EXPECT_THAT(collective_permute->shape().element_type(), F8E4M3FN); } TEST_F(CollectiveQuantizerTest, AllGatherQuantizeUnary) { absl::string_view hlo_string = R"( HloModule module ENTRY entry { 
param = bf16[8,4,8,128] parameter(0) all-gather = bf16[8,32,8,128] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1 reshape = bf16[8,32,1024] reshape(all-gather) slice = bf16[8,32,512] slice(reshape), slice={[0:8], [0:32], [256:768]} scale = bf16[] parameter(1), sharding={replicated} scale_bcast = bf16[8,32,512] broadcast(scale), dimensions={} divide = bf16[8,32,512] divide(slice, scale_bcast) clamp_lower = bf16[] constant(-448.0) clamp_lower_bcast = bf16[8,32,512] broadcast(clamp_lower), dimensions={} clamp_upper = bf16[] constant(448.0) clamp_upper_bcast = bf16[8,32,512] broadcast(clamp_upper), dimensions={} clamp = bf16[8,32,512] clamp(clamp_lower_bcast, divide, clamp_upper_bcast) ROOT convert = f8e4m3fn[8,32,512] convert(clamp) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get())); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Slice(op::Reshape(op::AllGather(op::Convert(op::Clamp( op::Broadcast(), op::Divide(op::Parameter(), op::Broadcast()), op::Broadcast())))))); HloInstruction* slice = module->entry_computation()->root_instruction(); EXPECT_THAT(slice->shape().element_type(), F8E4M3FN); } TEST_F(CollectiveQuantizerTest, AllGatherQuantizeMultiUser) { absl::string_view hlo_string = R"( HloModule module ENTRY entry { param = bf16[8,4,8,128] parameter(0) all-gather = bf16[8,32,8,128] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1 scale = bf16[] parameter(1), sharding={replicated} scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={} divide = bf16[8,32,8,128] divide(all-gather, scale_bcast) clamp_lower = bf16[] constant(-448.0) clamp_lower_bcast = bf16[8,32,8,128] broadcast(clamp_lower), dimensions={} clamp_upper = bf16[] constant(448.0) clamp_upper_bcast = bf16[8,32,8,128] broadcast(clamp_upper), dimensions={} 
clamp = bf16[8,32,8,128] clamp(clamp_lower_bcast, divide, clamp_upper_bcast) add = bf16[8,32,8,128] add(divide, clamp) ROOT convert = f8e4m3fn[8,32,8,128] convert(add) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get())); EXPECT_FALSE(changed); } TEST_F(CollectiveQuantizerTest, AllGatherQuantizeNonReplicatedScale) { absl::string_view hlo_string = R"( HloModule module ENTRY entry { param = bf16[8,4,8,128] parameter(0) all-gather = bf16[8,32,8,128] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1 scale = bf16[] parameter(1) scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={} divide = bf16[8,32,8,128] divide(all-gather, scale_bcast) clamp_lower = bf16[] constant(-448.0) clamp_lower_bcast = bf16[8,32,8,128] broadcast(clamp_lower), dimensions={} clamp_upper = bf16[] constant(448.0) clamp_upper_bcast = bf16[8,32,8,128] broadcast(clamp_upper), dimensions={} clamp = bf16[8,32,8,128] clamp(clamp_lower_bcast, divide, clamp_upper_bcast) ROOT convert = f8e4m3fn[8,32,8,128] convert(clamp) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get())); EXPECT_FALSE(changed); } TEST_F(CollectiveQuantizerTest, ConvertAllGather) { absl::string_view hlo_string = R"( HloModule module ENTRY entry { param = f8e4m3fn[8,4,8,128] parameter(0) convert = bf16[8,4,8,128] convert(param) ROOT all-gather = bf16[8,32,8,128] all-gather(convert), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get())); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), 
op::Convert(op::AllGather(op::Parameter()))); const HloInstruction* all_gather = module->entry_computation()->root_instruction()->operand(0); EXPECT_THAT(all_gather->shape().element_type(), F8E4M3FN); } TEST_F(CollectiveQuantizerTest, ConvertAllGatherUnary) { absl::string_view hlo_string = R"( HloModule module ENTRY entry { param = f8e4m3fn[8,4,8,128] parameter(0) convert = bf16[8,4,8,128] convert(param) reshape = bf16[8,4,1024] reshape(convert) slice = bf16[8,4,512] slice(reshape), slice={[0:8], [0:4], [256:768]} ROOT all-gather = bf16[8,32,512] all-gather(slice), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get())); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Convert(op::AllGather(op::Slice(op::Reshape(op::Parameter()))))); const HloInstruction* all_gather = module->entry_computation()->root_instruction()->operand(0); EXPECT_THAT(all_gather->shape().element_type(), F8E4M3FN); } TEST_F(CollectiveQuantizerTest, DequantizeAllGather) { absl::string_view hlo_string = R"( HloModule module ENTRY entry { param = f8e4m3fn[8,4,8,128] parameter(0) convert = bf16[8,4,8,128] convert(param) scale = bf16[] parameter(1), sharding={replicated} scale_bcast = bf16[8,4,8,128] broadcast(scale), dimensions={} multiply = bf16[8,4,8,128] multiply(convert, scale_bcast) ROOT all-gather = bf16[8,32,8,128] all-gather(multiply), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get())); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Multiply(op::Convert(op::AllGather(op::Parameter())), op::Broadcast())); const HloInstruction* 
all_gather = module->entry_computation()->root_instruction()->operand(0)->operand(0); EXPECT_THAT(all_gather->shape().element_type(), F8E4M3FN); } TEST_F(CollectiveQuantizerTest, DequantizeAllToAll) { absl::string_view hlo_string = R"( HloModule module ENTRY entry { param = f8e4m3fn[8,32,8,128] parameter(0) convert = bf16[8,32,8,128] convert(param) scale = bf16[] parameter(1), sharding={replicated} scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={} multiply = bf16[8,32,8,128] multiply(convert, scale_bcast) ROOT all-to-all = bf16[8,32,8,128] all-to-all(multiply), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get())); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Multiply(op::Convert(op::AllToAll(op::Parameter())), op::Broadcast())); const HloInstruction* all_to_all = module->entry_computation()->root_instruction()->operand(0)->operand(0); EXPECT_THAT(all_to_all->shape().element_type(), F8E4M3FN); } TEST_F(CollectiveQuantizerTest, DequantizeCollectiveBroadcast) { absl::string_view hlo_string = R"( HloModule module ENTRY entry { param = f8e4m3fn[8,32,8,128] parameter(0) convert = bf16[8,32,8,128] convert(param) scale = bf16[] parameter(1), sharding={replicated} scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={} multiply = bf16[8,32,8,128] multiply(convert, scale_bcast) ROOT collective-broadcast = bf16[8,32,8,128] collective-broadcast(multiply), replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get())); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), 
op::Multiply(op::Convert(op::CollectiveBroadcast(op::Parameter())),
                   op::Broadcast()));
  // The collective should now run on the narrow (quantized) type.
  const HloInstruction* collective_broadcast =
      module->entry_computation()->root_instruction()->operand(0)->operand(0);
  EXPECT_THAT(collective_broadcast->shape().element_type(), F8E4M3FN);
}

// Dequantize (convert + scale) after a collective-permute should be moved
// past the collective so the permute transfers f8e4m3fn data instead of bf16.
TEST_F(CollectiveQuantizerTest, DequantizeCollectivePermute) {
  absl::string_view hlo_string = R"( HloModule module ENTRY entry { param = f8e4m3fn[8,32,8,128] parameter(0) convert = bf16[8,32,8,128] convert(param) scale = bf16[] parameter(1), sharding={replicated} scale_bcast = bf16[8,32,8,128] broadcast(scale), dimensions={} multiply = bf16[8,32,8,128] multiply(convert, scale_bcast) ROOT collective-permute = bf16[8,32,8,128] collective-permute(multiply), source_target_pairs={{0,1},{2,3},{4,5},{6,7}}, channel_id=1 } )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
  EXPECT_TRUE(changed);
  // Dequantization (convert * broadcast(scale)) now follows the collective.
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Multiply(op::Convert(op::CollectivePermute(op::Parameter())),
                           op::Broadcast()));
  const HloInstruction* collective_permute =
      module->entry_computation()->root_instruction()->operand(0)->operand(0);
  EXPECT_THAT(collective_permute->shape().element_type(), F8E4M3FN);
}

// Same as above for all-gather, but with unary ops (reshape, slice) between
// the dequantize and the collective; the whole chain must be hoisted.
TEST_F(CollectiveQuantizerTest, DequantizeAllGatherUnary) {
  absl::string_view hlo_string = R"( HloModule module ENTRY entry { param = f8e4m3fn[8,4,8,128] parameter(0) convert = bf16[8,4,8,128] convert(param) scale = bf16[] parameter(1), sharding={replicated} scale_bcast = bf16[8,4,8,128] broadcast(scale), dimensions={} multiply = bf16[8,4,8,128] multiply(convert, scale_bcast) reshape = bf16[8,4,1024] reshape(multiply) slice = bf16[8,4,512] slice(reshape), slice={[0:8], [0:4], [256:768]} ROOT all-gather = bf16[8,32,512] all-gather(slice), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1 } )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunCollectiveQuantizer(module.get()));
  EXPECT_TRUE(changed);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Multiply(
          op::Convert(op::AllGather(op::Slice(op::Reshape(op::Parameter())))),
          op::Broadcast()));
  HloInstruction* all_gather = module->entry_computation()
                                   ->root_instruction()
                                   ->mutable_operand(0)
                                   ->mutable_operand(0);
  EXPECT_THAT(all_gather->shape().element_type(), F8E4M3FN);
}

}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_quantizer.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_quantizer_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
9a20636e-d0bb-4a0d-8c31-35ca2b50c535
cpp
tensorflow/tensorflow
while_loop_simplifier
third_party/xla/xla/service/while_loop_simplifier.cc
third_party/xla/xla/service/while_loop_simplifier_test.cc
#include "xla/service/while_loop_simplifier.h" #include <cstdint> #include <optional> #include <utility> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/comparison_util.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/literal_util.h" #include "xla/primitive_util.h" #include "xla/service/call_inliner.h" #include "xla/service/hlo_creation_utils.h" #include "xla/service/hlo_dce.h" #include "xla/service/pattern_matcher.h" #include "xla/service/while_loop_analysis.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/union_find.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace m = match; using hlo_query::ContainsInstrWithOpcode; using std::optional; static absl::StatusOr<bool> TryRemoveTrivialCompare(HloInstruction* while_op) { std::optional<int64_t> indvar_index = GetLoopInductionVarTupleIdx(while_op); if (indvar_index.has_value()) { if (while_op->operand(0)->operand(*indvar_index)->IsConstant()) { const HloConstantInstruction* init_value_hlo = Cast<HloConstantInstruction>( while_op->operand(0)->operand(*indvar_index)); std::optional<int64_t> trip_count = MatchTrivialLoopTripCount( while_op, indvar_index.value(), init_value_hlo->literal()); if (trip_count.has_value()) { std::optional<int64_t> init_value = LiteralUtil::LiteralAsScalarInt64(init_value_hlo->literal()); for (HloInstruction* body_instr : while_op->while_body()->instructions()) { HloInstruction* constant; if 
(Match(body_instr, m::Compare(m::GetTupleElement(m::Parameter(), indvar_index.value()), m::Constant(&constant).IsConstantScalar()))) { std::optional<int64_t> constant_value = LiteralUtil::LiteralAsScalarInt64(constant->literal()); if (constant_value.has_value()) { if (constant_value.value() <= init_value.value()) { if (body_instr->comparison_direction() == ComparisonDirection::kLt) { TF_RETURN_IF_ERROR(while_op->while_body()->ReplaceInstruction( body_instr, MakeScalarLike(body_instr, false))); return true; } else if (body_instr->comparison_direction() == ComparisonDirection::kGt) { TF_RETURN_IF_ERROR(while_op->while_body()->ReplaceInstruction( body_instr, MakeScalarLike(body_instr, true))); return true; } } if (constant_value.value() >= init_value.value() + trip_count.value()) { if (body_instr->comparison_direction() == ComparisonDirection::kLt) { TF_RETURN_IF_ERROR(while_op->while_body()->ReplaceInstruction( body_instr, MakeScalarLike(body_instr, true))); return true; } else if (body_instr->comparison_direction() == ComparisonDirection::kGt) { TF_RETURN_IF_ERROR(while_op->while_body()->ReplaceInstruction( body_instr, MakeScalarLike(body_instr, false))); return true; } } } } } } } } return false; } void CopyFrontendAttributes(HloInstruction* old_while_op, HloInstruction* new_while_op) { new_while_op->add_frontend_attributes(old_while_op->frontend_attributes()); } void CopyMetadata(HloInstruction* old_while_op, HloInstruction* new_while_op) { new_while_op->set_metadata(old_while_op->metadata()); } static absl::StatusOr<HloInstruction*> RemoveDeadTupleIndices( HloInstruction* while_op, absl::flat_hash_set<int64_t>& used_tuple_indices, int64_t index_for_replaced = -1) { std::vector<int64_t> new_to_old_tuple_idx(used_tuple_indices.begin(), used_tuple_indices.end()); absl::c_sort(new_to_old_tuple_idx); HloModule* module = while_op->GetModule(); HloComputation* computation = while_op->parent(); HloInstruction* while_init = while_op->mutable_operand(0); HloComputation* 
while_cond = while_op->while_condition();
  HloComputation* while_body = while_op->while_body();
  HloInstruction* while_body_root = while_body->root_instruction();

  auto print_no_metadata = HloPrintOptions().set_print_metadata(false);

  // Inverse mapping: old tuple index -> new tuple index.
  absl::flat_hash_map<int64_t, int64_t> old_to_new_tuple_idx;
  for (int64_t new_idx = 0; new_idx < new_to_old_tuple_idx.size(); ++new_idx) {
    int64_t old_idx = new_to_old_tuple_idx[new_idx];
    old_to_new_tuple_idx[old_idx] = new_idx;
    VLOG(2) << "Remapping tuple index " << old_idx << " to " << new_idx;
  }

  // Shape of the slimmed-down while loop: only the surviving elements.
  std::vector<const Shape*> new_while_tuple_elem_shapes;
  new_while_tuple_elem_shapes.reserve(new_to_old_tuple_idx.size());
  for (int64_t old_idx : new_to_old_tuple_idx) {
    new_while_tuple_elem_shapes.push_back(
        &while_init->shape().tuple_shapes(old_idx));
  }
  Shape new_while_shape =
      ShapeUtil::MakeTupleShapeWithPtrs(new_while_tuple_elem_shapes);

  // Builds the replacement map used to clone `comp` (cond or body) with the
  // new parameter shape: the parameter is re-created with the slim shape,
  // surviving GTEs are remapped, and GTEs of removed indices (and anything
  // transitively depending on them) are mapped to nullptr, i.e. dropped.
  auto make_while_computation_replacements = [&](const HloComputation* comp) {
    absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
        replacements;
    auto* param = comp->parameter_instruction(0);
    replacements.emplace(param, HloInstruction::CreateParameter(
                                    0, new_while_shape, param->name()));
    // Copy users first: emplacing replacements must not invalidate the
    // iteration.
    std::vector<HloInstruction*> materialized_users(param->users().begin(),
                                                    param->users().end());
    for (const auto* user : materialized_users) {
      if (user == while_body_root) {
        continue;
      }
      // Callers guarantee the parameter is only consumed through GTEs.
      CHECK_EQ(user->opcode(), HloOpcode::kGetTupleElement)
          << user->ToString(print_no_metadata);
      int64_t old_idx = user->tuple_index();
      auto new_idx_iter = old_to_new_tuple_idx.find(old_idx);
      if (new_idx_iter != old_to_new_tuple_idx.end()) {
        replacements.emplace(
            user, HloInstruction::CreateGetTupleElement(user->shape(), param,
                                                        new_idx_iter->second));
      } else {
        // This tuple element is dead; drop the GTE entirely.
        replacements.emplace(user, nullptr);
      }
    }
    // Propagate deletion: any instruction consuming a dropped instruction is
    // dropped as well (post-order guarantees operands are visited first).
    for (const auto* hlo : comp->MakeInstructionPostOrder()) {
      if (hlo == comp->root_instruction() || replacements.contains(hlo)) {
        continue;
      }
      for (const auto* operand : hlo->operands()) {
        auto op_it = replacements.find(operand);
        if
(op_it != replacements.end() && op_it->second == nullptr) {
          replacements[hlo] = nullptr;
          break;
        }
      }
    }
    return replacements;
  };

  // Clone the condition with the slimmed parameter.
  absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
      while_cond_replacements = make_while_computation_replacements(while_cond);
  std::unique_ptr<HloComputation> new_while_cond =
      while_cond->CloneWithReplacements(&while_cond_replacements);

  // Clone the body; additionally replace the root tuple with one holding only
  // the surviving elements, in new-index order.
  absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
      while_body_replacements = make_while_computation_replacements(while_body);
  std::vector<HloInstruction*> new_while_body_root_elems;
  new_while_body_root_elems.reserve(new_to_old_tuple_idx.size());
  for (int64_t old_idx : new_to_old_tuple_idx) {
    new_while_body_root_elems.push_back(
        while_body_root->mutable_operand(old_idx));
  }
  while_body_replacements.emplace(
      while_body_root, HloInstruction::CreateTuple(new_while_body_root_elems));
  std::unique_ptr<HloComputation> new_while_body =
      while_body->CloneWithReplacements(&while_body_replacements);

  // Build the slimmed init tuple from GTEs of the old init.
  std::vector<HloInstruction*> new_while_init_elems;
  new_while_init_elems.reserve(new_to_old_tuple_idx.size());
  for (int64_t old_idx : new_to_old_tuple_idx) {
    new_while_init_elems.push_back(
        computation->AddInstruction(HloInstruction::CreateGetTupleElement(
            while_init->shape().tuple_shapes(old_idx), while_init, old_idx)));
  }
  auto* new_while_init = computation->AddInstruction(
      HloInstruction::CreateTuple(new_while_init_elems));
  auto* new_while_op = computation->AddInstruction(HloInstruction::CreateWhile(
      new_while_shape,
      module->AddEmbeddedComputation(std::move(new_while_cond)),
      module->AddEmbeddedComputation(std::move(new_while_body)),
      new_while_init));
  new_while_op->CopyBackendConfigFrom(while_op);
  CopyFrontendAttributes(while_op, new_while_op);
  CopyMetadata(while_op, new_while_op);

  // Re-create a full-width tuple so existing users of the old while op keep
  // seeing the original tuple shape.
  std::vector<HloInstruction*> new_tuple_elems;
  const int64_t tuple_size = ShapeUtil::TupleElementCount(while_init->shape());
  for (int64_t old_idx = 0; old_idx < tuple_size; ++old_idx) {
    auto
new_tuple_idx_it = old_to_new_tuple_idx.find(old_idx);
    if (new_tuple_idx_it != old_to_new_tuple_idx.end() ||
        index_for_replaced != -1) {
      // Surviving element: read it from the new while op (or, for removed
      // elements when a replacement index was requested, from that index).
      int64_t gte_idx = new_tuple_idx_it != old_to_new_tuple_idx.end()
                            ? new_tuple_idx_it->second
                            : index_for_replaced;
      new_tuple_elems.push_back(
          computation->AddInstruction(HloInstruction::CreateGetTupleElement(
              new_while_op->shape().tuple_shapes(gte_idx), new_while_op,
              gte_idx)));
    } else {
      // Removed element: its value is loop-invariant, so forward the init.
      new_tuple_elems.push_back(
          computation->AddInstruction(HloInstruction::CreateGetTupleElement(
              while_init->shape().tuple_shapes(old_idx), while_init, old_idx)));
    }
  }
  HloInstruction* new_tuple =
      computation->AddInstruction(HloInstruction::CreateTuple(new_tuple_elems));
  TF_RETURN_IF_ERROR(computation->ReplaceInstruction(while_op, new_tuple));
  return new_while_op;
}

// Removes while-tuple elements that neither feed the loop condition, nor any
// side-effecting op, nor any other tuple element, nor any user after the
// loop.  Returns true iff the loop was rewritten.
absl::StatusOr<bool> TryRemoveDeadWhileParams(HloInstruction* while_op) {
  CHECK_EQ(while_op->opcode(), HloOpcode::kWhile);
  if (!while_op->parent()->IsSafelyRemovable(while_op)) {
    VLOG(2) << "Can't remove dead parameters from non-removable while op.";
    return false;
  }
  HloInstruction* while_init = while_op->mutable_operand(0);
  HloComputation* while_cond = while_op->while_condition();
  HloComputation* while_body = while_op->while_body();
  HloInstruction* while_body_root = while_body->root_instruction();
  if (!while_init->shape().IsTuple()) {
    VLOG(2) << "While op's carried value isn't tuple shaped.";
    return false;
  }
  if (while_body_root->opcode() != HloOpcode::kTuple) {
    VLOG(2) << "While body's root is not a tuple(...) instruction.";
    return false;
  }
  const int64_t tuple_size = ShapeUtil::TupleElementCount(while_init->shape());
  auto print_no_metadata = HloPrintOptions().set_print_metadata(false);
  // Start with every index considered live and erase dead ones below.
  absl::flat_hash_set<int64_t> used_tuple_indices;
  for (int64_t i = 0; i < tuple_size; ++i) {
    used_tuple_indices.insert(i);
  }
  // The analysis assumes the loop parameter is only consumed through GTEs;
  // bail out if it is used any other way.
  for (const HloInstruction* instr : {while_body->parameter_instruction(0),
                                      while_cond->parameter_instruction(0)}) {
    for (const HloInstruction* user : instr->users()) {
      if (user->opcode() != HloOpcode::kGetTupleElement) {
        VLOG(2) << "Cowardly refusing to analyze while loop with "
                << instr->ToString(print_no_metadata)
                << " used by non-GTE instruction "
                << user->ToString(print_no_metadata) << " in computation "
                << instr->parent()->name();
        return false;
      }
    }
  }
  if (tuple_size == 0) {
    VLOG(2) << "Can't remove elements from while loop's tuple -- it's already "
               "empty.";
    return false;
  }
  // Indices observed by code after the loop.  If the while is the computation
  // root, or any user is not a GTE, conservatively treat all as observed.
  absl::flat_hash_set<int64_t> used_indices_after_loop;
  if (while_op == while_op->parent()->root_instruction()) {
    for (int64_t i = 0; i < while_body_root->operand_count(); ++i) {
      used_indices_after_loop.insert(i);
    }
  }
  for (auto user : while_op->users()) {
    if (user->opcode() != HloOpcode::kGetTupleElement) {
      for (int64_t i = 0; i < while_body_root->operand_count(); ++i) {
        used_indices_after_loop.insert(i);
      }
      break;
    }
    used_indices_after_loop.insert(user->tuple_index());
  }
  // Set of input tuple indices an instruction transitively depends on.
  // Stored as a small vector of borrowed set pointers that is collapsed into
  // a single owned set once it grows, to keep merges cheap.
  struct InputIndicesSet {
    void Merge(const InputIndicesSet& other) {
      // Fast path: borrow the other side's sets while there is spare
      // inline capacity and we haven't materialized an owned set yet.
      if (all.size() + other.all.size() <= all.capacity() && owned == nullptr) {
        absl::c_copy(other.all, std::back_inserter(all));
        return;
      }
      // Slow path: flatten everything into one owned set.
      if (owned == nullptr) {
        owned = std::make_unique<absl::flat_hash_set<int64_t>>();
        owned->reserve(other.all.front()->size() * 2);
      }
      for (auto* deps : all) {
        if (deps == owned.get()) {
          continue;
        }
        owned->insert(deps->begin(), deps->end());
      }
      for (auto* deps : other.all) {
        owned->insert(deps->begin(), deps->end());
      }
      all.clear();
      all.push_back(owned.get());
    }
    void Add(int64_t index) {
      if (owned == nullptr) {
        CHECK(all.empty());
        owned = std::make_unique<absl::flat_hash_set<int64_t>>();
        all.push_back(owned.get());
      }
      owned->insert(index);
    }
    // Owning storage, populated lazily.
    std::unique_ptr<absl::flat_hash_set<int64_t>> owned;
    // Borrowed views over dependency sets (possibly including `owned`).
    absl::InlinedVector<const absl::flat_hash_set<int64_t>*, 4> all;
  };
  // Per-instruction input dependencies, and union-find groups of
  // instructions connected through the loop-carried state.
  absl::flat_hash_map<HloInstruction*, InputIndicesSet> inst_input_deps;
  absl::flat_hash_map<HloInstruction*, UnionFind<HloInstruction*>>
      disjoint_sets;
  for (HloComputation* comp : {while_body, while_cond}) {
    HloInstruction* while_input = comp->parameter_instruction(0);
    for (HloInstruction* inst : comp->instructions()) {
      if (inst == while_input || inst == while_body_root) {
        continue;
      }
      disjoint_sets[inst].Get() = inst;
    }
  }
  // Propagate dependencies in post-order; collect indices that feed the loop
  // condition or any side-effecting instruction (those can never be removed).
  absl::flat_hash_set<int64_t> side_effecting_indices;
  for (HloComputation* comp : {while_body, while_cond}) {
    HloInstruction* while_input = comp->parameter_instruction(0);
    for (HloInstruction* inst : comp->MakeInstructionPostOrder()) {
      if (inst == while_input || inst == while_body_root) {
        continue;
      }
      auto& deps = inst_input_deps[inst];
      auto& my_set = disjoint_sets[inst];
      if (inst->opcode() == HloOpcode::kGetTupleElement &&
          inst->operand(0) == while_input) {
        deps.Add(inst->tuple_index());
        // Link the GTE with the body output it feeds back into.
        HloInstruction* output =
            while_body_root->mutable_operand(inst->tuple_index());
        if (output != inst) {
          disjoint_sets[output].Merge(&my_set);
        }
      } else {
        for (HloInstruction* operand : inst->operands()) {
          disjoint_sets[operand].Merge(&my_set);
          deps.Merge(inst_input_deps[operand]);
        }
      }
      if (inst->HasSideEffect() || inst == while_cond->root_instruction()) {
        for (auto* dep : deps.all) {
          side_effecting_indices.insert(dep->begin(), dep->end());
        }
      }
    }
  }
  // Indices whose value flows into the computation of some *other* index.
  absl::flat_hash_set<int64_t> indices_affecting_others;
  for (int64_t i = 0; i < tuple_size; ++i) {
    HloInstruction* output = while_body_root->mutable_operand(i);
    for (auto* deps : inst_input_deps[output].all) {
      for (int64_t index : *deps) {
        if (index != i) {
          indices_affecting_others.insert(index);
        }
      }
    }
  }
  // First removal criterion: an index that only feeds itself, is unused after
  // the loop, and has no side effects is dead.
  for (int64_t i = 0; i < tuple_size; ++i) {
    if (!indices_affecting_others.contains(i) &&
!used_indices_after_loop.contains(i) &&
        !side_effecting_indices.contains(i)) {
      VLOG(2) << "Remove with dependencies " << i;
      used_tuple_indices.erase(i);
    }
  }
  // Second removal criterion: whole connected groups of indices that only
  // feed each other.  Group body outputs and parameter GTEs by their
  // union-find representative.
  absl::flat_hash_map<HloInstruction*, absl::flat_hash_set<int64_t>> groups;
  for (int64_t i = 0; i < tuple_size; ++i) {
    HloInstruction* output = while_body_root->mutable_operand(i);
    groups[disjoint_sets[output].Get()].insert(i);
  }
  for (HloComputation* comp : {while_body, while_cond}) {
    HloInstruction* while_input = comp->parameter_instruction(0);
    for (HloInstruction* gte : while_input->users()) {
      groups[disjoint_sets[gte].Get()].insert(gte->tuple_index());
    }
  }
  for (const auto& group : groups) {
    // A group is kept if any member is side-effecting, or is observed after
    // the loop while not being a pure pass-through of itself.
    if (absl::c_any_of(group.second, [&](int64_t index) {
          const HloInstruction* output = while_body_root->operand(index);
          return side_effecting_indices.contains(index) ||
                 (used_indices_after_loop.contains(index) &&
                  !(output->opcode() == HloOpcode::kGetTupleElement &&
                    output->operand(0) ==
                        while_body->parameter_instruction(0) &&
                    output->tuple_index() == index));
        })) {
      continue;
    }
    VLOG(2) << "Remove with groups:";
    for (int64_t index : group.second) {
      VLOG(2) << " index " << index;
      used_tuple_indices.erase(index);
    }
  }
  if (used_tuple_indices.size() == tuple_size) {
    VLOG(2) << "Loop " << while_op->ToString(print_no_metadata)
            << " uses all of its inputs; no simplification possible.";
    return false;
  }
  CHECK_LT(used_tuple_indices.size(), tuple_size);
  VLOG(1) << "Eliminating " << tuple_size - used_tuple_indices.size()
          << " elements from tuple of " << while_op->ToString(print_no_metadata);
  TF_ASSIGN_OR_RETURN(while_op,
                      RemoveDeadTupleIndices(while_op, used_tuple_indices));
  return true;
}

// Collapses the tuple indices in `duplicates` onto `tuple_index`: all GTEs of
// duplicate indices in cond/body are redirected to a GTE of `tuple_index`,
// then the duplicate indices are removed.  When `replace_with_init` is false,
// post-loop users of the duplicates read `tuple_index` of the new while op
// instead of the original init value.  Returns the new while instruction.
static absl::StatusOr<HloInstruction*> TryRemoveRepeatedWhileTupleIndicesHelper(
    HloInstruction* while_op, const int64_t tuple_index, bool replace_with_init,
    absl::flat_hash_set<int64_t>& duplicates) {
  HloComputation* while_cond = while_op->while_condition();
  HloComputation* while_body = while_op->while_body();
  HloInstruction* while_init =
while_op->mutable_operand(0);
  VLOG(2) << "while_init " << while_init->ToString() << " operands "
          << while_init->operand_count();
  VLOG(2) << "while_body_root " << while_body->root_instruction()->ToString()
          << " operands " << while_body->root_instruction()->operand_count();
  // Redirect all GTEs of duplicate indices to a single GTE of the pivot.
  for (HloComputation* comp : {while_body, while_cond}) {
    auto new_get = comp->AddInstruction(HloInstruction::CreateGetTupleElement(
        comp->parameter_instruction(0)->shape().tuple_shapes(tuple_index),
        comp->parameter_instruction(0), tuple_index));
    std::vector<HloInstruction*> instrs_to_replace;
    for (auto* instr : comp->instructions()) {
      if (instr->opcode() == HloOpcode::kGetTupleElement &&
          duplicates.contains(instr->tuple_index()) &&
          instr->operand(0) == comp->parameter_instruction(0)) {
        instrs_to_replace.push_back(instr);
      }
    }
    // Replace outside the iteration above to avoid mutating while iterating.
    for (auto instr : instrs_to_replace) {
      TF_RETURN_IF_ERROR(comp->ReplaceInstruction(instr, new_get));
    }
  }
  // Everything that is not a duplicate survives.
  absl::flat_hash_set<int64_t> used_tuple_indices;
  for (int index = 0; index < while_init->shape().tuple_shapes_size();
       ++index) {
    if (!duplicates.count(index)) {
      used_tuple_indices.insert(index);
    }
  }
  TF_ASSIGN_OR_RETURN(
      while_op, RemoveDeadTupleIndices(while_op, used_tuple_indices,
                                       replace_with_init ?
-1 : tuple_index));
  return while_op;
}

// True if `instr` is a dynamic-update-slice writing into a tuple element of
// the while body's parameter (i.e. an in-place insertion into loop state).
static bool IsDynamicUpdateSliceWhileInsertion(
    const HloInstruction* instr, const HloComputation* while_body) {
  return instr->opcode() == HloOpcode::kDynamicUpdateSlice &&
         instr->operand(0)->opcode() == HloOpcode::kGetTupleElement &&
         instr->operand(0)->operand(0) == while_body->parameter_instruction(0);
}

// Finds groups of tuple indices that start from the same init value and
// evolve identically (either pure pass-throughs or identical
// dynamic-update-slice insertions), and deduplicates each group onto a single
// pivot index.  Returns true iff the loop was rewritten.
static absl::StatusOr<bool> TryRemoveRepeatedWhileTupleIndices(
    HloInstruction* while_op) {
  CHECK_EQ(while_op->opcode(), HloOpcode::kWhile);
  int index_to_investigate = 0;
  if (!while_op->parent()->IsSafelyRemovable(while_op)) {
    VLOG(2) << "Can't remove dead parameters from non-removable while op.";
    return false;
  }
  HloInstruction* while_init = while_op->mutable_operand(0);
  HloComputation* while_cond = while_op->while_condition();
  HloComputation* while_body = while_op->while_body();
  HloInstruction* while_body_root = while_body->root_instruction();
  if (!while_init->shape().IsTuple()) {
    VLOG(2) << "While op's carried value isn't tuple shaped.";
    return false;
  }
  bool changed = false;
  // Each successful rewrite shrinks the tuple, so this loop terminates.
  while (index_to_investigate < while_init->shape().tuple_shapes_size()) {
    if (!while_init->shape().IsTuple() ||
        while_init->opcode() != HloOpcode::kTuple) {
      VLOG(2) << "While op's carried value isn't tuple shaped.";
      return false;
    }
    if (while_body_root->opcode() != HloOpcode::kTuple) {
      VLOG(2) << "While body's root is not a tuple(...) instruction.";
      return false;
    }
    auto& while_shape = while_init->shape();
    VLOG(2) << "Iterating " << index_to_investigate;
    absl::flat_hash_set<int64_t> duplicates;
    auto* pivot_init_elem = while_init->operand(index_to_investigate);
    auto* pivot_body_elem = while_body_root->operand(index_to_investigate);
    bool replace_with_init = true;
    // The pivot must be either a pass-through GTE of its own index, or a
    // dynamic-update-slice insertion into its own index.
    if (pivot_body_elem->opcode() == HloOpcode::kGetTupleElement &&
        pivot_body_elem->operand(0) == while_body->parameter_instruction(0)) {
      if (pivot_body_elem->tuple_index() != index_to_investigate) {
        VLOG(2) << "Mismatch between pivot_body_elem->tuple_index() "
                << pivot_body_elem->tuple_index() << " index_to_investigate "
                << index_to_investigate;
        index_to_investigate++;
        continue;
      }
    } else if (IsDynamicUpdateSliceWhileInsertion(pivot_body_elem,
                                                  while_body)) {
      if (pivot_body_elem->operand(0)->tuple_index() != index_to_investigate) {
        VLOG(2)
            << "Mismatch between pivot_body_elem->operand(0)->tuple_index() "
            << pivot_body_elem->operand(0)->tuple_index()
            << " index_to_investigate " << index_to_investigate;
        index_to_investigate++;
        continue;
      }
    } else {
      index_to_investigate++;
      continue;
    }
    // Scan later indices for elements that evolve the same way as the pivot.
    for (int64_t i = index_to_investigate + 1;
         i < while_shape.tuple_shapes_size(); ++i) {
      auto* init_elem = while_init->operand(i);
      auto* body_elem = while_body_root->operand(i);
      if (pivot_body_elem->opcode() == HloOpcode::kGetTupleElement &&
          body_elem->opcode() == HloOpcode::kGetTupleElement &&
          body_elem->operand(0) == while_body->parameter_instruction(0)) {
        if (body_elem->tuple_index() != i) {
          VLOG(2) << "Mismatch between body_elem->tuple_index() "
                  << body_elem->tuple_index() << " i " << i;
          continue;
        }
      } else if (IsDynamicUpdateSliceWhileInsertion(pivot_body_elem,
                                                    while_body) &&
                 IsDynamicUpdateSliceWhileInsertion(body_elem, while_body)) {
        if (pivot_body_elem->operand_count() != body_elem->operand_count()) {
          VLOG(2) << "Mismatch in operand count of dynamic-update-slice "
                  << pivot_body_elem->operand_count() << " vs "
                  << body_elem->operand_count();
          continue;
        }
        if
(body_elem->operand(0)->tuple_index() != i) { VLOG(2) << "Mismatch between body_elem->operand(0)->tuple_index() " << body_elem->tuple_index() << " i " << i; continue; } if (pivot_body_elem->operand(0) == body_elem->operand(0)) { VLOG(2) << "Inserting in the same input index"; continue; } bool mismatch = false; for (int64_t i = 1; i < body_elem->operand_count(); ++i) { if (body_elem->operand(i) != pivot_body_elem->operand(i)) { VLOG(2) << "Mismatch in insertion indices or values"; mismatch = true; break; } } if (mismatch) { continue; } replace_with_init = false; } else { continue; } if (pivot_init_elem == init_elem) { VLOG(2) << "init_elem " << init_elem->ToString() << " pivot_init_elem " << pivot_init_elem->ToString(); VLOG(2) << "body_elem " << body_elem->ToString() << " pivot_body_elem " << pivot_body_elem->ToString(); duplicates.insert(i); } } if (!duplicates.empty()) { VLOG(2) << "Duplicate found " << duplicates.size() << " pivot_init " << pivot_init_elem->ToString(); TF_ASSIGN_OR_RETURN(while_op, TryRemoveRepeatedWhileTupleIndicesHelper( while_op, index_to_investigate, replace_with_init, duplicates)); changed = true; VLOG(2) << "Changed while_op " << while_op->ToString() << " while_op operand count " << while_op->operand_count(); while_init = while_op->mutable_operand(0); while_cond = while_op->while_condition(); while_body = while_op->while_body(); while_body_root = while_body->root_instruction(); } index_to_investigate++; } return changed; } static absl::StatusOr<bool> TryRemoveConstantParams(HloInstruction* while_op) { HloModule* module = while_op->GetModule(); HloComputation* computation = while_op->parent(); auto* while_init = while_op->mutable_operand(0); auto* while_body = while_op->while_body(); auto* while_cond = while_op->while_condition(); auto* while_body_root = while_body->root_instruction(); if (while_init->opcode() != HloOpcode::kTuple || while_body_root->opcode() != HloOpcode::kTuple) { return false; } TF_RET_CHECK(while_cond->num_parameters() 
== 1); TF_RET_CHECK(while_body->num_parameters() == 1); TF_RET_CHECK( ShapeUtil::Compatible(while_init->shape(), while_body_root->shape())); absl::flat_hash_set<int64_t> constant_tuple_indices; const auto& while_shape = while_init->shape(); for (int i = 0; i < while_shape.tuple_shapes_size(); ++i) { auto* init_elem = while_init->operand(i); auto* body_elem = while_body_root->operand(i); if (init_elem->opcode() == HloOpcode::kConstant && body_elem->opcode() == HloOpcode::kConstant && init_elem->literal() == body_elem->literal()) { constant_tuple_indices.insert(i); } } if (constant_tuple_indices.empty()) { return false; } std::vector<const Shape*> new_while_shape_elems; for (int i = 0; i < while_shape.tuple_shapes_size(); ++i) { if (!constant_tuple_indices.count(i)) { new_while_shape_elems.push_back(&while_shape.tuple_shapes(i)); } } Shape new_while_shape = ShapeUtil::MakeTupleShapeWithPtrs(new_while_shape_elems); std::vector<std::unique_ptr<HloInstruction>> new_instrs; auto add_new_instr = [&](std::unique_ptr<HloInstruction> instr) { new_instrs.push_back(std::move(instr)); return new_instrs.back().get(); }; auto remove_constant_elems = [&](HloInstruction* instr) { CHECK(ShapeUtil::Compatible(instr->shape(), while_shape)); std::vector<HloInstruction*> tuple_elems; for (int i = 0; i < while_shape.tuple_shapes_size(); ++i) { if (!constant_tuple_indices.count(i)) { tuple_elems.push_back( add_new_instr(HloInstruction::CreateGetTupleElement( while_shape.tuple_shapes(i), instr, i))); } } return HloInstruction::CreateTuple(tuple_elems); }; auto add_constant_elems = [&](HloInstruction* instr) { CHECK(ShapeUtil::Compatible(instr->shape(), new_while_shape)); std::vector<HloInstruction*> tuple_elems; int64_t j = 0; for (int i = 0; i < while_shape.tuple_shapes_size(); ++i) { if (constant_tuple_indices.count(i)) { tuple_elems.push_back(while_init->mutable_operand(i)); } else { tuple_elems.push_back( add_new_instr(HloInstruction::CreateGetTupleElement( while_shape.tuple_shapes(i), 
instr, j)));
        ++j;
      }
    }
    return HloInstruction::CreateTuple(tuple_elems);
  };
  // If nothing but constants was carried, the loop computes nothing: replace
  // it with its init value outright.
  if (ShapeUtil::IsEmptyTuple(new_while_shape)) {
    TF_RETURN_IF_ERROR(computation->ReplaceInstruction(while_op, while_init));
    return true;
  }
  // Clone cond/body with slim parameters, re-adding the constants inside.
  std::unique_ptr<HloComputation> new_while_cond =
      while_cond->CloneWithReplacementPairs({
          while_cond->parameter_instruction(0),
          add_constant_elems(add_new_instr(HloInstruction::CreateParameter(
              0, new_while_shape,
              while_cond->parameter_instruction(0)->name()))),
      });
  std::unique_ptr<HloComputation> new_while_body =
      while_body->CloneWithReplacementPairs(
          {
              while_body->parameter_instruction(0),
              add_constant_elems(add_new_instr(HloInstruction::CreateParameter(
                  0, new_while_shape,
                  while_cond->parameter_instruction(0)->name()))),
          },
          {
              while_body->root_instruction(),
              remove_constant_elems(
                  add_new_instr(while_body->root_instruction()->Clone())),
          });
  // CloneWithReplacementPairs copied what it needed; release our staging
  // area so the instructions below are freshly accumulated.
  new_instrs.clear();
  auto* new_while_op = computation->AddInstruction(HloInstruction::CreateWhile(
      new_while_shape,
      module->AddEmbeddedComputation(std::move(new_while_cond)),
      module->AddEmbeddedComputation(std::move(new_while_body)),
      add_new_instr(remove_constant_elems(while_init))));
  new_while_op->CopyBackendConfigFrom(while_op);
  CopyFrontendAttributes(while_op, new_while_op);
  CopyMetadata(while_op, new_while_op);
  TF_RETURN_IF_ERROR(computation->ReplaceWithNewInstruction(
      while_op, add_constant_elems(new_while_op)));
  // Re-parent the staged instructions into the computation.
  for (auto& instr : new_instrs) {
    computation->AddInstruction(std::move(instr));
  }
  return true;
}

// Deletes while loops with trip count 0 (replace with init) or trip count 1
// (replace with an inlined call to the body), when safe.  Returns true iff
// the loop was removed.
static absl::StatusOr<bool> TryRemoveWhileLoop(HloInstruction* while_op) {
  if (!while_op->parent()->IsSafelyRemovable(while_op)) {
    VLOG(2) << "Not attempting to remove while loop that is not removable: "
            << while_op->ToShortString();
    return false;
  }
  if (while_op->while_condition()->HasSideEffect()) {
    VLOG(2) << "Not attempting to remove while loop whose condition contains "
               "side-effecting instructions: "
            << while_op->ToShortString();
    return false;
  }
  optional<int64_t> trip_count =
ComputeWhileLoopTripCount(while_op, 1);
  // Trip count 0: the body never runs; the loop is its init value.
  if (trip_count && *trip_count == 0) {
    auto computation = while_op->parent();
    TF_RETURN_IF_ERROR(computation->ReplaceInstruction(
        while_op, while_op->mutable_operand(0)));
    return true;
  }
  // Frontends may opt a loop out of the trip-count-1 rewrite via this
  // attribute.
  const auto& attrs = while_op->frontend_attributes().map();
  bool skip_trip_count_one_simplification =
      attrs.contains("skip-simplify-while-loops_trip-count-one") &&
      (attrs.at("skip-simplify-while-loops_trip-count-one") == "true");
  // Trip count 1: the loop is exactly one execution of its body.
  if (trip_count && *trip_count == 1 && !skip_trip_count_one_simplification) {
    bool has_side_effects = absl::c_any_of(
        while_op->called_computations(), [](const HloComputation* computation) {
          return computation->HasSideEffect();
        });
    if (!has_side_effects) {
      auto computation = while_op->parent();
      auto call_op = computation->AddInstruction(HloInstruction::CreateCall(
          while_op->shape(), while_op->operands(), while_op->while_body()));
      TF_RETURN_IF_ERROR(computation->ReplaceInstruction(while_op, call_op));
      TF_ASSIGN_OR_RETURN(auto inlined_instructions_map,
                          CallInliner::Inline(call_op));
      (void)inlined_instructions_map;
      return true;
    } else {
      VLOG(2) << "Not attempting to simplify while loop because it contains a "
                 "side-effecting node: "
              << while_op->ToShortString();
    }
  }
  return false;
}

// Replaces in-loop GTEs of scalar tuple elements that are pass-throughs
// (body output == parameter GTE of the same index) with a clone of the
// constant init value, enabling further constant folding.  Returns true iff
// cond or body changed.
static absl::StatusOr<bool> TryPropagateConstant(HloInstruction* while_op) {
  auto while_init = while_op->operand(0);
  if (while_init->opcode() != HloOpcode::kTuple) {
    return false;
  }
  auto while_body = while_op->while_body();
  auto while_body_root = while_body->root_instruction();
  if (while_body_root->opcode() != HloOpcode::kTuple) {
    return false;
  }
  auto while_body_param = while_body->parameter_instruction(0);
  const HloInstruction::InstructionVector& root_operands =
      while_body_root->operands();
  // Map from tuple index to the invariant constant it carries.
  absl::flat_hash_map<int, const HloInstruction*> index_to_constant;
  for (int i = 0; i < root_operands.size(); i++) {
    const HloInstruction* init_tuple_elem = nullptr;
    if (Match(root_operands[i],
              m::GetTupleElement(m::Op().Is(while_body_param), i)
.WithShape(m::Shape().IsScalar())) &&
        Match(while_init->operand(i), m::Constant(&init_tuple_elem))) {
      VLOG(3) << "Found loop invariant tuple element " << i << " "
              << init_tuple_elem->ToString();
      index_to_constant[i] = init_tuple_elem;
    }
  }
  if (index_to_constant.empty()) {
    return false;
  }
  // Rewrites one computation (cond or body): GTEs of invariant indices are
  // replaced by clones of the corresponding constant.
  auto propagate_constant =
      [&](HloComputation* computation) -> absl::StatusOr<bool> {
    HloInstruction* param = computation->parameter_instruction(0);
    bool changed = false;
    for (auto instr : param->users()) {
      if (instr->opcode() == HloOpcode::kGetTupleElement) {
        VLOG(3) << "tuple index " << instr->tuple_index() << " "
                << instr->ToString();
        auto iter = index_to_constant.find(instr->tuple_index());
        if (iter != index_to_constant.end()) {
          const HloInstruction* hlo_constant = (*iter).second;
          VLOG(3) << "Replace use of " << instr->ToString() << " with "
                  << hlo_constant->ToString();
          TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(
              computation->AddInstruction(hlo_constant->Clone())));
          changed = true;
        }
      }
    }
    return changed;
  };
  TF_ASSIGN_OR_RETURN(bool changed_cond,
                      propagate_constant(while_op->while_condition()));
  TF_ASSIGN_OR_RETURN(bool changed_body, propagate_constant(while_body));
  return changed_cond || changed_body;
}

// Rebuilds a (possibly nested) tuple of shape `desired_shape` from the flat
// list of leaf instructions `instrs`, consuming leaves left-to-right.
// Intermediate tuples are appended to `new_instrs`; ownership stays there.
static std::unique_ptr<HloInstruction> UnflattenTupleInstr(
    absl::Span<HloInstruction*> instrs, const Shape& desired_shape,
    std::vector<std::unique_ptr<HloInstruction>>* new_instrs) {
  CHECK(desired_shape.IsTuple()) << ShapeUtil::HumanString(desired_shape);
  std::vector<HloInstruction*> elems;
  for (int i = 0; i < desired_shape.tuple_shapes_size(); ++i) {
    const Shape& subshape = desired_shape.tuple_shapes(i);
    if (!subshape.IsTuple()) {
      // Leaf: take the next flat element.
      elems.push_back(instrs[0]);
      instrs.remove_prefix(1);
      continue;
    }
    // Nested tuple: count its leaves, recurse on that prefix of `instrs`.
    int64_t num_leaves = 0;
    ShapeUtil::ForEachSubshape(
        subshape, [&](const Shape& s, const ShapeIndex& /*index*/) {
          if (!s.IsTuple()) {
            ++num_leaves;
          }
        });
    std::unique_ptr<HloInstruction> subinstr =
        UnflattenTupleInstr(instrs.subspan(0, num_leaves),
                            desired_shape.tuple_shapes(i), new_instrs);
    elems.push_back(subinstr.get());
    new_instrs->push_back(std::move(subinstr));
    instrs.remove_prefix(num_leaves);
  }
  return HloInstruction::CreateTuple(elems);
}

// Returns the flat (depth-first) list of leaf values of `instr`, emitting the
// GTEs needed to reach them into `new_instrs`.
static std::vector<HloInstruction*> GetFlatTupleElems(
    HloInstruction* instr,
    std::vector<std::unique_ptr<HloInstruction>>* new_instrs) {
  const auto& shape = instr->shape();
  if (!shape.IsTuple()) {
    return {instr};
  }
  std::vector<HloInstruction*> elems;
  for (int i = 0; i < shape.tuple_shapes_size(); ++i) {
    const Shape& subshape = shape.tuple_shapes(i);
    new_instrs->push_back(
        HloInstruction::CreateGetTupleElement(subshape, instr, i));
    auto* gte = new_instrs->back().get();
    auto flattened_subshape = GetFlatTupleElems(gte, new_instrs);
    elems.insert(elems.end(), flattened_subshape.begin(),
                 flattened_subshape.end());
  }
  return elems;
}

// Rewrites a while loop whose carried value is a nested tuple into one with a
// flat tuple, wrapping cond/body parameters and the result so observable
// shapes are unchanged.  Returns true iff the loop was rewritten.
static absl::StatusOr<bool> TryFlattenNestedTuples(HloInstruction* while_op) {
  HloModule* module = while_op->GetModule();
  HloComputation* computation = while_op->parent();
  auto* while_init = while_op->mutable_operand(0);
  auto* while_body = while_op->while_body();
  auto* while_cond = while_op->while_condition();
  auto* while_body_root = while_body->root_instruction();
  if (while_init->opcode() != HloOpcode::kTuple ||
      while_body_root->opcode() != HloOpcode::kTuple) {
    return false;
  }
  TF_RET_CHECK(while_cond->num_parameters() == 1);
  TF_RET_CHECK(while_body->num_parameters() == 1);
  TF_RET_CHECK(
      ShapeUtil::Compatible(while_init->shape(), while_body_root->shape()));
  Shape while_shape = while_init->shape();
  if (!ShapeUtil::IsNestedTuple(while_shape)) {
    return false;
  }
  // Collect the leaf shapes depth-first; this order matches
  // GetFlatTupleElems/UnflattenTupleInstr.
  std::vector<const Shape*> flattened_shape_elems;
  ShapeUtil::ForEachSubshape(while_shape,
                             [&](const Shape& s, const ShapeIndex& /*index*/) {
                               if (!s.IsTuple()) {
                                 flattened_shape_elems.push_back(&s);
                               }
                             });
  Shape flattened_shape =
      ShapeUtil::MakeTupleShapeWithPtrs(flattened_shape_elems);
  // Staging area for instructions created before they are re-parented.
  std::vector<std::unique_ptr<HloInstruction>> new_instrs;
  auto add_new_instr = [&](std::unique_ptr<HloInstruction> instr) {
new_instrs.push_back(std::move(instr)); return new_instrs.back().get(); }; auto nested = [&](HloInstruction* instr) { std::vector<HloInstruction*> gtes; const Shape& flat_shape = instr->shape(); gtes.reserve(flat_shape.tuple_shapes_size()); for (int i = 0; i < flat_shape.tuple_shapes_size(); ++i) { gtes.push_back(add_new_instr(HloInstruction::CreateGetTupleElement( flat_shape.tuple_shapes(i), instr, i))); } auto nested_instr = UnflattenTupleInstr(absl::MakeSpan(gtes), while_shape, &new_instrs); CHECK(ShapeUtil::Compatible(nested_instr->shape(), while_shape)) << ShapeUtil::HumanString(nested_instr->shape()) << " vs " << ShapeUtil::HumanString(while_shape); return nested_instr; }; auto flattened = [&](HloInstruction* instr) { return HloInstruction::CreateTuple(GetFlatTupleElems(instr, &new_instrs)); }; std::unique_ptr<HloComputation> new_while_cond = while_cond->CloneWithReplacementPairs({ while_cond->parameter_instruction(0), nested(add_new_instr(HloInstruction::CreateParameter( 0, flattened_shape, while_cond->parameter_instruction(0)->name()))), }); std::unique_ptr<HloComputation> new_while_body = while_body->CloneWithReplacementPairs( { while_body->parameter_instruction(0), nested(add_new_instr(HloInstruction::CreateParameter( 0, flattened_shape, while_body->parameter_instruction(0)->name()))), }, { while_body->root_instruction(), flattened(add_new_instr(while_body->root_instruction()->Clone())), }); new_instrs.clear(); auto* new_while_op = computation->AddInstruction(HloInstruction::CreateWhile( flattened_shape, module->AddEmbeddedComputation(std::move(new_while_cond)), module->AddEmbeddedComputation(std::move(new_while_body)), computation->AddInstruction(flattened(while_init)))); new_while_op->CopyBackendConfigFrom(while_op); CopyFrontendAttributes(while_op, new_while_op); CopyMetadata(while_op, new_while_op); TF_RETURN_IF_ERROR( computation->ReplaceWithNewInstruction(while_op, nested(new_while_op))); for (auto& instr : new_instrs) { 
computation->AddInstruction(std::move(instr)); } return true; } static absl::StatusOr<HloInstruction*> TryMergeInductionVariables( HloInstruction* while_op, PrimitiveType elem_ty) { CHECK(primitive_util::IsIntegralType(elem_ty)) << PrimitiveType_Name(elem_ty); HloModule* module = while_op->GetModule(); HloComputation* computation = while_op->parent(); auto* while_init = while_op->mutable_operand(0); auto* while_body = while_op->while_body(); auto* while_cond = while_op->while_condition(); auto* while_body_root = while_body->root_instruction(); if (while_init->opcode() != HloOpcode::kTuple || while_body_root->opcode() != HloOpcode::kTuple) { return nullptr; } TF_RET_CHECK(while_cond->num_parameters() == 1); TF_RET_CHECK(while_body->num_parameters() == 1); TF_RET_CHECK( ShapeUtil::Compatible(while_init->shape(), while_body_root->shape())); Shape while_shape = while_init->shape(); std::optional<int64_t> trip_counter; absl::flat_hash_map<int64_t, const HloConstantInstruction*> induction_vars; for (int64_t i = 0; i < while_body_root->operand_count(); ++i) { HloInstruction* constant; if (!Match(while_body_root->mutable_operand(i), m::AddAnyOrder(m::GetTupleElement(m::Parameter(), i), m::ConstantScalar(&constant)) .WithShape(m::Shape().WithElementType(elem_ty)))) { continue; } if (!trip_counter && constant->literal().IsAll(1) && while_init->operand(i)->IsConstant() && while_init->operand(i)->literal().IsAll(0)) { VLOG(10) << "Found existing trip counter at index " << i; trip_counter = i; } else { VLOG(10) << "Found induction variable at index " << i; induction_vars.emplace(i, Cast<HloConstantInstruction>(constant)); } } if (induction_vars.size() + (trip_counter.has_value() ? 
1 : 0) < 2) { return nullptr; } std::vector<std::unique_ptr<HloInstruction>> new_instrs; auto add_new_instr = [&](std::unique_ptr<HloInstruction> instr) { new_instrs.push_back(std::move(instr)); return new_instrs.back().get(); }; auto add_binary_op = [&](const Shape& shape, HloOpcode opcode, HloInstruction* lhs, HloInstruction* rhs) { if (!ShapeUtil::Compatible(shape, lhs->shape())) { lhs = add_new_instr(HloInstruction::CreateReshape(shape, lhs)); } if (!ShapeUtil::Compatible(shape, rhs->shape())) { rhs = add_new_instr(HloInstruction::CreateReshape(shape, rhs)); } return add_new_instr(HloInstruction::CreateBinary(shape, opcode, lhs, rhs)); }; auto add_gte = [&](HloInstruction* src, int64_t idx) { return add_new_instr(HloInstruction::CreateGetTupleElement( src->shape().tuple_shapes(idx), src, idx)); }; Shape new_while_shape = while_shape; bool added_trip_counter = false; if (!trip_counter) { VLOG(10) << "Adding new trip counter to end of loop's tuple."; trip_counter = new_while_shape.tuple_shapes_size(); *new_while_shape.add_tuple_shapes() = ShapeUtil::MakeShape(elem_ty, {}); added_trip_counter = true; } auto convert_to_old_form = [&](HloInstruction* instr) { CHECK(ShapeUtil::Compatible(instr->shape(), new_while_shape)); std::vector<HloInstruction*> tuple_elems; for (int i = 0; i < while_shape.tuple_shapes_size(); ++i) { const auto& elem_shape = while_shape.tuple_shapes(i); if (!induction_vars.count(i)) { tuple_elems.push_back(add_gte(instr, i)); continue; } tuple_elems.push_back(add_binary_op( elem_shape, HloOpcode::kAdd, add_gte(instr, i), add_binary_op(elem_shape, HloOpcode::kMultiply, add_gte(instr, *trip_counter), add_new_instr(induction_vars.at(i)->Clone())))); } return HloInstruction::CreateTuple(tuple_elems); }; auto convert_to_new_form = [&](HloInstruction* old_root, HloParameterInstruction* loop_body_param) { CHECK(ShapeUtil::Compatible(old_root->shape(), while_shape)); std::vector<HloInstruction*> tuple_elems; 
tuple_elems.reserve(while_shape.tuple_shapes_size()); for (int i = 0; i < while_shape.tuple_shapes_size(); ++i) { tuple_elems.push_back( add_gte((induction_vars.count(i) ? loop_body_param : old_root), i)); } if (added_trip_counter) { tuple_elems.push_back(add_binary_op( new_while_shape.tuple_shapes(*trip_counter), HloOpcode::kAdd, add_gte(loop_body_param, *trip_counter), add_new_instr( HloInstruction::CreateConstant(LiteralUtil::One(elem_ty))))); } return HloInstruction::CreateTuple(tuple_elems); }; auto get_new_while_init = [&](HloInstruction* init) { CHECK(ShapeUtil::Compatible(init->shape(), while_shape)); if (!added_trip_counter) { return init; } std::vector<HloInstruction*> tuple_elems; tuple_elems.reserve(while_shape.tuple_shapes_size()); for (int i = 0; i < while_shape.tuple_shapes_size(); ++i) { tuple_elems.push_back(add_gte(init, i)); } tuple_elems.push_back(add_new_instr( HloInstruction::CreateConstant(LiteralUtil::Zero(elem_ty)))); return add_new_instr(HloInstruction::CreateTuple(tuple_elems)); }; std::unique_ptr<HloComputation> new_while_cond = while_cond->CloneWithReplacementPairs({ while_cond->parameter_instruction(0), convert_to_old_form(add_new_instr(HloInstruction::CreateParameter( 0, new_while_shape, while_cond->parameter_instruction(0)->name()))), }); HloComputation* temp_new_while_body = module->AddEmbeddedComputation(while_body->CloneWithReplacementPairs({ while_body->parameter_instruction(0), convert_to_old_form(add_new_instr(HloInstruction::CreateParameter( 0, new_while_shape, while_body->parameter_instruction(0)->name()))), })); std::unique_ptr<HloComputation> new_while_body = temp_new_while_body->CloneWithReplacementPairs({ temp_new_while_body->root_instruction(), convert_to_new_form( add_new_instr(temp_new_while_body->root_instruction()->Clone()), Cast<HloParameterInstruction>( temp_new_while_body->parameter_instruction(0))), }); TF_RETURN_IF_ERROR(module->RemoveEmbeddedComputation(temp_new_while_body)); new_instrs.clear(); auto* new_while 
= computation->AddInstruction(HloInstruction::CreateWhile( new_while_shape, module->AddEmbeddedComputation(std::move(new_while_cond)), module->AddEmbeddedComputation(std::move(new_while_body)), get_new_while_init(while_init))); new_while->CopyBackendConfigFrom(while_op); CopyFrontendAttributes(while_op, new_while); CopyMetadata(while_op, new_while); TF_RETURN_IF_ERROR(computation->ReplaceWithNewInstruction( while_op, convert_to_old_form(new_while))); for (auto& instr : new_instrs) { computation->AddInstruction(std::move(instr)); } return new_while; } absl::StatusOr<bool> WhileLoopSimplifier::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { XLA_VLOG_LINES(3, "WhileLoopSimplifier::Run(), before:\n" + module->ToString()); bool changed = false; std::vector<HloInstruction*> while_ops; for (auto* comp : module->computations(execution_threads)) { for (auto* instr : comp->instructions()) { if (instr->opcode() == HloOpcode::kWhile) { while_ops.push_back(instr); } } } for (HloInstruction* while_op : while_ops) { TF_ASSIGN_OR_RETURN(bool result, TryRemoveRepeatedWhileTupleIndices(while_op)); changed |= result; if (result) { continue; } TF_ASSIGN_OR_RETURN(result, TryFlattenNestedTuples(while_op)); changed |= result; if (result) { continue; } TF_ASSIGN_OR_RETURN(result, TryRemoveDeadWhileParams(while_op)); changed |= result; if (result) { continue; } TF_ASSIGN_OR_RETURN(result, TryRemoveConstantParams(while_op)); changed |= result; if (result) { continue; } if (simplify_compare_instrs_) { TF_ASSIGN_OR_RETURN(result, TryRemoveTrivialCompare(while_op)); changed |= result; if (result) { continue; } } if (ContainsInstrWithOpcode(while_op->while_body(), {HloOpcode::kSend, HloOpcode::kSendDone, HloOpcode::kRecv, HloOpcode::kRecvDone}) || ContainsInstrWithOpcode(while_op->while_condition(), {HloOpcode::kSend, HloOpcode::kSendDone, HloOpcode::kRecv, HloOpcode::kRecvDone})) { VLOG(2) << "Not attempting to simplify while loop because it contains 
a " "send/recv node: " << while_op->ToShortString(); continue; } TF_ASSIGN_OR_RETURN(result, TryPropagateConstant(while_op)); changed |= result; TF_ASSIGN_OR_RETURN(result, TryRemoveWhileLoop(while_op)); changed |= result; if (result) { continue; } if (ContainsInstrWithOpcode(while_op->while_body(), {HloOpcode::kDomain}) || ContainsInstrWithOpcode(while_op->while_condition(), {HloOpcode::kDomain})) { continue; } bool merged_induction_vars = false; for (auto elem_ty : {S8, U8, S32, U32, S64, U64}) { TF_ASSIGN_OR_RETURN(auto* new_while_op, TryMergeInductionVariables(while_op, elem_ty)); if (new_while_op) { while_op = new_while_op; changed = true; merged_induction_vars = true; } } if (merged_induction_vars) { continue; } } HloDCE dce; TF_ASSIGN_OR_RETURN(bool dce_changed, dce.Run(module)); changed |= dce_changed; XLA_VLOG_LINES(3, "WhileLoopSimplifier::Run(), after:\n" + module->ToString()); return changed; } }
// Unit tests for WhileLoopSimplifier.
#include "xla/service/while_loop_simplifier.h"

#include <string>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"

namespace xla {
namespace {

using ::testing::_;
namespace op = xla::testing::opcode_matchers;

// Returns the first kWhile instruction found in the entry computation.
HloInstruction* FindFirstWhile(HloModule* m) {
  const auto& instrs = m->entry_computation()->instructions();
  return *absl::c_find_if(instrs, HloPredicateIsOp<HloOpcode::kWhile>);
}

class WhileLoopSimplifierTest : public HloTestBase {
 protected:
  // Builds a module with a simple counted loop that runs `num_iters` times
  // (counter starts at 42, bound is 42 + num_iters).
  [[nodiscard]] std::unique_ptr<VerifiedHloModule> MakeModuleWithSimpleLoop(
      int num_iters);

  // Like MakeModuleWithSimpleLoop, but the loop bound is carried as an extra
  // tuple element instead of being a constant inside the condition.
  [[nodiscard]] std::unique_ptr<VerifiedHloModule>
  MakeModuleWithSimpleLoopTupleElementLoopBound(int num_iters);
};

std::unique_ptr<VerifiedHloModule>
WhileLoopSimplifierTest::MakeModuleWithSimpleLoop(int num_iters) {
  std::string hlo_string_template = R"(
  HloModule SimpleLoop
  SimpleLoop.body {
    loop_var.1 = (s32[], s32[3]{0}) parameter(0)
    get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
    constant.1 = s32[] constant(1)
    add = s32[] add(get-tuple-element.1, constant.1)
    get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
    multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
    ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
  }
  SimpleLoop.condition {
    loop_var.2 = (s32[], s32[3]{0}) parameter(0)
    get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
    constant.2 = s32[] constant({{LOOP_BOUND}})
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  ENTRY SimpleLoop {
    constant.3 = s32[] constant(42)
    constant.4 = s32[3]{0} constant({0, 1, 2})
    tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
    ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=
      SimpleLoop.condition, body=SimpleLoop.body
  }
  )";
  std::string hlo_string = absl::StrReplaceAll(
      hlo_string_template,
      {{"{{LOOP_BOUND}}", absl::StrCat(42 + num_iters)}});
  return ParseAndReturnVerifiedModule(hlo_string).value();
}

std::unique_ptr<VerifiedHloModule>
WhileLoopSimplifierTest::MakeModuleWithSimpleLoopTupleElementLoopBound(
    int num_iters) {
  std::string hlo_string_template = R"(
  HloModule SimpleLoopWithIndirectLoopBound
  SimpleLoopWithIndirectLoopBound.body {
    loop_var.1 = (s32[], s32[3]{0}, s32[]) parameter(0)
    get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
    constant.1 = s32[] constant(1)
    add = s32[] add(get-tuple-element.1, constant.1)
    get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
    multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
    limit = s32[] get-tuple-element(loop_var.1), index=2
    ROOT tuple = (s32[], s32[3]{0}, s32[]) tuple(add, multiply, limit)
  }
  SimpleLoopWithIndirectLoopBound.condition {
    loop_var.2 = (s32[], s32[3]{0}, s32[]) parameter(0)
    get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
    get-tuple-element.4 = s32[] get-tuple-element(loop_var.2), index=2
    ROOT less-than = pred[] compare(get-tuple-element.3, get-tuple-element.4), direction=LT
  }
  ENTRY SimpleLoopWithIndirectLoopBound {
    constant.3 = s32[] constant(42)
    constant.4 = s32[3]{0} constant({0, 1, 2})
    constant.2 = s32[] constant({{LOOP_BOUND}})
    tuple.1 = (s32[], s32[3]{0}, s32[]) tuple(constant.3, constant.4, constant.2)
    ROOT while = (s32[], s32[3]{0}, s32[]) while(tuple.1),
      condition=SimpleLoopWithIndirectLoopBound.condition,
      body=SimpleLoopWithIndirectLoopBound.body
  }
  )";
  std::string hlo_string = absl::StrReplaceAll(
      hlo_string_template,
      {{"{{LOOP_BOUND}}", absl::StrCat(42 + num_iters)}});
  return ParseAndReturnVerifiedModule(hlo_string).value();
}

// A zero-trip loop is replaced with its init value.
TEST_F(WhileLoopSimplifierTest, LoopWithZeroIterationSimplified) {
  auto m = MakeModuleWithSimpleLoop(0);
  ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              op::Tuple(op::Constant(), op::Constant()));
}

// Same as above with the loop bound carried in the tuple.
TEST_F(WhileLoopSimplifierTest,
       LoopWithZeroIterationTupleElementLoopBoundSimplified) {
  auto m = MakeModuleWithSimpleLoopTupleElementLoopBound(0);
  ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              op::Tuple(op::Constant(), op::Constant(), op::Constant()));
}

// A one-trip loop is replaced with a single unrolled body iteration.
TEST_F(WhileLoopSimplifierTest, LoopWithOneIterationSimplified) {
  auto m = MakeModuleWithSimpleLoop(1);
  ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              op::Tuple(op::Add(), op::Multiply()));
}

TEST_F(WhileLoopSimplifierTest,
       LoopWithOneIterationTupleELementLoopBoundSimplified) {
  auto m = MakeModuleWithSimpleLoopTupleElementLoopBound(1);
  ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              op::Tuple(op::Add(), op::Multiply(), op::Constant()));
}

// Two or more iterations: the loop is kept as-is.
TEST_F(WhileLoopSimplifierTest, LoopWithTwoIterationsNotSimplified) {
  auto m = MakeModuleWithSimpleLoop(2);
  EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}

// Control dependencies inside the body must survive loop removal.
TEST_F(WhileLoopSimplifierTest,
       LoopWithControlDependencySimplifiedDependencyPreserved) {
  auto m = MakeModuleWithSimpleLoop(1);
  HloComputation* computation = m->entry_computation();
  auto* while_op = computation->root_instruction();
  ASSERT_EQ(while_op->opcode(), HloOpcode::kWhile);
  auto* true_op = while_op->while_body()->AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
  TF_ASSERT_OK(true_op->AddControlDependencyTo(
      while_op->while_body()->root_instruction()));
  ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
  EXPECT_THAT(computation->root_instruction()->control_predecessors(),
              ElementsAre(op::Constant()))
      << computation->ToString();
}

// A send in the body blocks simplification.
TEST_F(WhileLoopSimplifierTest, LoopWithSendNotSimplified) {
  auto m = MakeModuleWithSimpleLoop(1);
  HloComputation* computation = m->entry_computation();
  auto* while_op = computation->root_instruction();
  ASSERT_EQ(while_op->opcode(), HloOpcode::kWhile);
  auto* while_body = while_op->while_body();
  auto* token = while_body->AddInstruction(HloInstruction::CreateToken());
  auto* send = while_body->AddInstruction(HloInstruction::CreateSend(
      while_body->AddInstruction(
          HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true))),
      token, 0));
  while_body->AddInstruction(HloInstruction::CreateSendDone(send));
  EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}

// A recv in the body blocks simplification.
TEST_F(WhileLoopSimplifierTest, LoopWithRecvNotSimplified) {
  auto m = MakeModuleWithSimpleLoop(1);
  HloComputation* computation = m->entry_computation();
  auto* while_op = computation->root_instruction();
  ASSERT_EQ(while_op->opcode(), HloOpcode::kWhile);
  auto* while_body = while_op->while_body();
  auto* token = while_body->AddInstruction(HloInstruction::CreateToken());
  auto* recv = while_body->AddInstruction(
      HloInstruction::CreateRecv(ShapeUtil::MakeShape(F32, {1}), token, 0));
  while_body->AddInstruction(HloInstruction::CreateRecvDone(recv));
  EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}

// NOTE(review): the name says "Simplified" but the expectation below is that
// the pass makes no change (infeed is side-effecting) — possibly a stale name.
TEST_F(WhileLoopSimplifierTest, LoopWithInfeedSimplified) {
  auto m = MakeModuleWithSimpleLoop(1);
  HloComputation* computation = m->entry_computation();
  auto* while_op = computation->root_instruction();
  ASSERT_EQ(while_op->opcode(), HloOpcode::kWhile);
  auto* while_body = while_op->while_body();
  auto token = while_body->AddInstruction(HloInstruction::CreateToken());
  while_body->AddInstruction(HloInstruction::CreateInfeed(
      ShapeUtil::MakeShape(F32, {1}), token, "config"));
  EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}

// An infeed in the condition also blocks simplification.
TEST_F(WhileLoopSimplifierTest, LoopWithInfeedInCondNotSimplified) {
  auto m = MakeModuleWithSimpleLoop(1);
  HloComputation* computation = m->entry_computation();
  auto* while_op = computation->root_instruction();
  ASSERT_EQ(while_op->opcode(), HloOpcode::kWhile);
  auto* while_cond = while_op->while_condition();
  auto token = while_cond->AddInstruction(HloInstruction::CreateToken());
  while_cond->AddInstruction(HloInstruction::CreateInfeed(
      ShapeUtil::MakeShape(F32, {1}), token, "config"));
  EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}

// A loop whose carried value is not a tuple is not touched.
TEST_F(WhileLoopSimplifierTest, NonTupleShapedLoopNotSimplified) {
  const std::string hlo_string = R"(
  HloModule NonTupleShapedLoop
  NonTupleShapedLoop.body {
    loop_var.1 = s32[] parameter(0)
    constant.1 = s32[] constant(-1)
    ROOT add = s32[] add(s32[] loop_var.1, s32[] constant.1)
  }
  NonTupleShapedLoop.condition {
    loop_var = s32[] parameter(0)
    constant = s32[] constant(100)
    ROOT less-than = pred[] compare(s32[] loop_var, s32[] constant), direction=LT
  }
  ENTRY INonTupleShapedLoop {
    constant.2 = s32[] constant(42)
    ROOT while = s32[] while(s32[] constant.2),
      condition=NonTupleShapedLoop.condition,
      body=NonTupleShapedLoop.body
  }
  )";
  auto m = ParseAndReturnVerifiedModule(hlo_string).value();
  EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}

// Elements that permute each iteration cannot be removed.
TEST_F(WhileLoopSimplifierTest, LoopSwappingTupleElementsNotSimplified) {
  const std::string hlo_string = R"(
  HloModule SwappingTupleElements
  SwappingTupleElements.body {
    loop_var = (s32[], s32[]) parameter(0)
    get-tuple-element = s32[] get-tuple-element((s32[], s32[]) loop_var),index=1
    get-tuple-element.1 = s32[] get-tuple-element((s32[], s32[]) loop_var), index=0
    ROOT tuple = (s32[], s32[]) tuple(s32[] get-tuple-element, s32[] get-tuple-element.1)
  }
  SwappingTupleElements.always_true {
    param = (s32[], s32[]) parameter(0)
    ROOT constant = pred[] constant(true)
  }
  ENTRY SwappingTupleElements {
    x = s32[] parameter(0)
    y = s32[] parameter(1)
    tuple.1 = (s32[], s32[]) tuple(s32[] x, s32[] y)
    ROOT while = (s32[], s32[]) while((s32[], s32[]) tuple.1),
      condition=SwappingTupleElements.always_true,
      body=SwappingTupleElements.body
  }
  )";
  auto m = ParseAndReturnVerifiedModule(hlo_string).value();
  EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}

// The sole element is modified by the body, and the while is the root, so
// nothing can be removed.
TEST_F(WhileLoopSimplifierTest,
       LoopWithUnusedButModifiedTupleElementNotSimplified) {
  const std::string hlo_string = R"(
  HloModule UnusedButModifiedTupleElement
  UnusedButModifiedTupleElement.body {
    loop_var = (s32[]) parameter(0)
    constant.1 = s32[] constant(1)
    ROOT tuple = (s32[]) tuple(s32[] constant.1)
  }
  UnusedButModifiedTupleElement.always_true {
    param = (s32[]) parameter(0)
    ROOT constant = pred[] constant(true)
  }
  ENTRY UnusedButModifiedTupleElement {
    constant.2 = s32[] constant(0)
    tuple.1 = (s32[]) tuple(s32[] constant.2)
    ROOT while = (s32[]) while((s32[]) tuple.1),
      condition=UnusedButModifiedTupleElement.always_true,
      body=UnusedButModifiedTupleElement.body
  }
  )";
  auto m = ParseAndReturnVerifiedModule(hlo_string).value();
  EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}

// An element unused outside the loop can be dropped even if the body writes
// it, shrinking the while shape to (s32[]).
TEST_F(WhileLoopSimplifierTest,
       LoopWithUnusedOutsideLoopButModifiedTupleElementSimplified) {
  const std::string hlo_string = R"(
  HloModule UnusedButModifiedTupleElement
  UnusedButModifiedTupleElement.body {
    loop_var = (s32[], s32[]) parameter(0)
    constant.1 = s32[] constant(1)
    ROOT tuple = (s32[], s32[]) tuple(s32[] constant.1, constant.1)
  }
  UnusedButModifiedTupleElement.cond {
    param = (s32[], s32[]) parameter(0)
    gte.cond = s32[] get-tuple-element(param), index=0
    constant.3 = s32[] constant(1)
    ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
  }
  ENTRY UnusedButModifiedTupleElement {
    constant.2 = s32[] constant(0)
    tuple.1 = (s32[], s32[]) tuple(constant.2, constant.2)
    while = (s32[], s32[]) while(tuple.1),
      condition=UnusedButModifiedTupleElement.cond,
      body=UnusedButModifiedTupleElement.body
    ROOT gte = s32[] get-tuple-element(while), index=0
  }
  )";
  auto m = ParseAndReturnVerifiedModule(hlo_string).value();
  ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
  EXPECT_TRUE(TupleSimplifier().Run(m.get()).ok());
  EXPECT_TRUE(HloDCE().Run(m.get()).ok());
  auto m_while = AllOf(op::While(), op::Shape("(s32[])"));
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              op::GetTupleElement(m_while));
}

// An empty-tuple loop has nothing to remove.
TEST_F(WhileLoopSimplifierTest, LoopWithEmptyTupleNotSimplified) {
  const std::string hlo_string = R"(
  HloModule EmptyTuple
  EmptyTuple.body {
    loop_var = () parameter(0)
    ROOT tuple = () tuple()
  }
  EmptyTuple.always_true {
    param = () parameter(0)
    ROOT constant = pred[] constant(true)
  }
  ENTRY EmptyTuple {
    tuple.1 = () tuple()
    ROOT while = () while(() tuple.1), condition=EmptyTuple.always_true,
      body=EmptyTuple.body
  }
  )";
  auto m = ParseAndReturnVerifiedModule(hlo_string).value();
  EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}

// An element duplicated into two output slots cannot be removed.
TEST_F(WhileLoopSimplifierTest, LoopWithElemUsedTwiceNotSimplified) {
  const std::string hlo_string = R"(
  HloModule ElemUsedTwice
  ElemUsedTwice.body {
    param0 = (s32[], s32[]) parameter(0)
    get-tuple-element = s32[] get-tuple-element((s32[], s32[]) param0), index=0
    ROOT tuple = (s32[], s32[]) tuple(s32[] get-tuple-element, s32[] get-tuple-element)
  }
  ElemUsedTwice.always_true {
    param = (s32[], s32[]) parameter(0)
    ROOT constant = pred[] constant(true)
  }
  ENTRY ElemUsedTwice {
    x = s32[] parameter(0)
    y = s32[] parameter(1)
    tuple.1 = (s32[], s32[]) tuple(s32[] x, s32[] y)
    ROOT while = (s32[], s32[]) while((s32[], s32[]) tuple.1),
      condition=ElemUsedTwice.always_true, body=ElemUsedTwice.body
  }
  )";
  auto m = ParseAndReturnVerifiedModule(hlo_string).value();
  EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}

// Element 0 is dead (passed through, never read): the pass removes it and the
// remaining loop carries (counter, limit).
TEST_F(WhileLoopSimplifierTest, RemoveUnusedLoopOperands) {
  const std::string hlo_string = R"(
  HloModule RemoveUnusedOperands
  RemoveUnusedOperands.body {
    loop_var = (s32[], s32[], s32[]) parameter(0)
    get-tuple-element.1 = s32[] get-tuple-element((s32[], s32[], s32[]) loop_var), index=0
    get-tuple-element.2 = s32[] get-tuple-element((s32[], s32[], s32[]) loop_var), index=1
    constant.1 = s32[] constant(1)
    add = s32[] add(s32[] get-tuple-element.2, s32[] constant.1)
    get-tuple-element.3 = s32[] get-tuple-element((s32[], s32[], s32[]) loop_var), index=2
    ROOT tuple = (s32[], s32[], s32[]) tuple(s32[] get-tuple-element.1, s32[] add, s32[] get-tuple-element.3)
  }
  RemoveUnusedOperands.loop_condition {
    constant.2 = s32[] constant(0)
    param0 = (s32[], s32[], s32[]) parameter(0)
    get-tuple-element = s32[] get-tuple-element((s32[], s32[], s32[]) param0), index=2
    ROOT equal-to = pred[] compare(s32[] constant.2, s32[] get-tuple-element), direction=EQ
  }
  ENTRY RemoveUnusedOperands {
    x = s32[] parameter(0)
    constant.3 = s32[] constant(0)
    y = s32[] parameter(1)
    tuple.1 = (s32[], s32[], s32[]) tuple(s32[] x, s32[] constant.3, s32[] y)
    ROOT while = (s32[], s32[], s32[]) while((s32[], s32[], s32[]) tuple.1),
      condition=RemoveUnusedOperands.loop_condition,
      body=RemoveUnusedOperands.body
  }
  )";
  auto m = ParseAndReturnVerifiedModule(hlo_string).value();
  EXPECT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
  // The original while is still present (as a user of the new one?), so find
  // the rewritten op by name inequality rather than FindFirstWhile.
  const auto& instrs = m->entry_computation()->instructions();
  HloInstruction* new_while_op =
      *absl::c_find_if(instrs, [&](const HloInstruction* instr) {
        return (instr->opcode() == HloOpcode::kWhile &&
                instr->name() != "while");
      });
  auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
  EXPECT_TRUE(
      ShapeUtil::Equal(new_while_op->shape(),
                       ShapeUtil::MakeTupleShape({scalar_s32, scalar_s32})))
      << ShapeUtil::HumanString(new_while_op->shape());
  EXPECT_THAT(
      new_while_op->while_body()->root_instruction(),
      op::Tuple(
          op::Add(op::GetTupleElement(op::Parameter(0), 0), op::Constant()),
          op::GetTupleElement(op::Parameter(0), 1)));
  EXPECT_THAT(new_while_op->while_condition()->root_instruction(),
              op::Eq(op::Constant(), op::GetTupleElement(op::Parameter(0), 1)));
}

// op_name metadata must be propagated to both the replacement tuple and the
// new while op.
TEST_F(WhileLoopSimplifierTest, RemoveUnusedLoopOperandsCheckMetadata) {
  const std::string hlo_string = R"(
  HloModule RemoveUnusedOperands
  RemoveUnusedOperands.body {
    loop_var = (s32[], s32[], s32[]) parameter(0)
    get-tuple-element.1 = s32[] get-tuple-element((s32[], s32[], s32[]) loop_var), index=0
    get-tuple-element.2 = s32[] get-tuple-element((s32[], s32[], s32[]) loop_var), index=1
    constant.1 = s32[] constant(1)
    add = s32[] add(s32[] get-tuple-element.2, s32[] constant.1)
    get-tuple-element.3 = s32[] get-tuple-element((s32[], s32[], s32[]) loop_var), index=2
    ROOT tuple = (s32[], s32[], s32[]) tuple(s32[] get-tuple-element.1, s32[] add, s32[] get-tuple-element.3)
  }
  RemoveUnusedOperands.loop_condition {
    constant.2 = s32[] constant(0)
    param0 = (s32[], s32[], s32[]) parameter(0)
    get-tuple-element = s32[] get-tuple-element((s32[], s32[], s32[]) param0), index=2
    ROOT equal-to = pred[] compare(s32[] constant.2, s32[] get-tuple-element), direction=EQ
  }
  ENTRY RemoveUnusedOperands {
    x = s32[] parameter(0)
    constant.3 = s32[] constant(0)
    y = s32[] parameter(1)
    tuple.1 = (s32[], s32[], s32[]) tuple(s32[] x, s32[] constant.3, s32[] y)
    ROOT while = (s32[], s32[], s32[]) while((s32[], s32[], s32[]) tuple.1),
      condition=RemoveUnusedOperands.loop_condition,
      body=RemoveUnusedOperands.body, metadata={op_name="while"}
  }
  )";
  auto m = ParseAndReturnVerifiedModule(hlo_string).value();
  EXPECT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
  OpMetadata while_metadata;
  while_metadata.set_op_name("while");
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              AllOf(op::Tuple(), op::Metadata(while_metadata)));
  EXPECT_THAT(m->entry_computation()->GetInstructionWithName("while.1"),
              AllOf(op::While(), op::Metadata(while_metadata)));
}

// Side-effecting ops inside the body (infeed) don't keep otherwise-dead
// loop operands alive: the while shrinks to an empty tuple.
TEST_F(WhileLoopSimplifierTest,
       RemoveUnusedLoopOperandsDespiteSideEffectingOps) {
  const std::string hlo_string = R"(
  HloModule RemoveUnusedOperands
  body {
    loop_var = (s32[]) parameter(0)
    gte0 = s32[] get-tuple-element(loop_var), index=0
    token0 = token[] after-all()
    unused = ((s32[], pred[]), token[]) infeed(token0)
    ROOT tuple = (s32[]) tuple(gte0)
  }
  cond {
    loop_var = (s32[]) parameter(0)
    ROOT constant = pred[] constant(true)
  }
  ENTRY RemoveUnusedOperands {
    x = s32[] parameter(0)
    tuple.1 = (s32[]) tuple(s32[] x)
    ROOT while = (s32[]) while((s32[]) tuple.1),
      condition=cond, body=body
  }
  )";
  auto m = ParseAndReturnVerifiedModule(hlo_string).value();
  EXPECT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
  const auto& instrs = m->entry_computation()->instructions();
  HloInstruction* new_while_op =
      *absl::c_find_if(instrs, [&](const HloInstruction* instr) {
        return (instr->opcode() == HloOpcode::kWhile &&
                instr->name() != "while");
      });
  EXPECT_TRUE(ShapeUtil::IsEmptyTuple(new_while_op->shape()))
      << new_while_op->shape().ToString();
}

// A body whose root is the parameter itself (not a tuple op) is not touched.
TEST_F(WhileLoopSimplifierTest, LoopWithNonTupleBodyShapeNotSimplified) {
  const std::string hlo_string = R"(
  HloModule BodyHasNonTupleRoot
  BodyHasNonTupleRoot.passthrough {
    ROOT param = (s32[], s32[]) parameter(0)
  }
  BodyHasNonTupleRoot.always_true {
    param.1 = (s32[], s32[]) parameter(0)
    ROOT constant = pred[] constant(true)
  }
  ENTRY BodyHasNonTupleRoot {
    init_value = (s32[], s32[]) parameter(0)
    ROOT while = (s32[], s32[]) while((s32[], s32[]) init_value),
      condition=BodyHasNonTupleRoot.always_true,
      body=BodyHasNonTupleRoot.passthrough
  }
  )";
  auto m = ParseAndReturnVerifiedModule(hlo_string).value();
  EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}

// A body root that is a custom-call (tuple-shaped but not a kTuple) blocks
// simplification.
TEST_F(WhileLoopSimplifierTest,
       LoopWithNonTupleBodyRootInstructionNotSimplified) {
  const std::string hlo_string = R"(
  HloModule SimpleLoop
  SimpleLoop.body {
    loop_var.1 = (s32[], s32[3]{0}) parameter(0)
    get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
    constant.1 = s32[] constant(1)
    add = s32[] add(get-tuple-element.1, constant.1)
    get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
    multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
    ROOT custom-call = (s32[], s32[3]{0}) custom-call(add, multiply),
      custom_call_target="x"
  }
  SimpleLoop.condition {
    loop_var.2 = (s32[], s32[3]{0}) parameter(0)
    get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
    constant.2 = s32[] constant(44)
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  ENTRY SimpleLoop {
    constant.3 = s32[] constant(42)
    constant.4 = s32[3]{0} constant({0, 1, 2})
    tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
    ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=
      SimpleLoop.condition, body=SimpleLoop.body
  }
  )";
  auto m = ParseAndReturnVerifiedModule(hlo_string).value();
  EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}

// A loop-invariant *array* constant is not propagated (only scalars are).
TEST_F(WhileLoopSimplifierTest, LoopWithArrayConstantNotSimplified) {
  const std::string hlo_string = R"(
  HloModule SimpleLoop
  SimpleLoop.body {
    loop_var.1 = (s32[], s32[3]{0}, s32[3]{0}) parameter(0)
    get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
    constant.1 = s32[] constant(1)
    add = s32[] add(get-tuple-element.1, constant.1)
    get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
    get-tuple-element.3 = s32[3]{0} get-tuple-element(loop_var.1), index=2
    add.2 = s32[3]{0} add(get-tuple-element.2, get-tuple-element.3)
    ROOT tuple = (s32[], s32[3]{0}, s32[3]{0}) tuple(add, add.2, get-tuple-element.3)
  }
  SimpleLoop.condition {
    loop_var.2 = (s32[], s32[3]{0}, s32[3]{0}) parameter(0)
    get-tuple-element.4 = s32[] get-tuple-element(loop_var.2), index=0
    constant.2 = s32[] constant(47)
    ROOT less-than = pred[] compare(get-tuple-element.4, constant.2), direction=LT
  }
  ENTRY SimpleLoop {
    constant.3 = s32[] constant(42)
    constant.4 = s32[3]{0} constant({0, 1, 2})
    tuple.1 = (s32[], s32[3]{0}, s32[3]{0}) tuple(constant.3, constant.4, constant.4)
    ROOT while = (s32[], s32[3]{0}, s32[3]{0}) while(tuple.1), condition=
      SimpleLoop.condition, body=SimpleLoop.body
  }
  )";
  auto m = ParseAndReturnVerifiedModule(hlo_string).value();
  EXPECT_FALSE(WhileLoopSimplifier().Run(m.get()).value());
}

// Nested loop-carry tuples are flattened; external users still see the
// original nested shape.
TEST_F(WhileLoopSimplifierTest, FlattenNestedTuple) {
  const std::string hlo_string = R"(
  HloModule Test
  Body {
    param = ((s32[1]), (s32[2], s32[3], (s32[4]))) parameter(0)
    ta = (s32[1]) get-tuple-element(param), index=0
    a = s32[1] get-tuple-element(ta), index=0
    a.1 = s32[1] add(a, a)
    tbcd = (s32[2], s32[3], (s32[4])) get-tuple-element(param), index=1
    ROOT tuple = ((s32[1]), (s32[2], s32[3], (s32[4]))) tuple(ta, tbcd)
  }
  Cond {
    param = ((s32[1]), (s32[2], s32[3], (s32[4]))) parameter(0)
    ROOT cond = pred[] constant(true)
  }
  ENTRY Loop {
    a = s32[1] constant({0})
    b = s32[2] constant({0,1})
    c = s32[3] constant({0,1,2})
    d = s32[4] constant({0,1,2,3})
    ta = (s32[1]) tuple(a)
    td = (s32[4]) tuple(d)
    tbcd = (s32[2], s32[3], (s32[4])) tuple(b, c, td)
    init = ((s32[1]), (s32[2], s32[3], (s32[4]))) tuple(ta, tbcd)
    ROOT while = ((s32[1]), (s32[2], s32[3], (s32[4]))) while(init),
      condition=Cond, body=Body
  })";
  auto m = ParseAndReturnVerifiedModule(hlo_string).value();
  EXPECT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
  EXPECT_TRUE(HloDCE().Run(m.get()).ok());
  HloInstruction* new_while = FindFirstWhile(m.get());
  Shape flat_tuple = ParseShape("(s32[1], s32[2], s32[3], s32[4])").value();
  SCOPED_TRACE(m->ToString());
  EXPECT_TRUE(ShapeUtil::Equal(new_while->shape(), flat_tuple));
  EXPECT_TRUE(ShapeUtil::Equal(
      new_while->while_body()->root_instruction()->shape(), flat_tuple));
  EXPECT_TRUE(ShapeUtil::Equal(
      new_while->while_body()->parameter_instruction(0)->shape(), flat_tuple));
  EXPECT_TRUE(ShapeUtil::Equal(
      new_while->while_condition()->parameter_instruction(0)->shape(),
      flat_tuple));
  EXPECT_TRUE(ShapeUtil::Equal(
      m->entry_computation()->root_instruction()->shape(),
      ParseShape("((s32[1]), (s32[2], s32[3], (s32[4])))").value()));
}

// A carry consisting solely of constants collapses to the constants.
TEST_F(WhileLoopSimplifierTest, OnlyConstantsInLoopCarry) {
  const std::string hlo_string = R"(
  HloModule Test
  Body {
    param = (s32[1]) parameter(0)
    a = s32[1] constant({0})
    ROOT tuple = (s32[1]) tuple(a)
  }
  Cond {
    param = (s32[1]) parameter(0)
    ROOT cond = pred[] constant(true)
  }
  ENTRY Loop {
    a = s32[1] constant({0})
    init = (s32[1]) tuple(a)
    ROOT while = (s32[1]) while(init), condition=Cond, body=Body
  })";
  auto m = ParseAndReturnVerifiedModule(hlo_string).value();
  EXPECT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
  EXPECT_TRUE(HloDCE().Run(m.get()).ok());
  EXPECT_TRUE(TupleSimplifier().Run(m.get()).ok());
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              op::Tuple(op::Constant()));
}

// Element b is the same constant in init and body, so it is hoisted out;
// c changes value and must stay in the carry.
TEST_F(WhileLoopSimplifierTest, RemoveConstantFromLoopCarry) {
  const std::string hlo_string = R"(
  HloModule Test
  Body {
    param = (s32[1], s32[2], s32[3]) parameter(0)
    a = s32[1] get-tuple-element(param), index=0
    a.1 = s32[1] add(a, a)
    b = s32[2] constant({1,1})
    c = s32[3] constant({10,10,10})
    ROOT tuple = (s32[1], s32[2], s32[3]) tuple(a.1, b, c)
  }
  Cond {
    param = (s32[1], s32[2], s32[3]) parameter(0)
    a = s32[1] get-tuple-element(param), index=0
    b = s32[2] get-tuple-element(param), index=1
    c = s32[3] get-tuple-element(param), index=2
    ROOT cond = pred[] constant(true)
  }
  ENTRY Loop {
    a = s32[1] constant({0})
    b = s32[2] constant({1,1})
    c = s32[3] constant({2,2,2})
    init = (s32[1], s32[2], s32[3]) tuple(a,b,c)
    ROOT while = (s32[1], s32[2], s32[3]) while(init),
      condition=Cond, body=Body
  })";
  auto m = ParseAndReturnVerifiedModule(hlo_string).value();
  EXPECT_TRUE(WhileLoopSimplifier().Run(m.get()).value());
  EXPECT_TRUE(HloDCE().Run(m.get()).ok());
  EXPECT_TRUE(TupleSimplifier().Run(m.get()).ok());
  HloInstruction* new_while = FindFirstWhile(m.get());
  Shape new_while_shape = ParseShape("(s32[1], s32[3])").value();
  EXPECT_TRUE(ShapeUtil::Equal(new_while->shape(), new_while_shape));
  EXPECT_TRUE(ShapeUtil::Equal(
      new_while->while_body()->root_instruction()->shape(), new_while_shape));
  EXPECT_TRUE(ShapeUtil::Equal(
      new_while->while_body()->parameter_instruction(0)->shape(),
      new_while_shape));
  EXPECT_TRUE(ShapeUtil::Equal(
      new_while->while_condition()->parameter_instruction(0)->shape(),
      new_while_shape));
  EXPECT_TRUE(
      ShapeUtil::Equal(m->entry_computation()->root_instruction()->shape(),
                       ParseShape("(s32[1], s32[2], s32[3])").value()));
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              op::Tuple(_, op::Constant(), _));
}

// NOTE(review): the chunk is truncated here; the declaration below continues
// past the visible window.
const char* const
kSimpleMergeInductionVariablesModule = R"( HloModule Test Body { param = (TYPE[], TYPE[], TYPE[]) parameter(0) a = TYPE[] get-tuple-element(param), index=0 one = TYPE[] constant(1) a1 = TYPE[] add(a, one) b = TYPE[] get-tuple-element(param), index=1 negone = TYPE[] constant(-1) b1 = TYPE[] add(b, negone) c = TYPE[] add(a, b) ROOT tuple = (TYPE[], TYPE[], TYPE[]) tuple(a1,b1,c) } Cond { param = (TYPE[], TYPE[], TYPE[]) parameter(0) a = TYPE[] get-tuple-element(param), index=0 b = TYPE[] get-tuple-element(param), index=1 sum = TYPE[] power(a, b) ten = TYPE[] constant(10) ROOT cond = pred[] compare(sum, ten), direction=LT } ENTRY Loop { a = TYPE[] constant(10) b = TYPE[] constant(100) c = TYPE[] constant(0) init = (TYPE[], TYPE[], TYPE[]) tuple(a,b,c) while = (TYPE[], TYPE[], TYPE[]) while(init), condition=Cond, body=Body a1 = TYPE[] get-tuple-element(while), index=0 b1 = TYPE[] get-tuple-element(while), index=1 c1 = TYPE[] get-tuple-element(while), index=2 sum = TYPE[] add(a1, b1) ROOT sum.1 = TYPE[] add(sum, c1) })"; TEST_F(WhileLoopSimplifierTest, MergeInductionVariables_Simple) { std::string hlo_string = absl::StrReplaceAll( kSimpleMergeInductionVariablesModule, {{"TYPE", "s32"}}); auto m = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(WhileLoopSimplifier().Run(m.get()).value()); EXPECT_TRUE(HloDCE().Run(m.get()).ok()); EXPECT_TRUE(TupleSimplifier().Run(m.get()).ok()); HloInstruction* new_while = FindFirstWhile(m.get()); SCOPED_TRACE(m->ToString()); Shape new_while_shape = ParseShape("(s32[], s32[], s32[], s32[])").value(); EXPECT_TRUE(ShapeUtil::Equal(new_while->shape(), new_while_shape)); EXPECT_TRUE(ShapeUtil::Equal( new_while->while_body()->root_instruction()->shape(), new_while_shape)); EXPECT_TRUE(ShapeUtil::Equal( new_while->while_body()->parameter_instruction(0)->shape(), new_while_shape)); EXPECT_TRUE(ShapeUtil::Equal( new_while->while_condition()->parameter_instruction(0)->shape(), new_while_shape)); 
EXPECT_THAT(new_while->while_body()->root_instruction(), op::Tuple(op::GetTupleElement(op::Parameter(), 0), op::GetTupleElement(op::Parameter(), 1), op::Add(), op::Add(op::GetTupleElement(op::Parameter(), 3), op::Constant()))); EXPECT_THAT(new_while->while_condition()->root_instruction(), op::Lt(op::Power(op::Add(), op::Add()), op::Constant())); } TEST_F(WhileLoopSimplifierTest, MergeInductionVariables_SkipS16) { std::string hlo_string = absl::StrReplaceAll( kSimpleMergeInductionVariablesModule, {{"TYPE", "s16"}}); EXPECT_FALSE(WhileLoopSimplifier() .Run(ParseAndReturnVerifiedModule(hlo_string).value().get()) .value()); } TEST_F(WhileLoopSimplifierTest, RemoveRepeatedParams) { const std::string hlo_string = R"( HloModule SwappingTupleElements SwappingTupleElements.body { loop_var = (s32[], s32[], s32[]) parameter(0) get-tuple-element = s32[] get-tuple-element(loop_var), index=0 get-tuple-element.1 = s32[] get-tuple-element(loop_var), index=1 get-tuple-element.2 = s32[] get-tuple-element(loop_var), index=2 y = s32[] add(get-tuple-element.1, get-tuple-element.2) ROOT tuple = (s32[], s32[], s32[]) tuple(s32[] get-tuple-element, y, s32[] get-tuple-element.2) } SwappingTupleElements.always_true { param = (s32[], s32[], s32[]) parameter(0) get-tuple-element = s32[] get-tuple-element(param), index=0 get-tuple-element.1 = s32[] get-tuple-element(param), index=1 ROOT less-than = pred[] compare(get-tuple-element, get-tuple-element.1), direction=LT } ENTRY SwappingTupleElements { x = s32[] parameter(0) y = s32[] parameter(1) tuple.1 = (s32[], s32[], s32[]) tuple(s32[] x, s32[] y, s32[] x) ROOT while = (s32[], s32[], s32[]) while(tuple.1), condition=SwappingTupleElements.always_true, body=SwappingTupleElements.body } )"; auto m = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(WhileLoopSimplifier().Run(m.get()).value()); HloInstruction* new_while = FindFirstWhile(m.get()); Shape new_while_shape = ParseShape("(s32[], s32[])").value(); 
EXPECT_TRUE(ShapeUtil::Equal(new_while->shape(), new_while_shape)); EXPECT_TRUE(ShapeUtil::Equal( new_while->while_body()->root_instruction()->shape(), new_while_shape)); EXPECT_TRUE(ShapeUtil::Equal( new_while->while_body()->parameter_instruction(0)->shape(), new_while_shape)); EXPECT_TRUE(ShapeUtil::Equal( new_while->while_condition()->parameter_instruction(0)->shape(), new_while_shape)); } TEST_F(WhileLoopSimplifierTest, LoopWithUnusedGroupSimplified) { const std::string hlo_string = R"( HloModule LoopWithUnusedGroupSimplified LoopWithUnusedGroupSimplified.body { loop_var = (s32[], s32[], s32[]) parameter(0) constant.1 = s32[] constant(1) gte0 = s32[] get-tuple-element(loop_var), index=1 gte1 = s32[] get-tuple-element(loop_var), index=2 add = s32[] add(gte0, gte1) ROOT tuple = (s32[], s32[], s32[]) tuple(constant.1, add, add) } LoopWithUnusedGroupSimplified.cond { param = (s32[], s32[], s32[]) parameter(0) gte.cond = s32[] get-tuple-element(param), index=0 constant.3 = s32[] constant(1) ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT } ENTRY LoopWithUnusedGroupSimplified { constant.2 = s32[] constant(0) tuple.1 = (s32[], s32[], s32[]) tuple(constant.2, constant.2, constant.2) while = (s32[], s32[], s32[]) while(tuple.1), condition=LoopWithUnusedGroupSimplified.cond, body=LoopWithUnusedGroupSimplified.body ROOT gte = s32[] get-tuple-element(while), index=0 } )"; auto m = ParseAndReturnVerifiedModule(hlo_string).value(); ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value()); EXPECT_TRUE(TupleSimplifier().Run(m.get()).ok()); EXPECT_TRUE(HloDCE().Run(m.get()).ok()); auto m_while = AllOf(op::While(), op::Shape("(s32[])")); EXPECT_THAT(m->entry_computation()->root_instruction(), op::GetTupleElement(m_while)); } TEST_F(WhileLoopSimplifierTest, LoopWithUnusedNonPassthroughElementSimplified) { const std::string hlo_string = R"( HloModule LoopWithUnusedNonPassthroughElementSimplified LoopWithUnusedNonPassthroughElementSimplified.body { loop_var = (s32[], 
s32[], s32[]) parameter(0) constant.1 = s32[] constant(1) gte0 = s32[] get-tuple-element(loop_var), index=1 gte1 = s32[] get-tuple-element(loop_var), index=2 add = s32[] add(gte0, gte1) add2 = s32[] add(gte0, gte0) ROOT tuple = (s32[], s32[], s32[]) tuple(constant.1, add2, add) } LoopWithUnusedNonPassthroughElementSimplified.cond { param = (s32[], s32[], s32[]) parameter(0) gte.cond = s32[] get-tuple-element(param), index=0 constant.3 = s32[] constant(1) ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT } ENTRY LoopWithUnusedNonPassthroughElementSimplified { constant.2 = s32[] constant(0) tuple.1 = (s32[], s32[], s32[]) tuple(constant.2, constant.2, constant.2) while = (s32[], s32[], s32[]) while(tuple.1), condition=LoopWithUnusedNonPassthroughElementSimplified.cond, body=LoopWithUnusedNonPassthroughElementSimplified.body gte2 = s32[] get-tuple-element(while), index=0 gte3 = s32[] get-tuple-element(while), index=1 ROOT tuple.2 = (s32[], s32[]) tuple(gte2, gte3) } )"; auto m = ParseAndReturnVerifiedModule(hlo_string).value(); ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value()); EXPECT_TRUE(TupleSimplifier().Run(m.get()).ok()); EXPECT_TRUE(HloDCE().Run(m.get()).ok()); EXPECT_THAT(m->entry_computation()->root_instruction(), AllOf(op::While(), op::Shape("(s32[], s32[])"))); } TEST_F(WhileLoopSimplifierTest, RemoveUnusedParamsDespiteSendRecv) { const std::string hlo_string = R"( HloModule RemoveUnusedParamsDespiteSendRecv RemoveUnusedParamsDespiteSendRecv.body { loop_var = (s32[], s32[], s32[]) parameter(0) get-tuple-element.1 = s32[] get-tuple-element((s32[], s32[], s32[]) loop_var), index=0 get-tuple-element.2 = s32[] get-tuple-element((s32[], s32[], s32[]) loop_var), index=1 constant.1 = s32[] constant(1) token.1 = token[] after-all() send.1 = (s32[], u32[], token[]) send(constant.1, token.1), channel_id=42, is_host_transfer=true send-done.1 = token[] send-done(send.1), channel_id=42, is_host_transfer=true recv.1 = (s32[], u32[], token[]) 
recv(send-done.1), channel_id=43, is_host_transfer=true add = s32[] add(s32[] get-tuple-element.2, s32[] constant.1) recv-done.1 = (s32[], token[]) recv-done(recv.1), channel_id=43, is_host_transfer=true get-tuple-element.3 = s32[] get-tuple-element((s32[], s32[], s32[]) loop_var), index=2 ROOT tuple = (s32[], s32[], s32[]) tuple(s32[] get-tuple-element.1, s32[] add, s32[] get-tuple-element.3) } RemoveUnusedParamsDespiteSendRecv.loop_condition { constant.2 = s32[] constant(0) param0 = (s32[], s32[], s32[]) parameter(0) get-tuple-element = s32[] get-tuple-element((s32[], s32[], s32[]) param0), index=2 ROOT equal-to = pred[] compare(s32[] constant.2, s32[] get-tuple-element), direction=EQ } ENTRY RemoveUnusedParamsDespiteSendRecv { x = s32[] parameter(0) constant.3 = s32[] constant(0) y = s32[] parameter(1) tuple.1 = (s32[], s32[], s32[]) tuple(s32[] x, s32[] constant.3, s32[] y) ROOT while = (s32[], s32[], s32[]) while((s32[], s32[], s32[]) tuple.1), condition=RemoveUnusedParamsDespiteSendRecv.loop_condition, body=RemoveUnusedParamsDespiteSendRecv.body } )"; auto m = ParseAndReturnVerifiedModule(hlo_string).value(); ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value()); HloInstruction* new_while = FindFirstWhile(m.get()); Shape new_while_shape = ParseShape("(s32[], s32[])").value(); EXPECT_TRUE(ShapeUtil::Equal(new_while->shape(), new_while_shape)); EXPECT_TRUE(ShapeUtil::Equal( new_while->while_body()->root_instruction()->shape(), new_while_shape)); EXPECT_TRUE(ShapeUtil::Equal( new_while->while_body()->parameter_instruction(0)->shape(), new_while_shape)); EXPECT_TRUE(ShapeUtil::Equal( new_while->while_condition()->parameter_instruction(0)->shape(), new_while_shape)); } TEST_F(WhileLoopSimplifierTest, RemoveTrivialCompare) { const std::string hlo_template = R"( HloModule RemoveTrivialCompare RemoveTrivialCompare.body { loop_var = (pred[], s32[]) parameter(0) get-tuple-element.2 = s32[] get-tuple-element((pred[], s32[]) loop_var), index=1 cons = s32[] 
constant({{LOOP_CONSTANT}}) comp = pred[] compare(get-tuple-element.2, cons), direction={{DIRECTION}} constant.1 = s32[] constant(1) add = s32[] add(s32[] get-tuple-element.2, s32[] constant.1) ROOT tuple = (pred[], s32[]) tuple(comp, s32[] add) } RemoveTrivialCompare.loop_condition { constant.2 = s32[] constant(10) param0 = (pred[], s32[]) parameter(0) get-tuple-element = s32[] get-tuple-element((pred[], s32[]) param0), index=1 ROOT equal-to = pred[] compare(s32[] get-tuple-element, s32[] constant.2), direction=LT } ENTRY RemoveTrivialCompare { constant.3 = s32[] constant(1) t = pred[] constant(true) tuple.1 = (pred[], s32[]) tuple(t, s32[] constant.3) ROOT while = (pred[], s32[]) while((pred[], s32[]) tuple.1), condition=RemoveTrivialCompare.loop_condition, body=RemoveTrivialCompare.body } )"; for (std::string dir : {"LT", "GT"}) { for (int i = 1; i > -5; i--) { std::string hlo_string = absl::StrReplaceAll( hlo_template, {{"{{LOOP_CONSTANT}}", absl::StrCat(i)}, {"{{DIRECTION}}", dir}}); auto m = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(WhileLoopSimplifier(true) .Run(m.get()) .value()); HloInstruction* while_instr = FindFirstWhile(m.get()); EXPECT_THAT(while_instr->while_body()->root_instruction(), op::Tuple(op::Constant(), _)); EXPECT_TRUE(while_instr->while_body() ->root_instruction() ->operand(0) ->literal() .IsAll(dir == "GT")); } for (int i = 11; i < 15; i++) { std::string hlo_string = absl::StrReplaceAll( hlo_template, {{"{{LOOP_CONSTANT}}", absl::StrCat(i)}, {"{{DIRECTION}}", dir}}); auto m = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(WhileLoopSimplifier(true) .Run(m.get()) .value()); HloInstruction* while_instr = FindFirstWhile(m.get()); EXPECT_THAT(while_instr->while_body()->root_instruction(), op::Tuple(op::Constant(), _)); EXPECT_TRUE(while_instr->while_body() ->root_instruction() ->operand(0) ->literal() .IsAll(dir == "LT")); } } } TEST_F(WhileLoopSimplifierTest, NotRemoveCompare) { const std::string hlo_string = 
R"( HloModule RemoveTrivialCompare RemoveTrivialCompare.body { loop_var = (pred[], s32[]) parameter(0) get-tuple-element.2 = s32[] get-tuple-element((pred[], s32[]) loop_var), index=1 five = s32[] constant(5) comp = pred[] compare(get-tuple-element.2, five), direction=LT constant.1 = s32[] constant(1) add = s32[] add(s32[] get-tuple-element.2, s32[] constant.1) ROOT tuple = (pred[], s32[]) tuple(comp, s32[] add) } RemoveTrivialCompare.loop_condition { constant.2 = s32[] constant(10) param0 = (pred[], s32[]) parameter(0) get-tuple-element = s32[] get-tuple-element((pred[], s32[]) param0), index=1 ROOT equal-to = pred[] compare(s32[] get-tuple-element, s32[] constant.2), direction=LT } ENTRY RemoveTrivialCompare { constant.3 = s32[] constant(0) t = pred[] constant(true) tuple.1 = (pred[], s32[]) tuple(t, s32[] constant.3) ROOT while = (pred[], s32[]) while((pred[], s32[]) tuple.1), condition=RemoveTrivialCompare.loop_condition, body=RemoveTrivialCompare.body } )"; auto m = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_FALSE(WhileLoopSimplifier(true) .Run(m.get()) .value()); } TEST_F(WhileLoopSimplifierTest, RemoveDynUpdSlice) { const std::string hlo_string = R"( HloModule jit_scan %region_0.6 (arg_tuple.7: (s32[], f32[], f32[3], f32[3])) -> (s32[], f32[], f32[3], f32[3]) { %arg_tuple.7 = (s32[], f32[], f32[3]{0}, f32[3]{0}) parameter(0) %get-tuple-element.8 = s32[] get-tuple-element((s32[], f32[], f32[3]{0}, f32[3]{0}) %arg_tuple.7), index=0 %constant.12 = s32[] constant(1) %add.28 = s32[] add(s32[] %get-tuple-element.8, s32[] %constant.12) %get-tuple-element.9 = f32[] get-tuple-element((s32[], f32[], f32[3]{0}, f32[3]{0}) %arg_tuple.7), index=1 %sine.15 = f32[] sine(f32[] %get-tuple-element.9) %get-tuple-element.10 = f32[3]{0} get-tuple-element((s32[], f32[], f32[3]{0}, f32[3]{0}) %arg_tuple.7), index=2 %cosine.16 = f32[] cosine(f32[] %get-tuple-element.9) %reshape.18 = f32[1]{0} reshape(f32[] %cosine.16) %constant.14 = s32[] constant(0) %compare.19 = 
pred[] compare(s32[] %get-tuple-element.8, s32[] %constant.14), direction=LT %constant.13 = s32[] constant(3) %add.20 = s32[] add(s32[] %get-tuple-element.8, s32[] %constant.13) %select.21 = s32[] select(pred[] %compare.19, s32[] %add.20, s32[] %get-tuple-element.8) %dynamic-update-slice.22 = f32[3]{0} dynamic-update-slice(f32[3]{0} %get-tuple-element.10, f32[1]{0} %reshape.18, s32[] %select.21) %get-tuple-element.11 = f32[3]{0} get-tuple-element((s32[], f32[], f32[3]{0}, f32[3]{0}) %arg_tuple.7), index=3 %dynamic-update-slice.27 = f32[3]{0} dynamic-update-slice(f32[3]{0} %get-tuple-element.11, f32[1]{0} %reshape.18, s32[] %select.21) ROOT %tuple.29 = (s32[], f32[], f32[3]{0}, f32[3]{0}) tuple(s32[] %add.28, f32[] %sine.15, f32[3]{0} %dynamic-update-slice.22, f32[3]{0} %dynamic-update-slice.27) } %region_1.30 (arg_tuple.31: (s32[], f32[], f32[3], f32[3])) -> pred[] { %arg_tuple.31 = (s32[], f32[], f32[3]{0}, f32[3]{0}) parameter(0) %get-tuple-element.32 = s32[] get-tuple-element((s32[], f32[], f32[3]{0}, f32[3]{0}) %arg_tuple.31), index=0 %constant.36 = s32[] constant(3) ROOT %compare.37 = pred[] compare(s32[] %get-tuple-element.32, s32[] %constant.36), direction=LT } ENTRY %main.44 (Arg_0.1: f32[]) -> (f32[], f32[3], f32[3]) { %constant.4 = s32[] constant(0) %Arg_0.1 = f32[] parameter(0), sharding={replicated} %constant.2 = f32[] constant(0) %broadcast.3 = f32[3]{0} broadcast(f32[] %constant.2), dimensions={} %tuple.5 = (s32[], f32[], f32[3]{0}, f32[3]{0}) tuple(s32[] %constant.4, f32[] %Arg_0.1, f32[3]{0} %broadcast.3, f32[3]{0} %broadcast.3) %while.38 = (s32[], f32[], f32[3]{0}, f32[3]{0}) while((s32[], f32[], f32[3]{0}, f32[3]{0}) %tuple.5), condition=%region_1.30, body=%region_0.6 %get-tuple-element.40 = f32[] get-tuple-element((s32[], f32[], f32[3]{0}, f32[3]{0}) %while.38), index=1 %get-tuple-element.41 = f32[3]{0} get-tuple-element((s32[], f32[], f32[3]{0}, f32[3]{0}) %while.38), index=2 %get-tuple-element.42 = f32[3]{0} get-tuple-element((s32[], f32[], 
f32[3]{0}, f32[3]{0}) %while.38), index=3 ROOT %tuple.43 = (f32[], f32[3]{0}, f32[3]{0}) tuple(f32[] %get-tuple-element.40, f32[3]{0} %get-tuple-element.41, f32[3]{0} %get-tuple-element.42) })"; auto m = ParseAndReturnVerifiedModule(hlo_string).value(); ASSERT_TRUE(WhileLoopSimplifier().Run(m.get()).value()); HloInstruction* new_while = FindFirstWhile(m.get()); Shape new_while_shape = ParseShape("(s32[], f32[], f32[3]{0})").value(); EXPECT_TRUE(ShapeUtil::Equal(new_while->shape(), new_while_shape)); EXPECT_TRUE(ShapeUtil::Equal( new_while->while_body()->root_instruction()->shape(), new_while_shape)); EXPECT_TRUE(ShapeUtil::Equal( new_while->while_body()->parameter_instruction(0)->shape(), new_while_shape)); EXPECT_TRUE(ShapeUtil::Equal( new_while->while_condition()->parameter_instruction(0)->shape(), new_while_shape)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_simplifier.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_simplifier_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
740c6a4b-47a4-47dd-979f-37886ea081a6
cpp
tensorflow/tensorflow
shaped_buffer
third_party/xla/xla/service/shaped_buffer.cc
third_party/xla/xla/service/shaped_buffer_test.cc
#include "xla/service/shaped_buffer.h" #include <memory> #include <string> #include <utility> #include "absl/container/flat_hash_set.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "xla/layout_util.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/types.h" #include "xla/util.h" #include "tsl/platform/logging.h" namespace xla { ShapedBuffer::ShapedBuffer(Shape on_device_shape, int device_ordinal, int physical_device_ordinal) : on_device_shape_(std::move(on_device_shape)), device_ordinal_(device_ordinal), buffers_(&on_device_shape_) { physical_device_ordinal_ = physical_device_ordinal == -1 ? device_ordinal_ : physical_device_ordinal; on_host_shape_ = ShapeUtil::DeviceShapeToHostShape(on_device_shape_); } ShapedBuffer::ShapedBuffer(Shape on_host_shape, Shape on_device_shape, int device_ordinal, int physical_device_ordinal) : ShapedBuffer(on_device_shape, device_ordinal, physical_device_ordinal) {} ShapedBuffer::ShapedBuffer(ShapedBuffer&& s) noexcept : on_host_shape_(std::move(s.on_host_shape_)), on_device_shape_(std::move(s.on_device_shape_)), device_ordinal_(s.device_ordinal_), physical_device_ordinal_(s.physical_device_ordinal_), buffers_(std::move(s.buffers_)) { buffers_.replace_shape_ptr(on_device_shape_); } ShapedBuffer& ShapedBuffer::operator=(ShapedBuffer&& s) noexcept { on_device_shape_ = std::move(s.on_device_shape_); on_host_shape_ = std::move(s.on_host_shape_); device_ordinal_ = s.device_ordinal_; physical_device_ordinal_ = s.physical_device_ordinal_; buffers_ = std::move(s.buffers_); buffers_.replace_shape_ptr(on_device_shape_); return *this; } ShapedBuffer::~ShapedBuffer() {} absl::StatusOr<ShapedBuffer> ShapedBuffer::SubShapedBuffer( const ShapeIndex& index) const { TF_ASSIGN_OR_RETURN(const Shape* device_sub_shape, ShapeUtil::TryGetSubshape(on_device_shape(), index)); ShapedBuffer sub_shaped_buffer(*device_sub_shape, device_ordinal_, physical_device_ordinal_); 
TF_ASSIGN_OR_RETURN(ShapeTree<se::DeviceMemoryBase> sub_buffers, buffers_.SubShapeTree(index)); sub_shaped_buffer.set_buffers(std::move(sub_buffers)); return std::move(sub_shaped_buffer); } void ShapedBuffer::clear() { for (auto& pair : buffers_) { pair.second = se::DeviceMemoryBase(); } } std::string ShapedBuffer::ToString() const { std::string s = absl::StrCat("ShapedBuffer(", device_ordinal(), "), on-device shape=" + ShapeUtil::HumanStringWithLayout(on_device_shape()), ":\n"); ShapeUtil::ForEachSubshape( on_device_shape(), [this, &s](const Shape& subshape, const ShapeIndex& index) { std::string shape_str; if (subshape.IsTuple()) { shape_str = "tuple"; } else { shape_str = ShapeUtil::HumanStringWithLayout(subshape); } const se::DeviceMemoryBase& memory = buffer(index); absl::StrAppendFormat(&s, " %s%p (%d bytes) : %s\n", std::string(index.size() * 2, ' '), memory.opaque(), memory.size(), shape_str); }); return s; } std::ostream& operator<<(std::ostream& out, const ShapedBuffer& buffer) { out << buffer.ToString(); return out; } ScopedShapedBuffer::ScopedShapedBuffer(Shape on_device_shape, se::DeviceMemoryAllocator* allocator, int device_ordinal, int physical_device_ordinal) : ShapedBuffer(std::move(on_device_shape), device_ordinal, physical_device_ordinal), allocator_(allocator) {} ScopedShapedBuffer::ScopedShapedBuffer(Shape on_host_shape, Shape on_device_shape, se::DeviceMemoryAllocator* allocator, int device_ordinal, int physical_device_ordinal) : ScopedShapedBuffer(std::move(on_device_shape), allocator, device_ordinal, physical_device_ordinal) {} ScopedShapedBuffer::ScopedShapedBuffer(ShapedBuffer shaped_buffer, se::DeviceMemoryAllocator* allocator) : ShapedBuffer(std::move(shaped_buffer)), allocator_(allocator) {} ScopedShapedBuffer::ScopedShapedBuffer(ScopedShapedBuffer&& s) noexcept : ShapedBuffer(static_cast<ShapedBuffer&&>(s)), allocator_(s.allocator_) { s.allocator_ = nullptr; } ScopedShapedBuffer& ScopedShapedBuffer::operator=( ScopedShapedBuffer&& s) 
noexcept { Deallocate(); *static_cast<ShapedBuffer*>(this) = std::move(static_cast<ShapedBuffer&>(s)); allocator_ = s.allocator_; s.allocator_ = nullptr; return *this; } ScopedShapedBuffer::~ScopedShapedBuffer() { Deallocate(); } ShapedBuffer ScopedShapedBuffer::release() { ShapedBuffer shaped_buffer(static_cast<ShapedBuffer&&>(*this)); buffers_ = ShapeTree<se::DeviceMemoryBase>(); return shaped_buffer; } void ScopedShapedBuffer::Deallocate() { if (allocator_ == nullptr) { return; } absl::flat_hash_set<void*> deallocated_ptrs; for (auto& pair : buffers_) { se::DeviceMemoryBase& memory_base = pair.second; if (!memory_base.is_null() && deallocated_ptrs.insert(memory_base.opaque()).second) { TF_CHECK_OK(allocator_->Deallocate(device_ordinal(), memory_base)); } } } ScopedShapedBuffer ScopedShapedBuffer::TakeSubTree(ShapeIndexView index) { const xla::Shape& sub_on_device_shape = xla::ShapeUtil::GetSubshape(on_device_shape(), {index}); ScopedShapedBuffer output(sub_on_device_shape, memory_allocator(), device_ordinal(), physical_device_ordinal()); auto src_it = buffers().find(index); auto dst_it = output.buffers().begin(); while (dst_it != output.buffers().end()) { dst_it->second = src_it->second; src_it->second = tensorflow::se::DeviceMemoryBase(nullptr, 0); ++src_it; ++dst_it; } return output; } }
#include "xla/service/shaped_buffer.h" #include <memory> #include <utility> #include <vector> #include "xla/service/platform_util.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_memory_allocator.h" #include "xla/stream_executor/stream_executor_memory_allocator.h" #include "xla/test.h" #include "tsl/platform/test_benchmark.h" namespace xla { namespace { TEST(ShapedBufferTest, ScopedShapeBufferAsShapedBufferB71629047) { TF_ASSERT_OK_AND_ASSIGN(auto* platform, xla::PlatformUtil::GetDefaultPlatform()); TF_ASSERT_OK_AND_ASSIGN(auto executors, xla::PlatformUtil::GetStreamExecutors(platform)); xla::se::StreamExecutorMemoryAllocator allocator(platform, executors); const xla::Shape shape = xla::ShapeUtil::MakeShape(xla::F32, {}); const int kDeviceOrdinal = 0; auto scoped_buffer = std::make_unique<xla::ScopedShapedBuffer>( shape, shape, &allocator, kDeviceOrdinal); std::unique_ptr<xla::ShapedBuffer> buffer = std::move(scoped_buffer); buffer = nullptr; } class TestAllocator : public se::DeviceMemoryAllocator { public: TestAllocator() : se::DeviceMemoryAllocator(PlatformUtil::GetDefaultPlatform().value()) {} ~TestAllocator() override { if (!allocations_.empty()) { ADD_FAILURE() << "Some allocations not freed!"; } } using se::DeviceMemoryAllocator::Allocate; absl::StatusOr<se::OwningDeviceMemory> Allocate( int device_ordinal, uint64_t size, bool , int64_t ) override { if (size == 0) { return se::OwningDeviceMemory(); } void* buf = malloc(size); allocations_.insert({device_ordinal, buf}); return se::OwningDeviceMemory(se::DeviceMemoryBase(buf, size), device_ordinal, this); } absl::Status Deallocate(int device_ordinal, se::DeviceMemoryBase mem) override { if (mem.is_null()) { return absl::OkStatus(); } auto it = allocations_.find({device_ordinal, mem.opaque()}); if (it == allocations_.end()) { ADD_FAILURE() << "Allocation not found (double free?)"; } else { free(mem.opaque()); allocations_.erase(it); } return absl::OkStatus(); } bool 
AllowsAsynchronousDeallocation() const override { return false; } absl::StatusOr<se::Stream*> GetStream(int device_ordinal) override { LOG(FATAL) << "Not implemented"; } private: std::set<std::pair< int64_t, void*>> allocations_; }; TEST(ScopedShapedBufferTest, TestMoveAssignmentOperator) { Shape s = ShapeUtil::MakeShape(F32, {1}); TestAllocator allocator; ScopedShapedBuffer sb1(s, &allocator, 0); sb1.set_buffer(allocator.Allocate(0, 42).value(), {}); ScopedShapedBuffer sb2(s, &allocator, 1); sb2.set_buffer(allocator.Allocate(1, 10).value(), {}); sb1 = std::move(sb2); } TEST(ScopedShapedBufferTest, TestTakeSubTree) { TestAllocator allocator; Shape s = ShapeUtil::MakeShape(F32, {1}); s = xla::ShapeUtil::MakeTupleShape(std::vector<xla::Shape>(2, s)); s = xla::ShapeUtil::MakeTupleShape(std::vector<xla::Shape>(3, s)); ScopedShapedBuffer sb(s, &allocator, 0); sb.buffers().ForEachMutableElement( [&](const xla::ShapeIndex& index, se::DeviceMemoryBase* buffer) { TF_ASSERT_OK_AND_ASSIGN( se::OwningDeviceMemory m, allocator.Allocate(0, 77)); *buffer = m.Release(); }); ShapeTree<se::DeviceMemoryBase> buffers = sb.buffers(); xla::ShapeIndex subtree_index = {1}; ScopedShapedBuffer output = sb.TakeSubTree(subtree_index); output.buffers().ForEachElement([&](const xla::ShapeIndex& sub_index, const se::DeviceMemoryBase& buffer) { xla::ShapeIndex orig_index = subtree_index; for (int i : sub_index) { orig_index.push_back(i); } EXPECT_TRUE(buffers.find(orig_index)->second.IsSameAs(buffer)); }); sb.buffers().ForEachElement([&](const xla::ShapeIndex& index, const se::DeviceMemoryBase& buffer) { if ((index.size() >= subtree_index.size()) && ShapeIndexView(index).first(subtree_index.size()) == subtree_index) { EXPECT_TRUE(buffer.is_null()); } else { EXPECT_TRUE(buffers.find(index)->second.IsSameAs(buffer)); } }); } TEST(ScopedShapedBufferTest, TestSubShapeTree) { Shape array_shape = ShapeUtil::MakeShape(F32, {1}); Shape tuple_shape = xla::ShapeUtil::MakeTupleShape({array_shape, 
array_shape}); TestAllocator allocator; ScopedShapedBuffer sb(tuple_shape, &allocator, 0); sb.buffers().ForEachMutableElement( [&](const xla::ShapeIndex& index, se::DeviceMemoryBase* buffer) { TF_ASSERT_OK_AND_ASSIGN( se::OwningDeviceMemory m, allocator.Allocate(0, 32)); *buffer = m.Release(); }); auto ssb_statusor = sb.SubShapedBuffer({1}); ASSERT_TRUE(ssb_statusor.ok()); auto ssb = std::move(ssb_statusor).value(); EXPECT_EQ(ssb.on_host_shape(), array_shape); EXPECT_EQ(ssb.on_device_shape(), array_shape); } void BM_TakeSubTree(::testing::benchmark::State& state) { const int depth = state.range(0); const int fan_out = state.range(1); TestAllocator allocator; xla::Shape shape = xla::ShapeUtil::MakeShape(xla::F32, {32, 64, 128}); for (int i = 0; i < depth; ++i) { std::vector<xla::Shape> shapes(fan_out, shape); shape = xla::ShapeUtil::MakeTupleShape(shapes); } xla::ScopedShapedBuffer shaped_buffer(shape, &allocator, 0); for (auto s : state) { (void)shaped_buffer.TakeSubTree({fan_out / 2}).release(); } } BENCHMARK(BM_TakeSubTree) ->ArgPair(1, 4) ->ArgPair(1, 8) ->ArgPair(1, 32) ->ArgPair(1, 64) ->ArgPair(1, 128) ->ArgPair(1, 256) ->ArgPair(1, 512) ->ArgPair(2, 4) ->ArgPair(2, 8) ->ArgPair(2, 32) ->ArgPair(2, 64) ->ArgPair(2, 128); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/shaped_buffer.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/shaped_buffer_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
b9c06777-8747-4fb4-8b7a-eda275f5c353
cpp
tensorflow/tensorflow
hlo_graph_dumper
third_party/xla/xla/service/hlo_graph_dumper.cc
third_party/xla/xla/service/hlo_graph_dumper_test.cc
#include "xla/service/hlo_graph_dumper.h" #include <cstdint> #include <unordered_map> #include "absl/base/const_init.h" #include "absl/base/thread_annotations.h" #include "absl/hash/hash.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" #include "xla/comparison_util.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_sharding.h" #include "xla/shape.h" #include "tsl/platform/errors.h" #include "tsl/platform/file_system.h" #include "tsl/platform/statusor.h" #include "tsl/platform/thread_annotations.h" #ifndef _WIN32 #include <unistd.h> #endif #include <algorithm> #include <atomic> #include <deque> #include <functional> #include <map> #include <memory> #include <optional> #include <queue> #include <string> #include <tuple> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/strings/match.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" #include "absl/strings/str_replace.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/layout_util.h" #include "xla/literal.h" #include "xla/primitive_util.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/pattern_matcher.h" #include "xla/shape_util.h" #include "xla/stream_executor/dnn.h" #include "xla/tsl/lib/gtl/map_util.h" #include "xla/tsl/lib/io/zlib_compression_options.h" #include "xla/tsl/lib/io/zlib_outputbuffer.h" #include "xla/types.h" #include "xla/util.h" #include "xla/window_util.h" #include "tsl/platform/base64.h" #include "tsl/platform/env.h" #include "tsl/platform/numbers.h" #include 
"tsl/platform/protobuf.h" #include "tsl/platform/regexp.h" #include "tsl/platform/status.h" namespace xla { namespace { using absl::StrAppend; using absl::StrCat; using absl::StrFormat; using absl::StrJoin; using std::nullopt; using std::optional; enum NodeFilterResult { kNormalNode, kHideNode, kHighlightNode, kSomeOperandsOmitted, kOmitNodeOperands, kSomeUsersOmitted, }; class NodeFilter { public: NodeFilter() : filter_([](const HloInstruction*) { return kNormalNode; }) {} explicit NodeFilter( std::function<NodeFilterResult(const HloInstruction* instr)> filter, std::optional<int> num_rendered = std::nullopt) : filter_(std::move(filter)), num_rendered_(num_rendered) {} bool Show(const HloInstruction* instr) const { return filter_(instr) != kHideNode; } bool Highlight(const HloInstruction* instr) const { return filter_(instr) == kHighlightNode; } bool OmitOperands(const HloInstruction* instr) const { return filter_(instr) == kOmitNodeOperands; } bool SomeOrAllOperandsOmitted(const HloInstruction* instr) const { auto result = filter_(instr); return result == kOmitNodeOperands || result == kSomeOperandsOmitted; } bool Deemphasized(const HloInstruction* instr) const { auto result = filter_(instr); return result == kOmitNodeOperands || result == kSomeOperandsOmitted || result == kSomeUsersOmitted; } std::optional<int> GetNumRendered() const { return num_rendered_; } private: std::function<NodeFilterResult(const HloInstruction* instr)> filter_; std::optional<int> num_rendered_; }; bool IsSmall(const HloInstruction* instr) { if (ShapeUtil::HasPrimitiveType(instr->shape(), OPAQUE_TYPE) || ShapeUtil::HasPrimitiveType(instr->shape(), TOKEN)) { return true; } return ShapeUtil::ElementsInRecursive(instr->shape()) < 4096; } enum ColorScheme { kBlue, kBrown, kDarkBlue, kDarkGreen, kDarkOrange, kDarkRed, kGray, kGreen, kOrange, kPurple, kRed, kWhite, kYellow, kDashedBorder, }; struct NodeColors { std::string style; std::string fill_color; std::string stroke_color; std::string 
font_color; }; NodeColors NodeColorsForScheme(ColorScheme color) { switch (color) { case kBlue: return NodeColors{"filled", "#bbdefb", "#8aacc8", "black"}; case kBrown: return NodeColors{"filled", "#bcaaa4", "#8c7b75", "black"}; case kDarkBlue: return NodeColors{"filled", "#1565c0", "#003c8f", "white"}; case kDarkGreen: return NodeColors{"filled", "#2e7d32", "#005005", "white"}; case kDarkOrange: return NodeColors{"filled", "#ffb74d", "#c88719", "black"}; case kDarkRed: return NodeColors{"filled", "#b71c1c", "#7f0000", "white"}; case kGray: return NodeColors{"filled", "#cfd8dc", "#9ea7aa", "black"}; case kGreen: return NodeColors{"filled", "#c8e6c9", "#97b498", "black"}; case kOrange: return NodeColors{"filled", "#ffe0b2", "#cbae82", "black"}; case kPurple: return NodeColors{"filled", "#e1bee7", "#af8eb5", "black"}; case kRed: return NodeColors{"filled", "#ffcdd2", "#cb9ca1", "black"}; case kWhite: return NodeColors{"filled", "white", "#9e9e9e", "black"}; case kYellow: return NodeColors{"filled", "#fff9c4", "#cbc693", "black"}; case kDashedBorder: return NodeColors{"filled,dashed", "white", "#757575", "#757575"}; } } std::string NodeFillColorForStatistic(const Statistic& statistic) { auto stat_val = statistic.stat_val(); if (stat_val == 0) { return "#f5f5f5"; } else if (stat_val < 10) { return "#f7d4cc"; } else if (stat_val < 20) { return "#f8b2a3"; } else if (stat_val < 30) { return "#f9a28f"; } else if (stat_val < 40) { return "#fa917b"; } else if (stat_val < 50) { return "#fb8066"; } else if (stat_val < 60) { return "#fc7052"; } else if (stat_val < 70) { return "#fd5f3d"; } else if (stat_val < 80) { return "#fd4e29"; } else if (stat_val < 90) { return "#fe3e14"; } else { return "#ff2d00"; } } std::string NodeFontColorForStatistic(const Statistic& statistic) { if (statistic.stat_val() < 60) { return "black"; } else { return "white"; } } std::string NodeColorAttributes(ColorScheme color) { NodeColors node_colors = NodeColorsForScheme(color); return 
StrFormat(R"(style="%s", fontcolor="%s", color="%s", fillcolor="%s")", node_colors.style, node_colors.font_color, node_colors.stroke_color, node_colors.fill_color); } std::string HtmlLikeStringSanitize(absl::string_view s) { return absl::StrReplaceAll(s, {{"<", "&lt;"}, {">", "&gt;"}, {"\"", "&quot;"}}); } bool IsFusedBroadcastOfConstantEffectiveScalar(const HloInstruction* instr) { namespace m = match; return instr->parent()->IsFusionComputation() && Match(instr, m::Broadcast(m::ConstantEffectiveScalar())); } optional<std::string> MatchTrivialComputation( const HloComputation* computation) { namespace m = match; if (computation->instruction_count() != 3) { return nullopt; } HloInstruction* root = computation->root_instruction(); const HloInstruction *param0, *param1; if (!Match(root, m::Op() .WithNumOperands(2) .WithShape(m::Shape().IsEffectiveScalar()) .WithBinaryOperandsAnyOrder( m::Parameter(&param0, 0) .WithShape(m::Shape().IsEffectiveScalar()), m::Parameter(&param1, 1) .WithShape(m::Shape().IsEffectiveScalar())))) { return nullopt; } if (root->operand(0) == param1) { CHECK_EQ(root->operand(1), param0); if (root->opcode() == HloOpcode()) { switch (root->comparison_direction()) { case ComparisonDirection::kLe: case ComparisonDirection::kGe: case ComparisonDirection::kGt: case ComparisonDirection::kLt: return nullopt; default: break; } } } switch (root->opcode()) { case HloOpcode::kAdd: return "add"; case HloOpcode::kMultiply: return "multiply"; case HloOpcode::kMinimum: return "min"; case HloOpcode::kMaximum: return "max"; case HloOpcode::kXor: return "xor"; case HloOpcode::kAnd: return "and"; case HloOpcode::kOr: return "or"; case HloOpcode::kCompare: { switch (root->comparison_direction()) { case ComparisonDirection::kLe: return "less-or-equal"; case ComparisonDirection::kGe: return "greater-or-equal"; case ComparisonDirection::kGt: return "greater-than"; case ComparisonDirection::kLt: return "less-than"; case ComparisonDirection::kEq: return "equal-to"; case 
ComparisonDirection::kNe: return "not-equal-to"; } } default: return nullopt; } } class HloDotDumper { public: HloDotDumper( const HloComputation* computation, absl::string_view label, const DebugOptions& debug_options, HloRenderOptions hlo_render_options, NodeFilter filter, std::optional<absl::flat_hash_map<const HloInstruction*, ColorStats>> color_map = std::nullopt) : computation_(computation), label_(label), debug_options_(debug_options), hlo_render_options_(hlo_render_options), filter_(std::move(filter)), color_map_(color_map) {} std::string Dump(); std::optional<std::string> CssIdForInstruction(const HloInstruction& instr) { if (instr.opcode() == HloOpcode::kFusion) { auto it = cluster_ids_.find(instr.called_computations()[0]); if (it == cluster_ids_.end()) { return std::nullopt; } return StrCat("#a_clust", it->second, " path"); } auto it = node_ids_.find(&instr); if (it == node_ids_.end()) { return std::nullopt; } return StrCat("#node", it->second, " polygon"); } private: std::string InstructionId(const HloInstruction* instruction) { return StrCat(reinterpret_cast<uint64_t>(instruction)); } std::string SubcomputationId(const HloComputation* computation) { return StrCat("cluster_", reinterpret_cast<uint64_t>(computation)); } std::string Header(); std::string Footer(); bool ShouldShowSubcomputation(const HloComputation* subcomp); bool ShouldShowFusionSubcomputation(const HloInstruction* instr); bool ShouldMergeIntoUsers(const HloInstruction* instr) const; std::string DumpSubcomputation(const HloComputation* subcomp, const HloInstruction* parent_instr); std::string DumpComputation(const HloComputation* comp); std::string DumpRootTag(); std::string DumpInstruction(const HloInstruction* instr); ColorScheme GetInstructionColor(const HloInstruction* instr); std::string GetInstructionNodeShape(const HloInstruction* instr); std::string GetInstructionNodeLabel(const HloInstruction* instr); std::string GetInstructionNodeMetadata(const HloInstruction* instr); 
std::string GetInstructionNodeBackendConfig(const HloInstruction* instr); std::string GetInstructionNodeExtraInfo(const HloInstruction* instr); std::string GetInstructionNodeInlinedOperands(const HloInstruction* instr); void AddInstructionIncomingEdges(const HloInstruction* instr); const HloInstruction* GetNodeForEdge(const HloInstruction* instr); std::string GetInstructionTrivialComputationStr(const HloInstruction* instr); const HloComputation* computation_; const std::string label_; const DebugOptions& debug_options_; const HloRenderOptions hlo_render_options_; const NodeFilter filter_; const std::optional<absl::flat_hash_map<const HloInstruction*, ColorStats>> color_map_; int64_t next_node_id_ = 1; absl::flat_hash_map<const HloInstruction*, int64_t> node_ids_; int64_t root_node_id_; int64_t next_edge_id_ = 1; std::unordered_multimap< std::pair<const HloInstruction*, const HloInstruction*>, int64_t, absl::Hash<std::pair<const HloInstruction*, const HloInstruction*>>> edge_ids_; int64_t next_cluster_id_ = 1; absl::flat_hash_map<const HloComputation*, int64_t> cluster_ids_; std::vector<std::string> edges_; absl::flat_hash_map<HloSharding, ColorScheme> sharding_colors_; int64_t next_shard_color_ = 0; }; std::string HloDotDumper::Dump() { std::string body; StrAppend(&body, DumpComputation(computation_)); StrAppend(&body, DumpRootTag()); std::string g = Header(); StrAppend(&g, body); StrAppend(&g, Footer()); return g; } std::string HloDotDumper::Header() { constexpr char fmt[] = R"(digraph G { rankdir = TB; compound = true; label = <<b>%s</b>>; labelloc = t; tooltip = " "; stylesheet=< data:text/css, @import url(https: svg text { font-family: 'Roboto'; font-size: 12px; } %s > )"; VLOG(3) << "Generating Header"; std::string graph_label = StrCat(label_, "<br/>Computation ", computation_->name()); if (computation_->IsFusionComputation()) { StrAppend(&graph_label, " (in fusion instruction ", computation_->FusionInstruction()->name(), ")"); } std::vector<std::string> 
edge_css_rules; std::string kBlue = "#1976d2"; std::string kRed = "#d32f2f"; for (const auto& kv : edge_ids_) { const HloInstruction* from_node = kv.first.first; const HloInstruction* to_node = kv.first.second; int64_t edge_id = kv.second; auto add_hover_css_rule = [&](std::string elem_type, int64_t elem_id, std::string color) { edge_css_rules.push_back( StrFormat(" #%s%d:hover ~ #edge%d text { fill: %s; }\n" " #%s%d:hover ~ #edge%d path { " "stroke: %s; stroke-width: .2em; }\n" " #%s%d:hover ~ #edge%d polygon { " "fill: %s; stroke: %s; stroke-width: .2em; }\n", elem_type, elem_id, edge_id, color, elem_type, elem_id, edge_id, color, elem_type, elem_id, edge_id, color, color)); }; int64_t from_node_id = tsl::gtl::FindWithDefault(node_ids_, from_node, -1); if (from_node_id == -1) { LOG(FATAL) << from_node->name() << " was added to edges but not to nodes"; } int64_t to_node_id = to_node ? tsl::gtl::FindWithDefault(node_ids_, to_node, -1) : root_node_id_; if (to_node != nullptr && to_node_id == -1) { LOG(FATAL) << to_node->name() << " was added to edges but not to nodes"; } add_hover_css_rule("node", from_node_id, kBlue); add_hover_css_rule("node", to_node_id, kRed); if (to_node) { VLOG(3) << "Adding css for edge " << edge_id << " from node " << from_node->name() << " to node " << to_node->name(); } else { VLOG(3) << "Adding css for edge " << edge_id << " from node " << from_node->name() << " to root tag"; } if (to_node) { if (from_node->IsFused() && from_node->parent()->root_instruction() == from_node) { int64_t cluster_id = cluster_ids_.at(from_node->parent()); add_hover_css_rule("clust", cluster_id, kBlue); } if (to_node->IsFused() && to_node->opcode() == HloOpcode::kParameter) { int64_t cluster_id = cluster_ids_.at(to_node->parent()); add_hover_css_rule("clust", cluster_id, kRed); } } } return StrFormat( fmt, graph_label, absl::StrReplaceAll(StrJoin(edge_css_rules, "\n"), {{"#", "%23"}})); } std::string HloDotDumper::Footer() { return StrCat(StrJoin(edges_, "\n"), 
"\n}"); } bool HloDotDumper::ShouldShowFusionSubcomputation(const HloInstruction* instr) { CHECK_EQ(instr->opcode(), HloOpcode::kFusion); return ShouldShowSubcomputation(instr->fused_instructions_computation()); } bool HloDotDumper::ShouldShowSubcomputation(const HloComputation* subcomp) { if (subcomp->IsFusionComputation()) { const HloInstruction* fusion = subcomp->FusionInstruction(); if (!filter_.Show(fusion) || filter_.SomeOrAllOperandsOmitted(fusion) || !hlo_render_options_.show_fusion_subcomputations) { return false; } } if (!subcomp->IsFusionComputation() && MatchTrivialComputation(subcomp)) { return false; } if (subcomp->WhileCallInstruction() != nullptr && !hlo_render_options_.show_while_subcomputations) { return false; } return absl::c_any_of( subcomp->instructions(), [&](const HloInstruction* instr) { return filter_.Show(instr); }); } std::string HloDotDumper::DumpSubcomputation( const HloComputation* subcomp, const HloInstruction* parent_instr) { VLOG(2) << "Dumping subcomputation " << subcomp->name(); if (parent_instr->opcode() != HloOpcode::kFusion) { const HloInstruction* from = GetNodeForEdge(subcomp->root_instruction()); VLOG(2) << "Edge: from " << from->name() << " to " << parent_instr->name() << " as " << next_edge_id_; edge_ids_.insert({{from, parent_instr}, next_edge_id_++}); constexpr char edge_fmt[] = R"(%s -> %s [ltail="%s", style="dashed" tooltip="%s -> %s"];)"; edges_.push_back(StrFormat( edge_fmt, InstructionId(from), InstructionId(parent_instr), SubcomputationId(subcomp), subcomp->name(), parent_instr->name())); } if (cluster_ids_.find(subcomp) != cluster_ids_.end()) { return ""; } cluster_ids_[subcomp] = next_cluster_id_++; std::string id = SubcomputationId(subcomp); std::string subcomp_label, style; if (parent_instr->opcode() == HloOpcode::kFusion) { subcomp_label = StrFormat("Fused expression for <b>%s</b><br/>%s", HtmlLikeStringSanitize(parent_instr->name()), HtmlLikeStringSanitize(parent_instr->ToCategory())); std::string extra_info 
= GetInstructionNodeExtraInfo(parent_instr); if (!extra_info.empty()) { StrAppend(&subcomp_label, "<br/>", extra_info); } std::string node_backend_config = GetInstructionNodeBackendConfig(parent_instr); if (!node_backend_config.empty()) { StrAppend(&subcomp_label, "<br/>", node_backend_config); } bool highlight = filter_.Highlight(parent_instr); std::string fillcolor; std::string strokecolor; if (!highlight && (parent_instr->module_has_statistics() || parent_instr->has_statistics())) { fillcolor = parent_instr->has_statistics() ? NodeFillColorForStatistic( parent_instr->statistic_to_visualize()) : "#f5f5f5"; strokecolor = "#c2c2c2"; } else if (debug_options_.xla_hlo_graph_sharding_color() && !highlight) { NodeColors node_colors = NodeColorsForScheme(GetInstructionColor(parent_instr)); fillcolor = node_colors.fill_color; strokecolor = node_colors.stroke_color; } else { fillcolor = highlight ? "#ffcdd2" : "#f5f5f5"; strokecolor = highlight ? "#b71c1c" : "#c2c2c2"; } style = StrFormat(R"(style="rounded,filled,bold"; fillcolor="%s"; color="%s;")", fillcolor, strokecolor); } else { subcomp_label = StrFormat("Subcomputation for <b>%s</b><br/>%s", HtmlLikeStringSanitize(parent_instr->name()), HtmlLikeStringSanitize(subcomp->name())); style = "style=rounded; color=black;"; } std::string comp_body = DumpComputation(subcomp); constexpr char computation_fmt[] = R"(subgraph %s { %s label = <%s>; labelloc = t; tooltip = " "; %s } )"; return StrFormat(computation_fmt, id, style, subcomp_label, comp_body, id); } std::string HloDotDumper::DumpComputation(const HloComputation* comp) { std::string g; for (const auto* instr : comp->instructions()) { if (!filter_.Show(instr)) { continue; } for (const HloComputation* subcomp : instr->called_computations()) { if (ShouldShowSubcomputation(subcomp)) { StrAppend(&g, DumpSubcomputation(subcomp, instr)); } } StrAppend(&g, DumpInstruction(instr)); } return g; } std::string HloDotDumper::DumpRootTag() { const HloInstruction* from = 
GetNodeForEdge(computation_->root_instruction()); if (!filter_.Show(from) || from->opcode() == HloOpcode::kConstant || IsFusedBroadcastOfConstantEffectiveScalar(from)) { return ""; } auto from_id = InstructionId(from); HloInstruction* to = nullptr; auto to_id = SubcomputationId(computation_); std::string node_body = "ROOT"; std::string node_shape = "circle"; ColorScheme color = kBrown; VLOG(2) << "Adding root tag as node " << next_node_id_; root_node_id_ = next_node_id_++; VLOG(2) << "Adding edge from " << from->name() << " to root tag as " << next_edge_id_; edge_ids_.insert({{from, to}, next_edge_id_++}); edges_.push_back(StrFormat(R"(%s -> %s [tooltip=" "];)", from_id, to_id)); return StrFormat(R"(%s [label=<%s>, shape=%s, tooltip=" ", %s];)" "\n", to_id, node_body, node_shape, NodeColorAttributes(color)); } static const HloConstantInstruction* TryGetFusionParameterConstant( const HloInstruction* instr) { if (instr->opcode() != HloOpcode::kParameter || !instr->IsFused()) { return nullptr; } const HloInstruction* fusion = instr->parent()->FusionInstruction(); const HloInstruction* operand = fusion->operand(instr->parameter_number()); return DynCast<HloConstantInstruction>(operand); } bool HloDotDumper::ShouldMergeIntoUsers(const HloInstruction* instr) const { if ((instr->opcode() == HloOpcode::kGetTupleElement && instr != instr->parent()->root_instruction()) || TryGetFusionParameterConstant(instr) != nullptr) { return true; } const int kMinUsersToOmit = 3; return instr->opcode() == HloOpcode::kParameter && instr->shape().IsTuple() && !instr->IsFused() && absl::c_count_if(instr->users(), [&](const HloInstruction* user) { return filter_.Show(user); }) > kMinUsersToOmit && absl::c_all_of(instr->users(), [&](const HloInstruction* user) { return !filter_.Show(user) || user->opcode() == HloOpcode::kGetTupleElement; }); } std::string HloDotDumper::DumpInstruction(const HloInstruction* instr) { if ((instr->opcode() == HloOpcode::kConstant || 
IsFusedBroadcastOfConstantEffectiveScalar(instr)) && instr != instr->parent()->root_instruction()) { return ""; } if (ShouldMergeIntoUsers(instr)) { return ""; } if (instr->opcode() == HloOpcode::kFusion && ShouldShowFusionSubcomputation(instr)) { return ""; } VLOG(2) << "Adding node " << instr->name() << " as " << next_node_id_; node_ids_[instr] = next_node_id_++; std::string node_shape = GetInstructionNodeShape(instr); std::string node_label = GetInstructionNodeLabel(instr); std::string node_metadata = GetInstructionNodeMetadata(instr); std::string node_backend_config = GetInstructionNodeBackendConfig(instr); std::string extra_info = GetInstructionNodeExtraInfo(instr); std::string inlined_constants = GetInstructionNodeInlinedOperands(instr); std::string trivial_subcomputation = GetInstructionTrivialComputationStr(instr); AddInstructionIncomingEdges(instr); NodeColors node_colors; std::string node_style; std::string node_attributes; if (hlo_render_options_.override_node_colors && color_map_.has_value()) { if (color_map_->contains(instr)) { node_colors.fill_color = color_map_->at(instr).color; node_attributes = color_map_->at(instr).stats; } else { VLOG(2) << "color_map_ for instruction:" << instr->name() << "is empty" << "\n"; node_colors.fill_color = "#808080"; } node_colors.style = "filled"; node_colors.font_color = "black"; node_colors.stroke_color = "#c2c2c2"; node_style = StrFormat(R"(style="%s", fontcolor="%s", color="%s", fillcolor="%s")", node_colors.style, node_colors.font_color, node_colors.stroke_color, node_colors.fill_color); } else { ColorScheme color = GetInstructionColor(instr); if (!debug_options_.xla_hlo_graph_sharding_color()) { if (filter_.Deemphasized(instr)) { color = kDashedBorder; } if (filter_.Highlight(instr)) { node_shape = "diamond"; color = kDarkRed; } } node_colors = NodeColorsForScheme(color); if (instr->has_statistics()) { const auto& statistic_to_visualize = instr->statistic_to_visualize(); node_colors.fill_color = 
NodeFillColorForStatistic(statistic_to_visualize); node_colors.stroke_color = "#c2c2c2"; node_colors.font_color = NodeFontColorForStatistic(statistic_to_visualize); } else if (instr->module_has_statistics()) { node_colors.fill_color = "#f5f5f5"; node_colors.stroke_color = "#c2c2c2"; node_colors.font_color = "black"; } node_style = StrFormat(R"(style="%s", fontcolor="%s", color="%s", fillcolor="%s")", node_colors.style, node_colors.font_color, node_colors.stroke_color, node_colors.fill_color); } std::string node_body = node_label; for (const std::string& s : {trivial_subcomputation, extra_info, inlined_constants, node_backend_config, node_attributes}) { if (!s.empty()) { StrAppend(&node_body, "<br/>", s); } } return StrFormat(R"(%s [label=<%s>, shape=%s, tooltip="%s", %s];)" "\n", InstructionId(instr), node_body, node_shape, node_metadata, node_style); } std::string HloDotDumper::GetInstructionNodeInlinedOperands( const HloInstruction* instr) { auto stringify_constant = [](const HloConstantInstruction* constant, const Shape& shape) { if (ShapeUtil::IsZeroElementArray(shape)) { return StrFormat("{} (%s)", ShapeUtil::HumanString(constant->shape())); } optional<int64_t> elem_count; if (shape.IsArray()) { elem_count = ShapeUtil::ElementsIn(constant->shape()); } if (elem_count.has_value() && *elem_count <= 8 && constant->HasLiteral()) { std::string literal_str = constant->literal().ToStringWithoutShape(); if (literal_str.size() <= 64) { return StrFormat("%s %s", shape.ToString(), literal_str); } } std::string constant_name; if (absl::StartsWith(constant->name(), "constant")) { constant_name = std::string(constant->name()); } else { constant_name = StrCat("constant ", constant->name()); } return StrFormat("%s %s", constant_name, ShapeUtil::HumanString(shape)); }; std::vector<std::string> lines; constexpr int64_t kMaxOperandsShown = 32; for (int64_t i = 0; i < instr->operand_count(); ++i) { const HloInstruction* operand = instr->operand(i); optional<std::string> 
operand_str; if (const auto* constant_operand = DynCast<HloConstantInstruction>(operand)) { operand_str = stringify_constant(constant_operand, constant_operand->shape()); } else if (IsFusedBroadcastOfConstantEffectiveScalar(operand)) { operand_str = stringify_constant( Cast<HloConstantInstruction>(operand->operand(0)), operand->shape()); } else if (ShouldMergeIntoUsers(operand)) { if (operand->opcode() == HloOpcode::kParameter) { if (const HloConstantInstruction* constant = TryGetFusionParameterConstant(operand)) { operand_str = stringify_constant(constant, constant->shape()); } else { operand_str = StrFormat("Parameter %d", operand->parameter_number()); } } else if (operand->opcode() == HloOpcode::kGetTupleElement) { operand_str = StrFormat("tuple-element %d of %s %s", operand->tuple_index(), operand->operand(0)->name(), ShapeUtil::HumanStringWithLayout(operand->shape())); } else { operand_str = std::string(operand->name()); } } if (operand_str) { if (instr->operand_count() > 1) { lines.push_back(StrFormat("<b>operand %d</b> = %s", i, *operand_str)); } else { lines.push_back(StrFormat("<b>operand</b> = %s", *operand_str)); } } if (lines.size() == kMaxOperandsShown && i < instr->operand_count() - 1) { lines.push_back("..."); break; } } if (instr->opcode() == HloOpcode::kParameter && instr->IsFused()) { const HloInstruction* param_input = instr->parent()->FusionInstruction()->operand( instr->parameter_number()); if (param_input->opcode() == HloOpcode::kGetTupleElement) { lines.push_back( StrFormat("tuple-element %d of %s %s", param_input->tuple_index(), param_input->operand(0)->name(), ShapeUtil::HumanStringWithLayout(param_input->shape()))); } } return StrJoin(lines, "<br/>"); } ColorScheme HloDotDumper::GetInstructionColor(const HloInstruction* instr) { if (debug_options_.xla_hlo_graph_sharding_color()) { if (!instr->has_sharding()) { return kDashedBorder; } auto it = sharding_colors_.find(instr->sharding()); if (it != sharding_colors_.end()) { return it->second; 
} ColorScheme color = static_cast<ColorScheme>( kBlue + (next_shard_color_++ % (kDashedBorder - kBlue))); sharding_colors_.emplace(instr->sharding(), color); return color; } auto parameter_color = IsSmall(instr) ? kOrange : kDarkOrange; if (absl::c_any_of(instr->operands(), [&](const HloInstruction* operand) { return operand->opcode() == HloOpcode::kParameter && ShouldMergeIntoUsers(operand) && TryGetFusionParameterConstant(operand) == nullptr; })) { return parameter_color; } switch (instr->opcode()) { case HloOpcode::kAbs: case HloOpcode::kAdd: case HloOpcode::kAnd: case HloOpcode::kAtan2: case HloOpcode::kBitcastConvert: case HloOpcode::kCeil: case HloOpcode::kClamp: case HloOpcode::kClz: case HloOpcode::kCompare: case HloOpcode::kComplex: case HloOpcode::kConvert: case HloOpcode::kCos: case HloOpcode::kDivide: case HloOpcode::kErf: case HloOpcode::kExp: case HloOpcode::kExpm1: case HloOpcode::kFloor: case HloOpcode::kImag: case HloOpcode::kIota: case HloOpcode::kIsFinite: case HloOpcode::kLog: case HloOpcode::kLog1p: case HloOpcode::kMaximum: case HloOpcode::kMinimum: case HloOpcode::kMultiply: case HloOpcode::kNegate: case HloOpcode::kNot: case HloOpcode::kPopulationCount: case HloOpcode::kOr: case HloOpcode::kXor: case HloOpcode::kPower: case HloOpcode::kReal: case HloOpcode::kReducePrecision: case HloOpcode::kRemainder: case HloOpcode::kRng: case HloOpcode::kRngGetAndUpdateState: case HloOpcode::kRngBitGenerator: case HloOpcode::kRoundNearestAfz: case HloOpcode::kRoundNearestEven: case HloOpcode::kRsqrt: case HloOpcode::kSelect: case HloOpcode::kShiftLeft: case HloOpcode::kShiftRightArithmetic: case HloOpcode::kShiftRightLogical: case HloOpcode::kStochasticConvert: case HloOpcode::kLogistic: case HloOpcode::kSign: case HloOpcode::kSin: case HloOpcode::kSlice: case HloOpcode::kSort: case HloOpcode::kTopK: case HloOpcode::kSqrt: case HloOpcode::kCbrt: case HloOpcode::kSubtract: case HloOpcode::kTan: case HloOpcode::kTanh: return kWhite; case 
HloOpcode::kAddDependency: case HloOpcode::kAfterAll: case HloOpcode::kGetTupleElement: case HloOpcode::kOptimizationBarrier: case HloOpcode::kPad: case HloOpcode::kTuple: return kWhite; case HloOpcode::kConstant: return kWhite; case HloOpcode::kBroadcast: case HloOpcode::kDynamicUpdateSlice: return kYellow; case HloOpcode::kConcatenate: case HloOpcode::kDynamicSlice: case HloOpcode::kReshape: case HloOpcode::kDynamicReshape: case HloOpcode::kReverse: case HloOpcode::kTranspose: return kGreen; case HloOpcode::kCopy: case HloOpcode::kCopyStart: case HloOpcode::kCopyDone: return kGreen; case HloOpcode::kBitcast: if (!instr->IsFused()) { return kWhite; } return kGreen; case HloOpcode::kAsyncStart: case HloOpcode::kAsyncUpdate: case HloOpcode::kAsyncDone: return GetInstructionColor(instr->async_wrapped_instruction()); case HloOpcode::kConvolution: case HloOpcode::kDot: case HloOpcode::kFft: case HloOpcode::kTriangularSolve: case HloOpcode::kCholesky: return kDarkBlue; case HloOpcode::kParameter: return parameter_color; case HloOpcode::kBatchNormGrad: case HloOpcode::kBatchNormInference: case HloOpcode::kBatchNormTraining: case HloOpcode::kReduce: case HloOpcode::kReduceWindow: case HloOpcode::kScatter: case HloOpcode::kSelectAndScatter: case HloOpcode::kGather: return kPurple; case HloOpcode::kDomain: case HloOpcode::kFusion: case HloOpcode::kMap: case HloOpcode::kGetDimensionSize: case HloOpcode::kSetDimensionSize: return kGray; case HloOpcode::kAllGather: case HloOpcode::kAllGatherStart: case HloOpcode::kAllGatherDone: case HloOpcode::kAllReduce: case HloOpcode::kReduceScatter: case HloOpcode::kAllReduceStart: case HloOpcode::kAllReduceDone: case HloOpcode::kAllToAll: case HloOpcode::kCollectiveBroadcast: case HloOpcode::kCollectivePermute: case HloOpcode::kCollectivePermuteStart: case HloOpcode::kCollectivePermuteDone: case HloOpcode::kInfeed: case HloOpcode::kOutfeed: case HloOpcode::kPartitionId: case HloOpcode::kRecv: case HloOpcode::kRecvDone: case 
HloOpcode::kSend: case HloOpcode::kSendDone: case HloOpcode::kReplicaId: return kBrown; case HloOpcode::kCall: case HloOpcode::kConditional: case HloOpcode::kCustomCall: case HloOpcode::kWhile: return kDarkGreen; } } std::string HloDotDumper::GetInstructionNodeShape(const HloInstruction* instr) { switch (instr->opcode()) { case HloOpcode::kWhile: return "ellipse"; default: return "rect"; } } std::string HloDotDumper::GetInstructionNodeLabel(const HloInstruction* instr) { if (instr->opcode() == HloOpcode::kParameter) { return StrFormat("<b>Parameter %d</b>", instr->parameter_number()); } if (absl::StartsWith(instr->name(), HloOpcodeString(instr->opcode()))) { return StrFormat("<b>%s</b>", HtmlLikeStringSanitize(instr->name())); } std::string extended_opcode = StrCat(HloOpcodeString(instr->opcode()), instr->opcode() != HloOpcode::kFusion ? "" : StrCat(":", xla::ToString(instr->fusion_kind()))); return StrFormat("<b>%s</b><br/>%s", HtmlLikeStringSanitize(instr->name()), HtmlLikeStringSanitize(extended_opcode)); } std::string HloDotDumper::GetInstructionNodeMetadata( const HloInstruction* instr) { std::vector<std::string> lines; if (!instr->metadata().op_name().empty()) { lines.push_back(HtmlLikeStringSanitize(instr->metadata().op_name())); } if (!instr->metadata().op_type().empty()) { lines.push_back(StrFormat( "op_type: %s", HtmlLikeStringSanitize(instr->metadata().op_type()))); } if (!instr->metadata().source_file().empty() && instr->metadata().source_line() != 0) { lines.push_back(StrFormat("source: %s:%d", instr->metadata().source_file(), instr->metadata().source_line())); } if (instr->metadata().stack_frame_id() != 0) { auto hlo_module = instr->parent()->parent(); int frame_id = instr->metadata().stack_frame_id(); while (frame_id != 0) { HloModule::StackFrame frame = hlo_module->get_stack_frame(frame_id); if (frame.empty()) { break; } frame_id = frame.parent_frame_id; lines.push_back(StrFormat( "%s:%s:%d%s", frame.file_name, frame.function_name, frame.line, 
frame.column == 0 ? "" : StrFormat(":%d", frame.column))); } } return StrJoin(lines, "\n"); } static std::vector<std::pair<std::string, std::string>> ExtractCudnnConvBackendConfigProps(const gpu::CudnnConvBackendConfig& config) { std::vector<std::pair<std::string, std::string>> props; if (config.conv_result_scale() != 1) { props.emplace_back("conv_result_scale", StrCat(config.conv_result_scale())); } if (config.side_input_scale() != 0 && config.side_input_scale() != 1) { props.emplace_back("side_input_scale", StrCat(config.side_input_scale())); } if (config.activation_mode() == se::dnn::ActivationMode::kLeakyRelu) { props.emplace_back("leakyrelu_alpha", StrCat(config.leakyrelu_alpha())); } props.emplace_back( "activation_mode", se::dnn::ActivationModeString( static_cast<se::dnn::ActivationMode>(config.activation_mode()))); props.emplace_back("algo", se::dnn::AlgorithmDesc(config.algorithm()).ToString()); return props; } static std::vector<std::pair<std::string, std::string>> ExtractGemmBackendConfigProps(const gpu::GemmBackendConfig& config, const HloInstruction* instr) { std::vector<std::pair<std::string, std::string>> props; if (primitive_util::IsComplexType(instr->shape().element_type())) { if (config.alpha_real() != 1 || config.alpha_imag() != 1) { props.emplace_back("alpha_real", StrCat(config.alpha_real())); props.emplace_back("alpha_imag", StrCat(config.alpha_real())); } } else { if (config.alpha_real() != 1) { props.emplace_back("alpha", StrCat(config.alpha_real())); } } if (config.beta() != 0 && config.beta() != 1) { props.emplace_back("beta", StrCat(config.beta())); } props.emplace_back( "", absl::StrReplaceAll( DotDimensionNumbersToString(config.dot_dimension_numbers()), {{", ", "<br/>"}})); if (config.algorithm_case() == gpu::GemmBackendConfig::kSelectedAlgorithm) { props.emplace_back("algorithm", StrCat(config.selected_algorithm())); } if (config.epilogue() != gpu::GemmBackendConfig::DEFAULT) { props.emplace_back( "epilogue", 
gpu::GemmBackendConfig::Epilogue_Name(config.epilogue()));
  }
  return props;
}

// Produces the backend-config portion of a node label. For cuDNN convolutions
// and cuBLAS GEMMs the proto config is decoded into readable key=value rows;
// otherwise the raw backend-config string is shown (only if the render
// options ask for it).
std::string HloDotDumper::GetInstructionNodeBackendConfig(
    const HloInstruction* instr) {
  std::vector<std::pair<std::string, std::string>> props;
  if (gpu::IsCustomCallToDnnConvolution(*instr)) {
    // Decoding failures are silently ignored; the node simply gets no
    // backend-config annotation.
    absl::StatusOr<gpu::GpuBackendConfig> config = instr->backend_config<gpu::GpuBackendConfig>();
    if (config.ok()) {
      props = ExtractCudnnConvBackendConfigProps(config->cudnn_conv_backend_config());
    }
  } else if (gpu::IsCublasGemm(*instr)) {
    absl::StatusOr<gpu::GpuBackendConfig> config = instr->backend_config<gpu::GpuBackendConfig>();
    if (config.ok()) {
      props = ExtractGemmBackendConfigProps(config->gemm_backend_config(), instr);
    }
  }
  if (!props.empty()) {
    // Leading <br/> separates the block from the preceding label line when
    // there is more than one property. Empty keys print the value alone.
    return StrCat((props.size() > 1 ? "<br/>" : ""),
                  StrJoin(props, "<br/>",
                          [](std::string* out, const std::pair<std::string, std::string>& kv) {
                            if (!kv.first.empty()) {
                              return StrAppend(out, kv.first, "=", kv.second);
                            }
                            StrAppend(out, kv.second);
                          }));
  }
  if (!hlo_render_options_.show_backend_config || instr->raw_backend_config_string().empty()) {
    return "";
  }
  return StrCat("backend_config=\"", instr->raw_backend_config_string(), "\"");
}

// Renders the instruction's extra attributes plus (usually) its shape.
// Very long device-id lists and shapes are truncated with "..." so nodes
// stay readable.
std::string HloDotDumper::GetInstructionNodeExtraInfo(
    const HloInstruction* instr) {
  std::vector<std::string> lines;
  for (const auto& line : instr->ExtraAttributesToString(
           HloPrintOptions().set_print_subcomputation_mode(
               HloPrintOptions::PrintSubcomputationMode::kOff))) {
    constexpr int kMaxDeviceIdFieldLen = 128;
    if ((absl::StartsWith(line, "replica_groups=") ||
         absl::StartsWith(line, "source_target_pairs=") ||
         absl::StartsWith(line, "control-predecessors=")) &&
        line.length() > kMaxDeviceIdFieldLen) {
      // Truncate possibly-huge device lists.
      lines.push_back(HtmlLikeStringSanitize(
          StrCat(line.substr(0, kMaxDeviceIdFieldLen - 3), "...")));
    } else if (absl::StartsWith(line, "feature_group_count=")) {
      // Emphasize feature_group_count, which is easy to overlook.
      lines.push_back(StrFormat("<b>%s</b>", HtmlLikeStringSanitize(line)));
    } else {
      lines.push_back(HtmlLikeStringSanitize(line));
    }
  }
  // Shapes are shown except for fusions whose subcomputation is expanded
  // inline (the root of the expansion already shows it).
  if (instr->opcode() !=
HloOpcode::kFusion || !ShouldShowFusionSubcomputation(instr)) {
    bool shape_is_multidim = false;
    ShapeUtil::ForEachSubshape(instr->shape(),
                               [&](const Shape& s, const ShapeIndex&) {
                                 shape_is_multidim |= s.dimensions_size() > 1;
                               });
    std::string instr_shape;
    // Layout matters for >1D shapes, so include it; tuples and effectively
    // 1D shapes print without layout.
    if (instr->opcode() != HloOpcode::kTuple && shape_is_multidim) {
      instr_shape = ShapeUtil::HumanStringWithLayout(instr->shape());
    } else {
      instr_shape = ShapeUtil::HumanString(instr->shape());
    }
    constexpr int kMaxShapeLen = 64;
    if (instr_shape.length() > kMaxShapeLen) {
      instr_shape = StrCat(
          absl::string_view(instr_shape).substr(0, kMaxShapeLen - 3), "...");
    }
    lines.push_back(HtmlLikeStringSanitize(instr_shape));
  }
  if (debug_options_.xla_hlo_graph_addresses()) {
    // Debug option: show the in-memory address of the HloInstruction.
    lines.push_back(StrFormat("[%p]", instr));
  }
  return StrJoin(lines, "<br/>");
}

// Emits dot edges for the operands and control predecessors of `instr`.
void HloDotDumper::AddInstructionIncomingEdges(const HloInstruction* instr) {
  // Cap the number of parallel edges between the same pair of nodes so
  // degenerate graphs stay renderable.
  constexpr int kMaxEdgesBetweenTwoNodes = 64;
  auto add_edge = [&](const HloInstruction* from, const HloInstruction* to,
                      int64_t operand_num, bool control_edge = false) {
    if (edge_ids_.count({from, to}) > kMaxEdgesBetweenTwoNodes) {
      return;
    }
    // Redirect through GTE/expanded-fusion roots to the node actually drawn.
    from = GetNodeForEdge(from);
    // Skip sources that are hidden or folded into their users.
    if (!filter_.Show(from) || from->opcode() == HloOpcode::kConstant ||
        IsFusedBroadcastOfConstantEffectiveScalar(from) ||
        ShouldMergeIntoUsers(from)) {
      return;
    }
    VLOG(2) << "Adding edge from " << from->name() << " to " << to->name()
            << " as " << next_edge_id_;
    edge_ids_.insert({{from, to}, next_edge_id_++});
    std::string edge_label;
    if (control_edge) {
      edge_label = "style=\"dotted\" color=\"gray\" label=\"ctrl\"";
    } else if (instr->operand_count() > 1) {
      // Label each edge with its operand index when there is ambiguity.
      edge_label = StrFormat(R"( headlabel="%d", labeldistance=2)", operand_num);
    }
    constexpr char kEdgeFmt[] =
        R"(%s -> %s [arrowhead=%s tooltip="%s -> %s" %s];)";
    edges_.push_back(StrFormat(kEdgeFmt, InstructionId(from), InstructionId(to), (IsSmall(from) ?
"empty" : "normal"), from->name(), to->name(), edge_label));
  };
  if (instr->opcode() == HloOpcode::kParameter && instr->IsFused()) {
    // A fused parameter's real producer is the corresponding operand of the
    // enclosing fusion instruction (unless we are dumping that fusion's own
    // computation, in which case the parameter has no incoming edge).
    if (instr->parent() != computation_) {
      const HloInstruction* fusion = instr->parent()->FusionInstruction();
      add_edge(fusion->operand(instr->parameter_number()), instr, 0);
    }
  } else {
    for (int64_t i = 0; i < instr->operand_count(); ++i) {
      add_edge(instr->operand(i), instr, i);
    }
    for (const HloInstruction* pred : instr->control_predecessors()) {
      add_edge(pred, instr, 0, true);
    }
  }
}

// Describes trivially-recognizable called computations (e.g. "add", "max")
// as "Subcomputation: <b>...</b>" lines; fusions are skipped since their
// bodies are rendered elsewhere.
std::string HloDotDumper::GetInstructionTrivialComputationStr(
    const HloInstruction* instr) {
  if (instr->opcode() == HloOpcode::kFusion) {
    return "";
  }
  std::vector<std::string> lines;
  for (int64_t i = 0; i < instr->called_computations().size(); ++i) {
    optional<std::string> computation_type =
        MatchTrivialComputation(instr->called_computations()[i]);
    if (!computation_type) {
      continue;
    }
    if (instr->called_computations().size() == 1) {
      lines.push_back(StrFormat("Subcomputation: <b>%s</b>",
                                HtmlLikeStringSanitize(*computation_type)));
    } else {
      // Index the label when there are several called computations.
      lines.push_back(StrFormat("Subcomputation %d: <b>%s</b>", i,
                                HtmlLikeStringSanitize(*computation_type)));
    }
  }
  return StrJoin(lines, "<br/>");
}

// Resolves the node an edge should actually attach to: skips through a GTE
// and descends into expanded fusions to their root instruction.
const HloInstruction* HloDotDumper::GetNodeForEdge(
    const HloInstruction* instr) {
  if (instr->opcode() == HloOpcode::kGetTupleElement) {
    instr = instr->operand(0);
  }
  while (instr->opcode() == HloOpcode::kFusion &&
         ShouldShowFusionSubcomputation(instr)) {
    instr = instr->fused_expression_root();
  }
  return instr;
}

// Builds a NodeFilter that shows all nodes within `radius` hops of `root`,
// stopping at `boundary` nodes. BFS over operands, called computations and
// users; bitcasts do not add to the hop count.
NodeFilter MakeNodeRadiusAroundFilter(
    const HloInstruction* root, int64_t radius,
    const absl::flat_hash_set<const HloInstruction*>& boundary) {
  absl::flat_hash_map<const HloInstruction*, NodeFilterResult> nodes;
  std::deque<std::pair<const HloInstruction*, int64_t>> worklist;
  worklist.push_back({root, 0});
  while (!worklist.empty()) {
    const HloInstruction* instr;
    int64_t depth;
    std::tie(instr, depth) = worklist.front();
    worklist.pop_front();
    nodes[instr] = kNormalNode; if
(depth == radius) {
      continue;
    }
    if (boundary.contains(instr)) {
      continue;
    }
    // Traverse operands, except through tuples (other than the root itself),
    // which would drag in unrelated values.
    if (instr == root || instr->opcode() != HloOpcode::kTuple) {
      for (const HloInstruction* operand : instr->operands()) {
        if (!nodes.contains(operand)) {
          // Crossing a bitcast (on either end) is free and does not consume
          // radius.
          int new_depth = (operand->opcode() == HloOpcode::kBitcast ||
                           instr->opcode() == HloOpcode::kBitcast)
                              ? depth
                              : depth + 1;
          worklist.push_back({operand, new_depth});
        }
      }
    }
    for (const HloComputation* computation : instr->called_computations()) {
      worklist.push_back({computation->root_instruction(), depth + 1});
    }
    // Don't fan out through constants' (potentially many) users.
    if (instr->opcode() == HloOpcode::kConstant) {
      continue;
    }
    constexpr int kMaxUsersToRender = 16;
    if (instr->user_count() > kMaxUsersToRender) {
      // Too many users: mark and stop expanding in the user direction.
      nodes[instr] = kSomeUsersOmitted;
      continue;
    }
    for (const HloInstruction* user : instr->users()) {
      if (!nodes.contains(user)) {
        worklist.push_back({user, depth + 1});
      }
    }
  }
  // Nodes in other computations, and constants, count as "displayed" since
  // they are rendered by other means.
  auto is_displayed = [&](const HloInstruction* instr) {
    return nodes.contains(instr) || instr->opcode() == HloOpcode::kConstant ||
           instr->parent() != root->parent();
  };
  // Post-pass: annotate nodes whose operands are only partially shown.
  for (auto& kv : nodes) {
    const HloInstruction* instr = kv.first;
    NodeFilterResult& filter_result = kv.second;
    const auto& operands = instr->operands();
    if (absl::c_any_of(operands, is_displayed) &&
        !absl::c_all_of(operands, is_displayed)) {
      filter_result = kSomeOperandsOmitted;
    } else if (!operands.empty() && absl::c_none_of(operands, is_displayed)) {
      filter_result = kOmitNodeOperands;
    }
    // If every user turned out to be displayed anyway, clear the
    // "some users omitted" mark.
    if (filter_result == kSomeUsersOmitted &&
        absl::c_all_of(instr->users(), is_displayed)) {
      filter_result = kNormalNode;
    }
  }
  nodes[root] = kHighlightNode;
  return NodeFilter(
      [=](const HloInstruction* instr) {
        auto it = nodes.find(instr);
        if (it != nodes.end()) {
          return it->second;
        }
        // Instructions in other computations are not hidden by this filter.
        if (instr->parent() != root->parent()) {
          return kNormalNode;
        }
        return kHideNode;
      },
      nodes.size());
}

// Builds a NodeFilter showing nodes on paths from `from` to `to`, capped at
// `max_nodes`; sets *hit_limit when the cap truncated some path.
NodeFilter MakeNodeFromToFilter(const HloInstruction* from,
                                const HloInstruction* to, int64_t max_nodes,
                                bool* hit_limit) {
  *hit_limit = false;
  // BFS where each queue entry is a whole path (so shorter paths are
  // committed to `to_display` first).
  std::deque<std::vector<const HloInstruction*>> queue;
queue.push_front({from});
  absl::flat_hash_set<const HloInstruction*> visited;
  absl::flat_hash_set<const HloInstruction*> to_display = {from, to};
  while (!queue.empty() && to_display.size() < max_nodes) {
    std::vector<const HloInstruction*> path = std::move(queue.front());
    queue.pop_front();
    if (!visited.insert(path.back()).second) {
      continue;
    }
    for (const auto* user : path.back()->users()) {
      if (user == to) {
        // Found a complete path: commit as much of it as the budget allows.
        auto it = path.begin();
        for (; it != path.end() && to_display.size() < max_nodes; ++it) {
          to_display.insert(*it);
        }
        if (it != path.end()) {
          *hit_limit = true;
        }
      } else if (!visited.count(user)) {
        auto new_path = path;
        new_path.push_back(user);
        queue.push_back(std::move(new_path));
      }
    }
  }
  return NodeFilter([=](const HloInstruction* instr) {
    if (instr == from || instr == to) {
      return kHighlightNode;
    }
    return to_display.count(instr) ? kNormalNode : kHideNode;
  });
}

// Optional user-registered callback turning dot text into a URL; guarded by
// url_renderer_mu. Heap-allocated so it survives static destruction order.
absl::Mutex url_renderer_mu(absl::kConstInit);
std::function<absl::StatusOr<std::string>(absl::string_view)>* url_renderer
    ABSL_GUARDED_BY(url_renderer_mu) = nullptr;
absl::Mutex fusion_visualizer_state_mu(absl::kConstInit);

namespace {

// Accumulates the sequence of fusion-pass snapshots ("frames") for one
// computation, deduplicating consecutive identical dot graphs.
struct FusionVisualizerProgress {
  void AddState(absl::string_view dot, absl::string_view explanation,
                std::optional<std::string> to_highlight) {
    // Only store a new graph when it differs from the previous snapshot;
    // frames reference graphs by index.
    if (dot_graphs.empty() || dot_graphs.back() != dot) {
      dot_graphs.push_back(std::string(dot));
    }
    frames.push_back({static_cast<int>(dot_graphs.size() - 1),
                      std::string(explanation), to_highlight.value_or("")});
  }
  std::vector<std::string> dot_graphs;
  struct FusionFrame {
    int dot_graph;             // index into dot_graphs
    std::string label;         // human-readable description of this step
    std::string to_highlight;  // CSS selector of the node to color, or ""
  };
  std::vector<FusionFrame> frames;
};

}

// Global per-(module id, computation id) visualizer state; intentionally
// leaked to avoid static-destruction issues.
static auto& fusion_visualizer_states
    TF_GUARDED_BY(fusion_visualizer_state_mu) = *new absl::flat_hash_map<
        std::pair<int64_t, int64_t>, FusionVisualizerProgress>();

// Key identifying a computation across passes: (module id, computation id).
static std::pair<int, int> FusionVisualizerStateKey(
    const HloComputation& computation) {
  return std::make_pair(computation.parent()->unique_id(),
                        computation.unique_id());
}
// NOTE(review): this closing brace matches a scope opened outside this
// excerpt (preserved verbatim from the original).
} static
absl::StatusOr<std::string> CompressAndEncode(absl::string_view input) {
  // Minimal tsl::WritableFile adapter that appends into a std::string, so we
  // can reuse ZlibOutputBuffer without touching the filesystem.
  class WritableStringFile : public tsl::WritableFile {
   public:
    explicit WritableStringFile(std::string* data) : data_(data){};
    ~WritableStringFile() override = default;
    absl::Status Append(absl::string_view data) override {
      absl::StrAppend(data_, data);
      return absl::OkStatus();
    }
    absl::Status Close() override { return absl::OkStatus(); }
    absl::Status Flush() override { return absl::OkStatus(); }
    absl::Status Sync() override { return absl::OkStatus(); }

   private:
    std::string* data_;  // not owned
  };

  std::string compressed;
  WritableStringFile f(&compressed);
  auto gz_opts = tsl::io::ZlibCompressionOptions::GZIP();
  tsl::io::ZlibOutputBuffer gz_file(&f, gz_opts.input_buffer_size,
                                    gz_opts.output_buffer_size, gz_opts);
  TF_RETURN_IF_ERROR(gz_file.Init());
  TF_RETURN_IF_ERROR(gz_file.Append(input));
  TF_RETURN_IF_ERROR(gz_file.Close());
  std::string encoded;
  TF_RETURN_IF_ERROR(tsl::Base64Encode(compressed, &encoded));
  // Convert from URL-safe base64 alphabet back to the standard one, which is
  // what the in-page decoder consumes.
  return absl::StrReplaceAll(encoded, {{"_", "/"}, {"-", "+"}});
}

// Quotes `raw` as a JSON string literal (escapes newline, quote, backslash;
// absl::StrReplaceAll replaces in a single pass, so the escapes don't
// interfere with each other).
static std::string EscapeJSONString(absl::string_view raw) {
  return absl::StrCat(
      "\"",
      absl::StrReplaceAll(raw, {{"\n", "\\n"}, {"\"", "\\\""}, {"\\", "\\\\"}}),
      "\"");
}

// Wraps the recorded fusion frames into a self-contained interactive HTML
// page: the dot graphs are gzip+base64 embedded and rendered client-side.
absl::StatusOr<std::string> WrapFusionExplorer(
    const FusionVisualizerProgress& visualizer_progress,
    absl::string_view graph_title) {
  if (visualizer_progress.frames.empty()) {
    return Internal("Empty");
  }
  // JSON array of all distinct dot graphs.
  std::string dot_graphs =
      StrFormat("[%s]", StrJoin(visualizer_progress.dot_graphs, ", ",
                                [&](std::string* out, const std::string& dot) {
                                  StrAppend(out, EscapeJSONString(dot));
                                }));
  // JSON-ish array of [graph index, label, highlight selector] triples.
  std::string frames = StrJoin(
      visualizer_progress.frames, ", ", [&](std::string* out, const auto& p) {
        StrAppend(out, StrFormat("[%d, %s, %s]", p.dot_graph,
                                 EscapeJSONString(p.label),
                                 EscapeJSONString(p.to_highlight)));
      });
  TF_ASSIGN_OR_RETURN(std::string dot_graphs_compressed,
                      CompressAndEncode(dot_graphs));
  // $DOTS/$FRAMES/$TITLE placeholders below are substituted at the end.
  return absl::StrReplaceAll(
      R"wrapper( <!DOCTYPE html> <html> <head> <meta
charset="utf-8"> <style> html, body {height: 100%; text-align: center;} #rendered {height: 70%; width: 80%; border:1px solid black; margin: auto; } #label {width: 80%; margin: auto;} #performance_note { font-size: small; color: gray; } #frames_list { list-style: none; text-align: left; height: 20%; overflow: scroll; } #frames_list li { padding: 0.2em; margin: 0.2em; } .selected { background-color: #e0e0e0; } .selected a { color: black; text-decoration: none; } #rendered svg { height: 100% !important; width: 100% !important; } </style> </head> <body> <script src="https: integrity="sha384-LigJPbR3TOfU/Xbb+PjiN1dGJYPweLk7kiGnaMgmxnUmKWaCFKbb5tH6iLlyVhPZ" crossorigin="anonymous"></script> <script src="https: </script> <title>Fusion Explorer: $TITLE</title> <div id='rendered'><center>Loading...</center></div> <ul id='frames_list'></ul> <p>Use j/k for keyboard navigation.</p> <p id='performance_note'>Loading data...</p> <script> <!-- const renderCache = {}; const cssregex = new RegExp('stylesheet=<([^]*)\n>\n', 'gm'); const hpccWasm = window["@hpcc-js/wasm"]; const getIdFromHash = () => { let hash = window.location.hash; if (hash.indexOf('frame') == -1) { return 0; } return parseInt(window.location.hash.substring('#frame'.length, window.location.hash.length)); } const renderCurrentFrame = () => { if (!window.loaded) { return; } const frames_list = document.getElementById('frames_list'); const currId = getIdFromHash(); for (let selected of frames_list.getElementsByClassName('selected')) { selected.classList.remove('selected'); } const selected = frames_list.children[currId]; selected.classList.add('selected'); selected.scrollIntoView(); const frame = frames[currId]; const dot_ptr = frame[0]; let dot_txt = window.dots[dot_ptr]; const label = frame[1]; document.getElementById('performance_note').innerText = "Rendering..."; const results = cssregex.exec(dot_txt) let css_data = '' if (results !== null) { css_data = results[1].replace(/\s*data:.*\s*,/,''); css_data = 
unescape(css_data); dot_txt = dot_txt.replace(cssregex, ''); } let render_start = performance.now(); const render_callback = svg => { renderCache[dot_ptr] = svg; var area = document.getElementById('rendered'); area.innerHTML = `${svg}<style>${css_data}</style>`; var panzoom = svgPanZoom(area.children[0], { zoomEnabled: true, controlIconsEnabled: true, maxZoom: 200, }); var to_highlight = frame[2].length ? document.querySelector(`${frame[2]}`) : null; if (to_highlight) { to_highlight.style.setProperty('fill', 'red'); } document.getElementById('performance_note').innerText = `Rendering took ${(performance.now() - render_start).toFixed(2)}ms`; let text_nodes = document.getElementsByTagName("text"); for (var el of text_nodes) { if (title_to_id.has(el.innerHTML)) { el.style.cursor = "pointer"; } } }; if (renderCache[dot_ptr]) { render_callback(renderCache[dot_ptr]); } else { hpccWasm.graphviz.layout(dot_txt, "svg", "dot").then(render_callback); } }; const update = (delta) => { let currId = getIdFromHash(); currId = (currId + delta + frames.length) % frames.length; window.location.hash = `#frame${currId}` }; const renderFrameList = () => { const currId = getIdFromHash(); const frames_list = document.getElementById('frames_list'); for (let i=0; i<frames.length; i++) { const f = frames[i]; let frame_descr = f[1]; const rendered = document.createElement("li"); if (frame_descr == "") { frame_descr = "Unnamed state"; } rendered.innerHTML = `<a href="#frame${i}">${frame_descr}</a>`; if (i == currId) { rendered.classList.add('selected'); } frames_list.appendChild(rendered); } }; const decompress = async function(compressed) { const ds = new DecompressionStream('gzip'); const in_fetch = await fetch(`data:application/octet-stream;base64,${compressed}`); const in_blob = await in_fetch.blob(); const out_stream = in_blob.stream().pipeThrough(ds); const out_blob = await new Response(out_stream).blob(); return await out_blob.text(); } const dots_compressed = "$DOTS"; const frames = 
[$FRAMES]; let loaded = false; window.addEventListener('hashchange', () => { renderCurrentFrame(); }); window.addEventListener("keydown", (event) => { if (event.defaultPrevented) { return; } if (event.key == "j") { update(1); } else if (event.key == "k") { update(-1); } else { return; } event.preventDefault(); }, true); document.addEventListener("DOMContentLoaded", () => { decompress(dots_compressed).then(text => { window.dots = JSON.parse(text); window.loaded = true; renderFrameList(); renderCurrentFrame(); }); window.title_to_id = new Map(); for (let i=0; i < frames.length; i++) { title_to_id.set(frames[i][1], i); } document.addEventListener("click", (event) => { let txt = event.target.innerHTML; if (title_to_id.has(txt)) { let id = title_to_id.get(txt); window.location.hash = `#frame${id}`; } }); }); </script> </body> </html> )wrapper",
      // Substitute the embedded data and title into the template above.
      {{"$DOTS", dot_graphs_compressed},
       {"$FRAMES", frames},
       {"$TITLE", graph_title}});
}

// Canonical page title: "<module name>_<computation name>".
static std::string GraphTitle(const HloComputation& computation) {
  return absl::StrCat(computation.parent()->name(), "_", computation.name());
}

// Renders the fusion-explorer page for whatever state has been registered so
// far for `computation` (see RegisterFusionState).
absl::StatusOr<std::string> WrapFusionExplorer(
    const HloComputation& computation) {
  absl::MutexLock lock(&fusion_visualizer_state_mu);
  const FusionVisualizerProgress& visualizer_progress =
      fusion_visualizer_states[FusionVisualizerStateKey(computation)];
  return WrapFusionExplorer(visualizer_progress, GraphTitle(computation));
}

// Wraps a single dot graph in the explorer HTML page (one-frame explorer).
static absl::StatusOr<std::string> WrapDotInHtml(absl::string_view dot,
                                                 absl::string_view title) {
  FusionVisualizerProgress progress;
  progress.AddState(dot, title, std::nullopt);
  return WrapFusionExplorer(progress, title);
}

// Converts raw dot output into the requested RenderedGraphFormat. For kUrl
// the caller must already have verified that a url_renderer is registered.
static absl::StatusOr<std::string> WrapDotInFormat(
    const HloComputation& computation, absl::string_view dot,
    RenderedGraphFormat format) ABSL_EXCLUSIVE_LOCKS_REQUIRED(url_renderer_mu) {
  switch (format) {
    case RenderedGraphFormat::kUrl:
      CHECK(url_renderer != nullptr)
          << "Should have checked url_renderer != null before calling.";
      return (*url_renderer)(dot);
case RenderedGraphFormat::kHtml:
      return WrapDotInHtml(dot, GraphTitle(computation));
    case RenderedGraphFormat::kDot:
      return std::string(dot);
  }
}

// Installs the process-wide dot -> URL renderer used by kUrl rendering.
// Later registrations replace earlier ones (with a warning).
void RegisterGraphToURLRenderer(
    std::function<absl::StatusOr<std::string>(absl::string_view)> renderer) {
  absl::MutexLock lock(&url_renderer_mu);
  if (url_renderer != nullptr) {
    LOG(WARNING) << "Multiple calls to RegisterGraphToURLRenderer. Last call "
                    "wins, but because order of initialization in C++ is "
                    "nondeterministic, this may not be what you want.";
  }
  delete url_renderer;
  url_renderer =
      new std::function<absl::StatusOr<std::string>(absl::string_view)>(
          std::move(renderer));
}

// Records one fusion-decision snapshot for `computation`: renders the
// neighborhood of `consumer` and remembers which producer (if any) should be
// highlighted when the frame is replayed in the fusion explorer.
void RegisterFusionState(const HloComputation& computation,
                         absl::string_view label,
                         const HloInstruction& consumer,
                         const HloInstruction* producer) {
  absl::MutexLock lock(&fusion_visualizer_state_mu);
  FusionVisualizerProgress& fusion_progress =
      fusion_visualizer_states[FusionVisualizerStateKey(computation)];
  // Radius of the rendered neighborhood around the consumer.
  static constexpr int kRenderRadius = 4;
  // Stop expansion at the consumer's users; their subgraphs aren't relevant.
  absl::flat_hash_set<const HloInstruction*> render_boundary;
  for (const HloInstruction* user : consumer.users()) {
    render_boundary.insert(user);
  }
  HloDotDumper dumper(
      consumer.parent(),
      StrCat("Rendering of ", kRenderRadius, " nodes around fusion consumer"),
      consumer.GetModule()->config().debug_options(), {},
      MakeNodeRadiusAroundFilter(&consumer, kRenderRadius, render_boundary));
  std::string dot_txt = dumper.Dump();
  std::optional<std::string> producer_to_highlight;
  if (producer) {
    producer_to_highlight = dumper.CssIdForInstruction(*producer);
  }
  fusion_progress.AddState(dot_txt, label, producer_to_highlight);
}

// Renders `computation` as a graph in the requested format. Holding
// url_renderer_mu satisfies WrapDotInFormat's lock annotation.
absl::StatusOr<std::string> RenderGraph(
    const HloComputation& computation, absl::string_view label,
    const DebugOptions& debug_options, RenderedGraphFormat format,
    HloRenderOptions hlo_render_options,
    std::optional<absl::flat_hash_map<const HloInstruction*, ColorStats>>
        color_map) {
  absl::MutexLock lock(&url_renderer_mu);
  if (format == RenderedGraphFormat::kUrl && url_renderer == nullptr) {
return Unavailable("Can't render as URL; no URL renderer was registered.");
  }
  std::string rendered_dot =
      HloDotDumper(&computation, label, debug_options, hlo_render_options,
                   NodeFilter(), color_map)
          .Dump();
  return WrapDotInFormat(computation, rendered_dot, format);
}

// Renders the entry computation as a multi-frame explorer page with one
// frame per interesting instruction (constants and GTEs are skipped).
absl::StatusOr<std::string> RenderAllComputationsToHtml(
    const HloModule& module) {
  FusionVisualizerProgress progress;
  std::vector<HloInstruction*> instrs =
      module.entry_computation()->MakeInstructionPostOrder();
  // Reverse post-order: roots first.
  absl::c_reverse(instrs);
  for (const HloInstruction* instr : instrs) {
    if (absl::c_linear_search(
            std::vector<HloOpcode>{HloOpcode::kConstant,
                                   HloOpcode::kGetTupleElement},
            instr->opcode())) {
      continue;
    }
    HloRenderOptions opts;
    opts.show_fusion_subcomputations = true;
    opts.show_backend_config = true;
    opts.show_while_subcomputations = instr->opcode() == HloOpcode::kWhile;
    static constexpr int64_t max_nodes_to_render = 100;
    absl::flat_hash_set<const HloInstruction*> render_boundary;
    NodeFilter filter = MakeNodeRadiusAroundFilter(instr, 2, render_boundary);
    // Fall back to radius 1 when radius 2 would render too many nodes.
    if (filter.GetNumRendered().value_or(1) > max_nodes_to_render) {
      filter = MakeNodeRadiusAroundFilter(instr, 1, render_boundary);
    }
    std::string dot =
        HloDotDumper(module.entry_computation(), instr->name(),
                     module.config().debug_options(), opts, filter)
            .Dump();
    progress.AddState(dot, instr->name(), std::nullopt);
  }
  return WrapFusionExplorer(progress, module.name());
}

// Renders the subgraph within `radius` hops of `node`, stopping at
// `boundary` nodes.
absl::StatusOr<std::string> RenderNeighborhoodAround(
    const HloInstruction& node, int radius, RenderedGraphFormat format,
    HloRenderOptions hlo_render_options,
    const absl::flat_hash_set<const HloInstruction*>& boundary,
    std::optional<absl::flat_hash_map<const HloInstruction*, ColorStats>>
        color_map) {
  absl::MutexLock lock(&url_renderer_mu);
  if (format == RenderedGraphFormat::kUrl && url_renderer == nullptr) {
    return FailedPrecondition(
        "Can't render as URL; no URL renderer was registered.");
  }
  std::string label =
      StrCat("Neighborhood of ", radius, " nodes around ", node.name());
std::string rendered_dot =
      HloDotDumper(node.parent(), label,
                   node.GetModule()->config().debug_options(),
                   hlo_render_options,
                   MakeNodeRadiusAroundFilter(&node, radius, boundary),
                   color_map)
          .Dump();
  return WrapDotInFormat(*node.parent(), rendered_dot, format);
}

// Renders every path from `from` to `to`, capped at `max_nodes` rendered
// nodes; the label warns when the cap truncated the path set.
absl::StatusOr<std::string> RenderAllPathsFromTo(
    const HloInstruction& from, const HloInstruction& to, int64_t max_nodes,
    RenderedGraphFormat format, HloRenderOptions hlo_render_options) {
  absl::MutexLock lock(&url_renderer_mu);
  if (format == RenderedGraphFormat::kUrl && url_renderer == nullptr) {
    return FailedPrecondition(
        "Can't render as URL; no URL renderer was registered.");
  }
  CHECK_EQ(from.parent(), to.parent()) << "Nodes must be in same computation!";
  auto debug_options = from.GetModule()->config().debug_options();
  bool hit_limit = false;
  NodeFilter filter = MakeNodeFromToFilter(&from, &to, max_nodes, &hit_limit);
  std::string label;
  if (!hit_limit) {
    label = StrCat("All paths from ", from.name(), " to ", to.name());
  } else {
    label = StrCat(max_nodes, " nodes on the shortest paths from ",
                   from.name(), " to ", to.name(),
                   "<br/><br/>***SHOWING ONLY A SUBSET OF ALL PATHS BETWEEN "
                   "NODES***<br/><br/>");
  }
  std::string rendered_dot =
      HloDotDumper(from.parent(), label, debug_options, hlo_render_options,
                   filter)
          .Dump();
  return WrapDotInFormat(*from.parent(), rendered_dot, format);
}
}
#include "xla/service/hlo_graph_dumper.h" #include "absl/container/flat_hash_map.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/literal_util.h" #include "xla/test.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/test_utils.h" #include "xla/xla.pb.h" namespace xla { namespace { using absl::StrCat; using ::testing::HasSubstr; using HloGraphDumperTest = HloTestBase; std::string TestName() { return ::testing::UnitTest::GetInstance()->current_test_info()->name(); } TEST_F(HloGraphDumperTest, NestedFusion) { HloComputation::Builder b("b"); auto shape = ShapeUtil::MakeShape(F32, {10, 100}); std::vector<HloInstruction*> params; for (int i = 0; i <= 4; ++i) { params.push_back(b.AddInstruction( HloInstruction::CreateParameter(i, shape, StrCat("param", i)))); } std::vector<HloInstruction*> sums; sums.push_back(b.AddInstruction(HloInstruction::CreateBinary( shape, HloOpcode::kAdd, params[0], params[1]))); for (int i = 0; i <= 2; ++i) { sums.push_back(b.AddInstruction(HloInstruction::CreateBinary( shape, HloOpcode::kAdd, sums[i], params[i + 2]))); } HloModuleConfig config; HloModule m(TestName(), config); m.AddEntryComputation(b.Build()); HloComputation* root_computation = m.entry_computation(); auto* outer_fusion = root_computation->CreateFusionInstruction( {sums[3], sums[2], sums[1], sums[0]}, HloInstruction::FusionKind::kLoop); std::vector<HloInstruction*> fused_sums; for (auto* instr : outer_fusion->fused_instructions_computation() ->MakeInstructionPostOrder()) { if (instr->opcode() == HloOpcode::kAdd) { fused_sums.push_back(instr); } } auto* inner_fusion = outer_fusion->fused_instructions_computation()->CreateFusionInstruction( {fused_sums[1], fused_sums[0]}, HloInstruction::FusionKind::kLoop); TF_ASSERT_OK_AND_ASSIGN( std::string graph, RenderGraph(*root_computation, "", 
DebugOptions(), RenderedGraphFormat::kDot)); for (const HloComputation* computation : {root_computation, inner_fusion->fused_instructions_computation(), outer_fusion->fused_instructions_computation()}) { for (const HloInstruction* instruction : computation->instructions()) { EXPECT_THAT(graph, HasSubstr(instruction->name())); } } const HloInstruction* inner_sum = nullptr; for (const HloInstruction* instruction : inner_fusion->fused_instructions_computation()->instructions()) { if (instruction->opcode() == HloOpcode::kAdd) { inner_sum = instruction; break; } } ASSERT_NE(inner_sum, nullptr); TF_ASSERT_OK_AND_ASSIGN(std::string neighborhood_graph, RenderNeighborhoodAround(*inner_sum, 1, RenderedGraphFormat::kDot)); EXPECT_THAT(neighborhood_graph, HasSubstr(inner_sum->name())); } TEST_F(HloGraphDumperTest, Constant) { HloComputation::Builder b("b"); auto instruction = b.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(-42))); instruction->SetAndSanitizeName("i_am_a_constant_root_instruction"); HloModuleConfig config; HloModule m(TestName(), config); HloComputation* root_computation = m.AddEntryComputation(b.Build()); TF_ASSERT_OK_AND_ASSIGN( std::string graph, RenderGraph(*root_computation, "an_empty_graph", DebugOptions(), RenderedGraphFormat::kDot)); EXPECT_THAT(graph, HasSubstr("an_empty_graph")); } TEST_F(HloGraphDumperTest, TupleConstant) { Shape tuple_shape = ShapeUtil::MakeTupleShape( {ShapeUtil::MakeShape(F32, {3, 2}), ShapeUtil::MakeShape(S32, {4, 5})}); HloComputation::Builder b("b"); auto constant = b.AddInstruction( HloInstruction::CreateConstant(Literal::CreateFromShape(tuple_shape))); auto gte = b.AddInstruction(HloInstruction::CreateGetTupleElement( ShapeUtil::MakeShape(F32, {3, 2}), constant, 0)); HloModuleConfig config; HloModule m(TestName(), config); HloComputation* root_computation = m.AddEntryComputation(b.Build(gte)); TF_ASSERT_OK_AND_ASSIGN( std::string graph, RenderGraph(*root_computation, "tuple_constant", 
DebugOptions(), RenderedGraphFormat::kDot)); EXPECT_THAT(graph, HasSubstr("tuple_constant")); EXPECT_THAT(graph, HasSubstr("constant (f32[3,2], s32[4,5])")); } TEST_F(HloGraphDumperTest, Compare) { const char* hlo_string = R"( HloModule comp ENTRY comp { param.0 = f32[10] parameter(0) param.1 = f32[10] parameter(1) ROOT lt = pred[10] compare(param.0, param.1), direction=LT })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( std::string graph, RenderGraph(*module->entry_computation(), "tuple_constant", DebugOptions(), RenderedGraphFormat::kDot)); EXPECT_THAT(graph, HasSubstr("direction=LT")); } TEST_F(HloGraphDumperTest, HasStatisticsViz) { const char* hlo_string = R"( HloModule comp ENTRY comp { param.0 = f32[10] parameter(0), statistics={visualizing_index=0,stat-0=0.5} param.1 = f32[10] parameter(1), statistics={visualizing_index=1,stat-0=55.5,stat-1=44.4} ROOT lt = pred[10] compare(param.0, param.1), direction=LT })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( std::string graph, RenderGraph(*module->entry_computation(), "tuple_constant", DebugOptions(), RenderedGraphFormat::kDot)); } TEST_F(HloGraphDumperTest, RootIsConstant) { const char* hlo_string = R"( HloModule indexed_conditional %then_branch (empty: ()) -> f32[] { %empty = () parameter(0) ROOT %then = f32[] constant(1) } %else_branch (empty.1: ()) -> f32[] { %empty.1 = () parameter(0) ROOT %else = f32[] constant(2) } ENTRY %conditional_select (constant: pred[]) -> (f32[]) { %constant = pred[] parameter(0) %emptytuple = () tuple() %conditional = f32[] conditional(pred[] %constant, () %emptytuple, () %emptytuple), true_computation=%then_branch, false_computation=%else_branch ROOT %t = (f32[]) tuple(f32[] %conditional) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( std::string graph, RenderGraph(*module->entry_computation(), 
"tuple_constant", DebugOptions(), RenderedGraphFormat::kDot)); } TEST_F(HloGraphDumperTest, OverrideColors) { const char* hlo_string = R"( HloModule comp ENTRY comp { param.0 = f32[10] parameter(0) param.1 = f32[10] parameter(1) ROOT lt = pred[10] compare(param.0, param.1), direction=LT })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); absl::flat_hash_map<const HloInstruction*, ColorStats> color_map; ColorStats color_stats_1; color_stats_1.color = "#A9C343"; color_stats_1.stats = absl::StrFormat("%.3f", 1.11); ColorStats color_stats_2; color_stats_2.color = "#BC8A3F"; color_stats_2.stats = absl::StrFormat("%.3f", 2.22); color_map[module->entry_computation()->GetInstructionWithName("param.0")] = color_stats_1; color_map[module->entry_computation()->GetInstructionWithName("param.1")] = color_stats_2; HloRenderOptions hlo_render_options; hlo_render_options.override_node_colors = true; TF_ASSERT_OK_AND_ASSIGN( std::string graph, RenderGraph(*module->entry_computation(), "tuple_constant", DebugOptions(), RenderedGraphFormat::kDot, hlo_render_options, color_map)); EXPECT_THAT(graph, HasSubstr("#A9C343")); EXPECT_THAT(graph, HasSubstr("1.110")); EXPECT_THAT(graph, HasSubstr("#BC8A3F")); EXPECT_THAT(graph, HasSubstr("2.220")); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_graph_dumper.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_graph_dumper_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
10bb58ac-bb24-4434-b6f2-4ba070e2f308
cpp
tensorflow/tensorflow
reduce_scatter_reassociate
third_party/xla/xla/service/reduce_scatter_reassociate.cc
third_party/xla/xla/service/reduce_scatter_reassociate_test.cc
#include "xla/service/reduce_scatter_reassociate.h" #include <optional> #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/service/all_reduce_key.h" #include "xla/service/collective_ops_utils.h" #include "xla/service/hlo_domain_map.h" #include "tsl/platform/errors.h" namespace xla { namespace { bool AreCompatible(const HloReduceScatterInstruction *rs0, const HloReduceScatterInstruction *rs1, ReductionKind op_kind) { std::optional<AllReduceKey> key0 = GetAllReduceKey(rs0); std::optional<AllReduceKey> key1 = GetAllReduceKey(rs1); auto kind0 = MatchReductionComputation(rs0->to_apply()); auto dims_match = rs0->scatter_dimension() == rs1->scatter_dimension(); return key0 && key1 && kind0 && *key0 == *key1 && kind0 == op_kind && dims_match; } } absl::StatusOr<bool> ReduceScatterReassociate::Run( HloModule *module, const absl::flat_hash_set<absl::string_view> &execution_threads) { if (hlo_query::ContainsLayoutConstrainedCollective( *module, HloOpcode::kReduceScatter)) { VLOG(1) << "Skip ReduceScatterReassociate because the module contains reduce-" "scatter with constrained layouts"; return false; } int64_t next_channel_id = hlo_query::NextChannelId(*module); bool changed = false; for (auto computation : module->computations(execution_threads)) { for (HloInstruction *inst : computation->MakeInstructionPostOrder()) { std::optional<ReductionKind> kind = MatchReductionInstruction(inst); if (!kind || inst->operand(0)->opcode() != HloOpcode::kReduceScatter || inst->operand(1)->opcode() != HloOpcode::kReduceScatter || !inst->shape().IsArray()) { continue; } auto *rs0 = Cast<HloReduceScatterInstruction>(inst->mutable_operand(0)); auto *rs1 = Cast<HloReduceScatterInstruction>(inst->mutable_operand(1)); if (!AreCompatible(rs0, rs1, *kind)) { VLOG(2) << "Reduce-Scatter operations are not compatible, skipping"; continue; } if 
(rs0->user_count() != 1 || rs1->user_count() != 1) { VLOG(2) << "Reduce-Scatter operations have > 1 users"; continue; } HloInstruction *new_op = computation->AddInstruction(inst->CloneWithNewOperands( rs0->mutable_operand(0)->shape(), {rs0->mutable_operand(0), rs1->mutable_operand(0)})); HloInstruction *new_rs = computation->AddInstruction( rs0->CloneWithNewOperands(inst->shape(), {new_op})); if (new_rs->channel_id()) { new_rs->set_channel_id(next_channel_id++); } TF_RETURN_IF_ERROR(inst->ReplaceAllUsesWith(new_rs)); TF_RETURN_IF_ERROR(computation->RemoveInstruction(inst)); TF_RETURN_IF_ERROR(computation->RemoveInstruction(rs0)); if (rs0 != rs1) { TF_RETURN_IF_ERROR(computation->RemoveInstruction(rs1)); } changed = true; } } return changed; } }
#include "xla/service/reduce_scatter_reassociate.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/tests/hlo_test_base.h" namespace xla { namespace { namespace m = xla::testing::opcode_matchers; class ReduceScatterReassociateTest : public HloTestBase { public: absl::StatusOr<std::unique_ptr<HloModule>> RunPass( absl::string_view hlo_module, bool expect_change) { TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_module)); auto changed = ReduceScatterReassociate().Run(module.get()); if (!changed.ok()) { return changed.status(); } EXPECT_EQ(changed.value(), expect_change); return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module)); } size_t ReduceScatterCount(std::unique_ptr<HloModule>& module) { return absl::c_count_if(module->entry_computation()->instructions(), HloPredicateIsOp<HloOpcode::kReduceScatter>); } }; TEST_F(ReduceScatterReassociateTest, Simple) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum ROOT add = f32[4] add(rs0, rs1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, true)); EXPECT_THAT(module->entry_computation()->root_instruction(), m::ReduceScatter(m::Add(m::Parameter(0), m::Parameter(1)))); EXPECT_EQ(ReduceScatterCount(module), 1); } TEST_F(ReduceScatterReassociateTest, SimpleWithConstrainLayout) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) rs0 = f32[4] reduce-scatter(p0), dimensions={0}, constrain_layout=true, to_apply=sum rs1 = f32[4] reduce-scatter(p1), dimensions={0}, constrain_layout=true, to_apply=sum ROOT add = 
f32[4] add(rs0, rs1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, false)); } TEST_F(ReduceScatterReassociateTest, SimpleChain) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) p2 = f32[8] parameter(2) p3 = f32[8] parameter(3) rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum rs2 = f32[4] reduce-scatter(p2), dimensions={0}, to_apply=sum rs3 = f32[4] reduce-scatter(p3), dimensions={0}, to_apply=sum add0 = f32[4] add(rs0, rs1) add1 = f32[4] add(add0, rs2) ROOT add2 = f32[4] add(add1, rs3) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, true)); EXPECT_THAT( module->entry_computation()->root_instruction(), m::ReduceScatter(m::Add( m::Add(m::Add(m::Parameter(0), m::Parameter(1)), m::Parameter(2)), m::Parameter(3)))); EXPECT_EQ(ReduceScatterCount(module), 1); } TEST_F(ReduceScatterReassociateTest, SimpleTree) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) p2 = f32[8] parameter(2) p3 = f32[8] parameter(3) rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum rs2 = f32[4] reduce-scatter(p2), dimensions={0}, to_apply=sum rs3 = f32[4] reduce-scatter(p3), dimensions={0}, to_apply=sum add0 = f32[4] add(rs0, rs1) add1 = f32[4] add(rs2, rs3) ROOT add2 = f32[4] add(add0, add1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, true)); EXPECT_THAT( module->entry_computation()->root_instruction(), m::ReduceScatter(m::Add(m::Add(m::Parameter(0), m::Parameter(1)), m::Add(m::Parameter(2), m::Parameter(3))))); EXPECT_EQ(ReduceScatterCount(module), 1); } TEST_F(ReduceScatterReassociateTest, MismatchOp0) { 
absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } max { a = f32[] parameter(0) b = f32[] parameter(1) ROOT r = f32[] maximum(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=max ROOT add = f32[4] add(rs0, rs1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, false)); } TEST_F(ReduceScatterReassociateTest, MismatchOp1) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } max { a = f32[] parameter(0) b = f32[] parameter(1) ROOT r = f32[] maximum(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=max rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=max ROOT add = f32[4] add(rs0, rs1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, false)); } TEST_F(ReduceScatterReassociateTest, MismatchDimension) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8,8] parameter(0) p1 = f32[8,8] parameter(1) rs0 = f32[8,8] reduce-scatter(p0), dimensions={0}, to_apply=sum rs1 = f32[8,8] reduce-scatter(p1), dimensions={1}, to_apply=sum ROOT add = f32[8,8] add(rs0, rs1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, false)); } TEST_F(ReduceScatterReassociateTest, MismatchReplicaGroups) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) rs0 = f32[4] reduce-scatter(p0), dimensions={0}, replica_groups={{0}}, to_apply=sum rs1 = f32[4] reduce-scatter(p1), dimensions={0}, replica_groups={}, to_apply=sum ROOT add = f32[4] 
add(rs0, rs1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, false)); } TEST_F(ReduceScatterReassociateTest, MismatchHasChannelId) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) rs0 = f32[4] reduce-scatter(p0), dimensions={0}, channel_id=3, to_apply=sum rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum ROOT add = f32[4] add(rs0, rs1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, false)); } TEST_F(ReduceScatterReassociateTest, MismatchUseGlobalDeviceId) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) rs0 = f32[4] reduce-scatter(p0), dimensions={0}, replica_groups={{0,1}}, channel_id=3, use_global_device_ids=true, to_apply=sum rs1 = f32[4] reduce-scatter(p1), dimensions={0}, replica_groups={{0,1}}, channel_id=4, to_apply=sum ROOT add = f32[4] add(rs0, rs1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, false)); } TEST_F(ReduceScatterReassociateTest, NotSingleUser) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum rs1 = f32[4] reduce-scatter(p1), dimensions={0}, to_apply=sum add = f32[4] add(rs0, rs1) ROOT t = (f32[4], f32[4]) tuple(rs0, add) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, false)); } TEST_F(ReduceScatterReassociateTest, DoubleUse) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) rs0 = f32[4] reduce-scatter(p0), dimensions={0}, to_apply=sum 
add = f32[4] add(rs0, rs0) ROOT c = f32[4] copy(add) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, true)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_scatter_reassociate.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_scatter_reassociate_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
3bc61d67-4a76-418b-a671-c9c315c7a6bd
cpp
tensorflow/tensorflow
change_op_data_type
third_party/xla/xla/service/change_op_data_type.cc
third_party/xla/xla/service/change_op_data_type_test.cc
#include "xla/service/change_op_data_type.h" #include <optional> #include "xla/service/hlo_creation_utils.h" #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3) #include "xla/service/cpu/onednn_contraction_rewriter.h" #endif namespace xla { namespace { std::optional<PrimitiveType> GetUniformOperandType( const HloInstruction* instr) { std::optional<PrimitiveType> type; for (const HloInstruction* operand : instr->operands()) { if (!type.has_value()) { type = operand->shape().element_type(); } else if (operand->shape().element_type() != type.value()) { return std::nullopt; } } return type; } } absl::StatusOr<bool> ChangeOpDataType::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; HloCloner default_cloner = [](const HloInstruction* inst, const Shape& shape, absl::Span<HloInstruction* const> operands) { return inst->CloneWithNewOperands(shape, operands); }; HloCloner cloner = cloner_ ? cloner_ : default_cloner; for (HloComputation* comp : module->MakeNonfusionComputations(execution_threads)) { for (HloInstruction* instr : comp->MakeInstructionPostOrder()) { std::optional<PrimitiveType> operand_type = GetUniformOperandType(instr); if (!op_matcher_(instr) || !operand_type.has_value() || !instr->shape().IsArray() || instr->opcode() == HloOpcode::kParameter) { continue; } const PrimitiveType from_type = *operand_type; auto it = to_type_map_.find(from_type); if (it == to_type_map_.end()) { continue; } #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3) if (cpu::OneDnnContractionRewriter::ShouldRewriteInstr(instr, true)) { continue; } #endif const PrimitiveType to_type = it->second; absl::InlinedVector<HloInstruction*, 8> new_operands; for (HloInstruction* operand : instr->mutable_operands()) { new_operands.push_back(MakeConvertToHlo(operand, to_type)); } Shape new_shape = instr->shape(); new_shape.set_element_type(to_type); HloInstruction* new_instr = comp->AddInstruction(cloner(instr, new_shape, new_operands)); 
TF_RETURN_IF_ERROR(comp->ReplaceInstruction( instr, MakeConvertToHlo(new_instr, from_type))); changed = true; } } return changed; } }
#include "xla/service/change_op_data_type.h" #include <string> #include <tuple> #include <vector> #include "absl/types/span.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/tests/hlo_test_base.h" namespace xla { namespace { namespace m = ::xla::match; class ChangeOpDataTypeTest : public HloTestBase { public: ChangeOpDataTypeTest() : HloTestBase(false, false) {} }; TEST_F(ChangeOpDataTypeTest, Simple) { const char* const kModuleStr = R"( HloModule module ENTRY entry { ROOT op = add(f16[10] parameter(0), f16[10] parameter(1)) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kModuleStr)); ChangeOpDataType pass(F16, F32, HloPredicateTrue); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get())); SCOPED_TRACE(module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Convert(m::Add(m::Convert(m::Parameter(0)).WithShape(F32, {10}), m::Convert(m::Parameter(1)).WithShape(F32, {10}))) .WithShape(F16, {10}))); } TEST_F(ChangeOpDataTypeTest, AllTypesMustBeSame) { const char* const kModuleStr = R"( HloModule module ENTRY entry { ROOT op = f16[1] dynamic-slice(f16[10] parameter(0), s32[1] parameter(1)), dynamic_slice_sizes={1} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kModuleStr)); ChangeOpDataType pass(F16, F32, HloPredicateTrue); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get())); SCOPED_TRACE(module->ToString()); EXPECT_FALSE(changed); } TEST_F(ChangeOpDataTypeTest, DotAndConv) { const char* const kModuleStr = R"( HloModule module ENTRY entry { dot = f16[10,10] dot(f16[10,10] parameter(0), f16[10,10] parameter(1)), lhs_contracting_dims={1}, rhs_contracting_dims={0} conv = f16[1,2,1] convolution(f16[1,2,1] parameter(2), f16[1,1,1] parameter(3)), window={size=1}, dim_labels=b0f_0io->b0f root = tuple(dot, conv) })"; 
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kModuleStr)); ChangeOpDataType pass( F16, F32, HloPredicateIsOp<HloOpcode::kDot, HloOpcode::kConvolution>); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get())); SCOPED_TRACE(module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::Convert( m::Dot(m::Convert(m::Parameter(0)).WithShape(F32, {10, 10}), m::Convert(m::Parameter(1)).WithShape(F32, {10, 10}))) .WithShape(F16, {10, 10}), m::Convert(m::Convolution( m::Convert(m::Parameter(2)).WithShape(F32, {1, 2, 1}), m::Convert(m::Parameter(3)).WithShape(F32, {1, 1, 1}))) .WithShape(F16, {1, 2, 1})))); } TEST_F(ChangeOpDataTypeTest, SimpleWithCloner) { const char* const kModuleStr = R"( HloModule module ENTRY entry { ROOT op = add(f16[10] parameter(0), f16[10] parameter(1)) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kModuleStr)); HloPredicate matcher = HloPredicateTrue; int count = 0; ChangeOpDataType::HloCloner cloner = [&count](const HloInstruction* instr, const Shape& shape, absl::Span<HloInstruction* const> operands) { count++; return instr->CloneWithNewOperands(shape, operands); }; ChangeOpDataType pass(F16, F32, matcher, cloner); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get())); SCOPED_TRACE(module->ToString()); EXPECT_TRUE(changed); EXPECT_EQ(count, 1); } TEST_F(ChangeOpDataTypeTest, SimpleWithMultipleTypes) { const char* const kModuleStr = R"( HloModule module ENTRY entry { op1 = add(f16[10] parameter(0), f16[10] parameter(1)) op2 = add(u16[10] parameter(2), u16[10] parameter(3)) ROOT tup = (f16[10], u16[10]) tuple(op1, op2) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kModuleStr)); HloPredicate matcher = HloPredicateTrue; ChangeOpDataType pass({{F16, F32}, {U16, U32}}, matcher); TF_ASSERT_OK_AND_ASSIGN(bool changed, 
RunHloPass(&pass, module.get())); SCOPED_TRACE(module->ToString()); EXPECT_TRUE(changed); const HloInstruction* root = module->entry_computation()->root_instruction(); EXPECT_EQ(root->opcode(), HloOpcode::kTuple); EXPECT_EQ(root->operand_count(), 2); EXPECT_THAT( root->operand(0), GmockMatch( m::Convert(m::Add(m::Convert(m::Parameter(0)).WithShape(F32, {10}), m::Convert(m::Parameter(1)).WithShape(F32, {10}))) .WithShape(F16, {10}))); EXPECT_THAT( root->operand(1), GmockMatch( m::Convert(m::Add(m::Convert(m::Parameter(2)).WithShape(U32, {10}), m::Convert(m::Parameter(3)).WithShape(U32, {10}))) .WithShape(U16, {10}))); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/change_op_data_type.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/change_op_data_type_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
2292926a-6678-41a1-8dd4-0c5f8f61af9c
cpp
tensorflow/tensorflow
collective_permute_decomposer
third_party/xla/xla/service/collective_permute_decomposer.cc
third_party/xla/xla/service/collective_permute_decomposer_test.cc
#include "xla/service/collective_permute_decomposer.h" #include <cstdint> #include <optional> #include <string> #include <utility> #include <vector> #include "absl/status/status.h" #include "absl/strings/str_join.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/collective_ops_utils.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/graphcycles/graphcycles.h" #include "xla/shape_util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" namespace xla { namespace { using SourceTargetPair = std::pair<int64_t, int64_t>; using SourceTargetPairs = std::vector<SourceTargetPair>; bool HasCycles(const SourceTargetPairs& pairs) { GraphCycles graph; absl::flat_hash_map<int64_t, int32_t> replica_to_node_id; auto get_node_id = [&](int64_t replica) { auto it_and_inserted = replica_to_node_id.emplace(replica, -1); auto it = it_and_inserted.first; auto inserted = it_and_inserted.second; if (inserted) { it->second = graph.NewNode(); } return it->second; }; for (auto pair : pairs) { auto source = get_node_id(pair.first); auto target = get_node_id(pair.second); VLOG(3) << "See source " << source << " -> target " << target; if (!graph.InsertEdge(source, target)) { VLOG(3) << "Detected cycles"; return true; } } return false; } bool ShouldDecompose(const HloCollectivePermuteInstruction& collective_permute, int64_t threshold_in_bytes) { if (!collective_permute.channel_id().has_value()) { return false; } const Shape& result_shape = collective_permute.shape(); if (!result_shape.IsArray()) { return false; } if (ShapeUtil::ByteSizeOf(result_shape) < threshold_in_bytes) { return false; } return !HasCycles(collective_permute.source_target_pairs()); } bool MayPipeline(const HloCollectivePermuteInstruction& collective_permute) { const HloInstruction* data = collective_permute.operand(0); 
return (data->opcode() == HloOpcode::kGetTupleElement && data->operand(0)->opcode() == HloOpcode::kParameter); } absl::Status DecomposeCollectivePermute( HloCollectivePermuteInstruction* collective_permute, HloComputation* computation, const std::string& pipeline_decision) { int64_t channel_id = collective_permute->channel_id().value(); HloInstruction* data = collective_permute->mutable_operand(0); const Shape& data_shape = data->shape(); const OpMetadata& metadata = collective_permute->metadata(); const xla::FrontendAttributes& old_attributes = collective_permute->frontend_attributes(); xla::FrontendAttributes attributes; std::string source_target_pairs_string = "{" + absl::StrJoin(collective_permute->source_target_pairs(), ",", absl::PairFormatter( [](std::string* out, int64_t value) { absl::StrAppend(out, "{", value); }, ",", [](std::string* out, int64_t value) { absl::StrAppend(out, value, "}"); })) + "}"; attributes.mutable_map()->insert(old_attributes.map().begin(), old_attributes.map().end()); (*attributes.mutable_map())[kSendRecvSourceTargetPairsAttr] = source_target_pairs_string; HloInstruction* after_all = computation->AddInstruction(HloInstruction::CreateToken()); HloInstruction* recv = computation->AddInstruction( HloInstruction::CreateRecv(data_shape, after_all, channel_id)); recv->add_frontend_attributes(attributes); recv->set_metadata(metadata); HloInstruction* send = computation->AddInstruction( HloInstruction::CreateSend(data, after_all, channel_id)); send->add_frontend_attributes(attributes); send->set_metadata(metadata); HloInstruction* recv_done = computation->AddInstruction(HloInstruction::CreateRecvDone(recv)); HloInstruction* send_done = computation->AddInstruction(HloInstruction::CreateSendDone(send)); TF_RETURN_IF_ERROR(send->AddControlDependencyTo(recv_done)); HloInstruction* recv_data = computation->AddInstruction( HloInstruction::CreateGetTupleElement(recv_done, 0)); TF_RETURN_IF_ERROR(collective_permute->ReplaceAllUsesWith(recv_data)); 
TF_RETURN_IF_ERROR( computation->RemoveInstructionAndUnusedOperands(collective_permute)); if (!pipeline_decision.empty()) { xla::FrontendAttributes attributes; (*attributes.mutable_map())[kSendRecvPipelineAttr] = pipeline_decision; send->add_frontend_attributes(attributes); send_done->add_frontend_attributes(attributes); recv->add_frontend_attributes(attributes); recv_done->add_frontend_attributes(attributes); } return absl::OkStatus(); } bool IsForwardCycle(const SourceTargetPair& backedge, const SourceTargetPairs& others) { int64_t num_pairs = others.size() + 1; if (backedge.first != num_pairs - 1 || backedge.second != 0) { return false; } for (int64_t i = 0; i < num_pairs - 1; ++i) { const SourceTargetPair& pair = others[i]; if (pair.first != i || pair.second != i + 1) { return false; } } return true; } bool IsBackwardCycle(const SourceTargetPair& backedge, const SourceTargetPairs& others) { int64_t num_pairs = others.size() + 1; if (backedge.first != 0 || backedge.second != num_pairs - 1) { return false; } for (int64_t i = 0; i < num_pairs - 1; ++i) { const SourceTargetPair& pair = others[i]; if (pair.first != i + 1 || pair.second != i) { return false; } } return true; } std::optional<std::pair<HloCollectivePermuteInstruction*, HloCollectivePermuteInstruction*>> CheckCyclePatterns(HloCollectivePermuteInstruction* cp0, HloCollectivePermuteInstruction* cp1) { const SourceTargetPairs& cp0_pairs = cp0->source_target_pairs(); const SourceTargetPairs& cp1_pairs = cp1->source_target_pairs(); if (cp0_pairs.size() == 1) { if (IsForwardCycle(cp0_pairs.front(), cp1_pairs) || IsBackwardCycle(cp0_pairs.front(), cp1_pairs)) { return std::make_pair(cp0, cp1); } } if (cp1_pairs.size() == 1) { if (IsForwardCycle(cp1_pairs.front(), cp0_pairs) || IsBackwardCycle(cp1_pairs.front(), cp0_pairs)) { return std::make_pair(cp1, cp0); } } return std::nullopt; } } absl::StatusOr<bool> CollectivePermuteDecomposer::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& 
execution_threads) { bool changed = false; std::vector<HloComputation*> all_computations = module->MakeComputationPostOrder(execution_threads); absl::flat_hash_set<HloComputation*> while_bodies; for (auto iter = all_computations.rbegin(); iter != all_computations.rend(); ++iter) { HloComputation* computation = *iter; bool may_pipeline = while_bodies.contains(computation); std::vector<HloCollectivePermuteInstruction*> cps_to_decompose; HloCollectivePermuteInstruction* cp0_to_pipeline = nullptr; HloCollectivePermuteInstruction* cp1_to_pipeline = nullptr; for (HloInstruction* hlo : computation->MakeInstructionPostOrder()) { if (hlo->opcode() == HloOpcode::kWhile) { while_bodies.insert(hlo->while_body()); continue; } if (hlo->opcode() != HloOpcode::kCollectivePermute) { continue; } HloCollectivePermuteInstruction* cp = Cast<HloCollectivePermuteInstruction>(hlo); if (!ShouldDecompose(*cp, threshold_in_bytes_)) { continue; } cps_to_decompose.push_back(cp); if (!while_bodies.contains(computation) || !may_pipeline) { continue; } if (cp0_to_pipeline != nullptr && cp1_to_pipeline != nullptr) { continue; } if (!MayPipeline(*cp)) { continue; } if (cp0_to_pipeline == nullptr) { cp0_to_pipeline = cp; continue; } auto optional_pair = CheckCyclePatterns(cp0_to_pipeline, cp); if (optional_pair.has_value()) { cp0_to_pipeline = optional_pair.value().first; cp1_to_pipeline = optional_pair.value().second; } } for (HloCollectivePermuteInstruction* cp : cps_to_decompose) { std::string pipeline_decision; if (cp0_to_pipeline == cp) { pipeline_decision = "0"; } else if (cp1_to_pipeline == cp) { pipeline_decision = "1"; } TF_RETURN_IF_ERROR( DecomposeCollectivePermute(cp, computation, pipeline_decision)); } if (!cps_to_decompose.empty()) { changed = true; } } return changed; } }
#include "xla/service/collective_permute_decomposer.h" #include <memory> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/service/collective_ops_utils.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/hlo_parser.h" #include "xla/tests/hlo_test_base.h" namespace xla { namespace { using ::testing::HasSubstr; namespace op = xla::testing::opcode_matchers; using CollectivePermuteDecomposerTest = HloTestBase; TEST_F(CollectivePermuteDecomposerTest, WithCycleNotTransformed) { const absl::string_view kModuleStr = R"( HloModule test ENTRY test_computation { p = u32[] replica-id() ROOT cp = u32[] collective-permute(p), channel_id=1, source_target_pairs={{0,1}, {1,0}} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnUnverifiedModule((kModuleStr))); CollectivePermuteDecomposer decomposer(0); TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get())); EXPECT_FALSE(changed); } TEST_F(CollectivePermuteDecomposerTest, WithContextDataNotTransformed) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { p = u32[] replica-id() ROOT cp = (u32[], u32[], u32[], u32[]) collective-permute(p), channel_id=1, source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnUnverifiedModule((kModuleStr))); CollectivePermuteDecomposer decomposer(0); TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get())); EXPECT_FALSE(changed); } TEST_F(CollectivePermuteDecomposerTest, TransformedExplicitChannelId) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { p = u32[] replica-id() ROOT cp = u32[] collective-permute(p), channel_id=1, source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}, metadata={op_name="op1/op2/add" 
source_file="foo/bar/mysource.py" source_line=35} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnUnverifiedModule((kModuleStr))); CollectivePermuteDecomposer decomposer(0); TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get())); EXPECT_TRUE(changed); auto check_metadata = [](const HloInstruction* inst) { EXPECT_EQ(inst->metadata().op_name(), "op1/op2/add"); EXPECT_EQ(inst->metadata().source_file(), "foo/bar/mysource.py"); EXPECT_EQ(inst->metadata().source_line(), 35); }; auto check_not_pipelined = [](const HloInstruction* instr) { const FrontendAttributes& attributes = instr->frontend_attributes(); EXPECT_EQ(attributes.map().end(), attributes.map().find(kSendRecvPipelineAttr)); }; HloInstruction* after_all = FindInstruction(module.get(), "after-all"); HloInstruction* recv = FindInstruction(module.get(), "recv"); EXPECT_EQ(recv->operand(0), after_all); EXPECT_EQ(recv->channel_id().value(), 1); EXPECT_THAT( recv->ToString(), HasSubstr( "_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}")); check_metadata(recv); check_not_pipelined(recv); HloInstruction* recv_done = FindInstruction(module.get(), "recv-done"); EXPECT_EQ(recv_done->operand(0), recv); HloInstruction* send = FindInstruction(module.get(), "send"); EXPECT_EQ(send->operand(1), after_all); EXPECT_EQ(send->channel_id().value(), 1); EXPECT_THAT( send->ToString(), HasSubstr( "_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}")); check_metadata(send); check_not_pipelined(send); HloInstruction* send_done = FindInstruction(module.get(), "send-done"); EXPECT_EQ(send_done->operand(0), send); HloInstruction* root = module->entry_computation()->root_instruction(); EXPECT_THAT(root, op::GetTupleElement(recv_done, 0)); } TEST_F(CollectivePermuteDecomposerTest, NotTransformedDefaultChannelId) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { p = u32[] replica-id() ROOT cp = u32[] collective-permute(p), source_target_pairs={{0,1}, 
{1,2}, {2,3}, {3,4}} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnUnverifiedModule((kModuleStr))); CollectivePermuteDecomposer decomposer(0); TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get())); EXPECT_FALSE(changed); } TEST_F(CollectivePermuteDecomposerTest, ThresholdNotTransformed) { const char* const kModuleStr = R"( HloModule test ENTRY test_computation { p = u32[] replica-id() ROOT cp = u32[] collective-permute(p), channel_id=1, source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}, metadata={op_name="op1/op2/add" source_file="foo/bar/mysource.py" source_line=35} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnUnverifiedModule((kModuleStr))); CollectivePermuteDecomposer decomposer(8); TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get())); EXPECT_FALSE(changed); } TEST_F(CollectivePermuteDecomposerTest, Pipeline1) { const char* const kModuleStr = R"( HloModule module cond { param = (u32[], u32[2]) parameter(0) count = get-tuple-element(param), index=0 ub = u32[] constant(2) ROOT result = pred[] compare(count, ub), direction=LT } body { param = (u32[], u32[2]) parameter(0) count = get-tuple-element(param), index=0 send-data = get-tuple-element(param), index=1 recv-data = u32[2] collective-permute(send-data), channel_id=1, source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}, frontend_attributes={_xla_other_attribute="xyz"} c1 = u32[] constant(1) new_count = u32[] add(count, c1) r = u32[2] broadcast(c1), dimensions={} s = u32[2] add(r, recv-data) ROOT result = (u32[], u32[2]) tuple(new_count, s) } ENTRY test_computation { c0 = u32[] constant(0) c1 = u32[] constant(1) r = u32[] replica-id() a = u32[] add(c1, r) init = u32[2] broadcast(a), dimensions={} while_init = (u32[], u32[2]) tuple(c0, init) while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond ROOT result = u32[2] get-tuple-element(while_result), index=1 })"; 
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnUnverifiedModule((kModuleStr))); CollectivePermuteDecomposer decomposer(0); TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* recv = FindInstruction(module.get(), "recv"); EXPECT_EQ(recv->channel_id().value(), 1); EXPECT_THAT( recv->ToString(), HasSubstr( "_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}")); EXPECT_THAT(recv->ToString(), HasSubstr("_xla_send_recv_pipeline=\"0\"")); EXPECT_THAT(recv->ToString(), HasSubstr("_xla_other_attribute=\"xyz\"")); HloInstruction* recv_done = FindInstruction(module.get(), "recv-done"); EXPECT_THAT(recv_done->ToString(), HasSubstr("_xla_send_recv_pipeline=\"0\"")); HloInstruction* send = FindInstruction(module.get(), "send"); EXPECT_EQ(send->channel_id().value(), 1); EXPECT_THAT( send->ToString(), HasSubstr( "_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}")); EXPECT_THAT(send->ToString(), HasSubstr("_xla_send_recv_pipeline=\"0\"")); EXPECT_THAT(send->ToString(), HasSubstr("_xla_other_attribute=\"xyz\"")); HloInstruction* send_done = FindInstruction(module.get(), "send-done"); EXPECT_THAT(send_done->ToString(), HasSubstr("_xla_send_recv_pipeline=\"0\"")); EXPECT_FALSE(recv_done->control_predecessors().empty()); EXPECT_EQ(recv_done->control_predecessors()[0], send); } TEST_F(CollectivePermuteDecomposerTest, ForwardPipeline2) { const char* const kModuleStr = R"( HloModule module cond { param = (u32[], u32[2]) parameter(0) count = get-tuple-element(param), index=0 ub = u32[] constant(2) ROOT result = pred[] compare(count, ub), direction=LT } body { param = (u32[], u32[2]) parameter(0) count = get-tuple-element(param), index=0 send-data = get-tuple-element(param), index=1 recv-data.0 = u32[2] collective-permute(send-data), channel_id=1, source_target_pairs={{3,0}} recv-data.1 = u32[2] collective-permute(send-data), channel_id=2, source_target_pairs={{0,1}, {1,2}, {2,3}} replica 
= u32[] replica-id() constant0 = u32[] constant(0) compare0 = pred[] compare(replica, constant0), direction=EQ compare = pred[2] broadcast(compare0), dimensions={} recv-data = u32[2] select(compare, recv-data.0, recv-data.1) c1 = u32[] constant(1) new_count = u32[] add(count, c1) r = u32[2] broadcast(c1), dimensions={} s = u32[2] add(r, recv-data) ROOT result = (u32[], u32[2]) tuple(new_count, s) } ENTRY test_computation { c0 = u32[] constant(0) c1 = u32[] constant(1) r = u32[] replica-id() a = u32[] add(c1, r) init = u32[2] broadcast(a), dimensions={} while_init = (u32[], u32[2]) tuple(c0, init) while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond ROOT result = u32[2] get-tuple-element(while_result), index=1 })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnUnverifiedModule((kModuleStr))); CollectivePermuteDecomposer decomposer(0); TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* recv = FindInstruction(module.get(), "recv"); EXPECT_EQ(recv->channel_id().value(), 1); EXPECT_THAT(recv->ToString(), HasSubstr("_xla_send_recv_source_target_pairs={{3,0}}")); EXPECT_THAT(recv->ToString(), HasSubstr("_xla_send_recv_pipeline=\"0\"")); HloInstruction* send = FindInstruction(module.get(), "send"); EXPECT_THAT(send->ToString(), HasSubstr("_xla_send_recv_source_target_pairs={{3,0}}")); EXPECT_THAT(send->ToString(), HasSubstr("_xla_send_recv_pipeline=\"0\"")); HloInstruction* recv1 = FindInstruction(module.get(), "recv.1"); EXPECT_EQ(recv1->channel_id().value(), 2); EXPECT_THAT( recv1->ToString(), HasSubstr("_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3}}")); EXPECT_THAT(recv1->ToString(), HasSubstr("_xla_send_recv_pipeline=\"1\"")); HloInstruction* recv_done1 = FindInstruction(module.get(), "recv-done.1"); EXPECT_THAT(recv_done1->ToString(), HasSubstr("_xla_send_recv_pipeline=\"1\"")); HloInstruction* send1 = FindInstruction(module.get(), "send.1"); EXPECT_THAT( 
send1->ToString(), HasSubstr("_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3}}")); EXPECT_THAT(send1->ToString(), HasSubstr("_xla_send_recv_pipeline=\"1\"")); HloInstruction* send_done1 = FindInstruction(module.get(), "send-done.1"); EXPECT_THAT(send_done1->ToString(), HasSubstr("_xla_send_recv_pipeline=\"1\"")); } TEST_F(CollectivePermuteDecomposerTest, ForwardPipelineWithMatmul) { const char* const kModuleStr = R"( HloModule test while_body { inputs = (u32[], f32[2,2], f32[2,2]) parameter(0) iter = u32[] get-tuple-element(inputs), index=0 iter_increment = u32[] constant(1) next_iter = u32[] add(iter, iter_increment) partition-id = u32[] partition-id() zero = u32[] constant(0) compare = pred[] compare(partition-id, zero), direction=EQ broadcast = pred[2,2] broadcast(compare), dimensions={} weights = f32[2,2] get-tuple-element(inputs), index=2 data = f32[2,2] get-tuple-element(inputs), index=1 cp_back = f32[2,2] collective-permute(data), channel_id=1, source_target_pairs={{3,0}}, frontend_attributes={_xla_send_recv_validation="{{3,10}}"} cp_forward = f32[2,2] collective-permute(data), channel_id=2, source_target_pairs={{0,1},{1,2},{2,3}}, frontend_attributes={_xla_send_recv_validation="{{0,7},{1,8},{2,9}}"} select = f32[2,2] select(broadcast, cp_back, cp_forward) matmul = f32[2,2] dot(weights, select), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT result = (u32[], f32[2,2], f32[2,2]) tuple(next_iter, matmul, weights) } while_cond { inputs = (u32[], f32[2,2], f32[2,2]) parameter(0) iter = u32[] get-tuple-element(inputs), index=0 max_iter = u32[] constant(3) ROOT compare = pred[] compare(iter, max_iter), direction=LT } ENTRY test_computation { start_iter = u32[] constant(0) input_data = f32[2,2] parameter(0) input_weights = f32[2,2] parameter(1) input = (u32[], f32[2,2], f32[2,2]) tuple(start_iter, input_data, input_weights) while_result = (u32[], f32[2,2], f32[2,2]) while(input), condition=while_cond, body=while_body ROOT data_out = f32[2,2] 
get-tuple-element(while_result), index=1 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnUnverifiedModule((kModuleStr))); CollectivePermuteDecomposer decomposer(0); TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get())); EXPECT_TRUE(changed); HloModule* transformed_module = module.get(); HloComputation* while_body = FindComputation(transformed_module, "while_body"); HloInstruction* recv_bwd = hlo_query::FindInstruction(while_body, "recv"); EXPECT_EQ(recv_bwd->channel_id().value(), 1); auto recv_bwd_frontend_attributes = recv_bwd->frontend_attributes().map(); EXPECT_EQ(recv_bwd_frontend_attributes.size(), 3); EXPECT_EQ(recv_bwd_frontend_attributes.at(kSendRecvValidationAttr), "{{3,10}}"); EXPECT_EQ(recv_bwd_frontend_attributes.at(kSendRecvPipelineAttr), "0"); EXPECT_EQ(recv_bwd_frontend_attributes.at(kSendRecvSourceTargetPairsAttr), "{{3,0}}"); HloInstruction* send_bwd = hlo_query::FindInstruction(while_body, "send"); auto send_bwd_frontend_attributes = send_bwd->frontend_attributes().map(); EXPECT_THAT(send_bwd_frontend_attributes.at(kSendRecvSourceTargetPairsAttr), "{{3,0}}"); HloInstruction* recv_fwd = hlo_query::FindInstruction(while_body, "recv.1"); EXPECT_EQ(recv_fwd->channel_id().value(), 2); auto recv_fwd_frontend_attributes = recv_fwd->frontend_attributes().map(); EXPECT_EQ(recv_fwd_frontend_attributes.size(), 3); EXPECT_EQ(recv_fwd_frontend_attributes.at(kSendRecvPipelineAttr), "1"); EXPECT_EQ(recv_fwd_frontend_attributes.at(kSendRecvSourceTargetPairsAttr), "{{0,1},{1,2},{2,3}}"); HloInstruction* send_fwd = hlo_query::FindInstruction(while_body, "send.1"); auto send_fwd_frontend_attributes = send_fwd->frontend_attributes().map(); EXPECT_EQ(send_fwd_frontend_attributes.size(), 3); EXPECT_EQ(send_fwd_frontend_attributes.at(kSendRecvPipelineAttr), "1"); EXPECT_EQ(send_fwd_frontend_attributes.at(kSendRecvSourceTargetPairsAttr), "{{0,1},{1,2},{2,3}}"); EXPECT_NE(while_body, nullptr); HloInstruction* recv_done_fwd = 
hlo_query::FindInstruction(while_body, "recv-done"); HloInstruction* recv_done_bwd = hlo_query::FindInstruction(while_body, "recv-done.1"); EXPECT_EQ(recv_done_fwd->control_predecessors()[0], send_bwd); EXPECT_EQ(recv_done_bwd->control_predecessors()[0], send_fwd); } TEST_F(CollectivePermuteDecomposerTest, BackwardPipeline2) { const char* const kModuleStr = R"( HloModule module cond { param = (u32[], u32[2]) parameter(0) count = get-tuple-element(param), index=0 ub = u32[] constant(2) ROOT result = pred[] compare(count, ub), direction=LT } body { param = (u32[], u32[2]) parameter(0) count = get-tuple-element(param), index=0 send-data = get-tuple-element(param), index=1 recv-data.0 = u32[2] collective-permute(send-data), channel_id=1, source_target_pairs={{1,0},{2,1},{3,2}} recv-data.1 = u32[2] collective-permute(send-data), channel_id=2, source_target_pairs={{0,3}} replica = u32[] replica-id() constant0 = u32[] constant(0) compare0 = pred[] compare(replica, constant0), direction=NE compare = pred[2] broadcast(compare0), dimensions={} recv-data = u32[2] select(compare, recv-data.0, recv-data.1) c1 = u32[] constant(1) new_count = u32[] add(count, c1) r = u32[2] broadcast(c1), dimensions={} s = u32[2] add(r, recv-data) ROOT result = (u32[], u32[2]) tuple(new_count, s) } ENTRY test_computation { c0 = u32[] constant(0) c1 = u32[] constant(1) r = u32[] replica-id() a = u32[] add(c1, r) init = u32[2] broadcast(a), dimensions={} while_init = (u32[], u32[2]) tuple(c0, init) while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond ROOT result = u32[2] get-tuple-element(while_result), index=1 })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnUnverifiedModule((kModuleStr))); CollectivePermuteDecomposer decomposer(0); TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* recv = FindInstruction(module.get(), "recv"); EXPECT_EQ(recv->channel_id().value(), 1); EXPECT_THAT( 
recv->ToString(), HasSubstr("_xla_send_recv_source_target_pairs={{1,0},{2,1},{3,2}}")); EXPECT_THAT(recv->ToString(), HasSubstr("_xla_send_recv_pipeline=\"1\"")); HloInstruction* send = FindInstruction(module.get(), "send"); EXPECT_THAT( send->ToString(), HasSubstr("_xla_send_recv_source_target_pairs={{1,0},{2,1},{3,2}}")); EXPECT_THAT(send->ToString(), HasSubstr("_xla_send_recv_pipeline=\"1\"")); HloInstruction* recv1 = FindInstruction(module.get(), "recv.1"); EXPECT_EQ(recv1->channel_id().value(), 2); EXPECT_THAT(recv1->ToString(), HasSubstr("_xla_send_recv_source_target_pairs={{0,3}}")); EXPECT_THAT(recv1->ToString(), HasSubstr("_xla_send_recv_pipeline=\"0\"")); HloInstruction* send1 = FindInstruction(module.get(), "send.1"); EXPECT_THAT(send1->ToString(), HasSubstr("_xla_send_recv_source_target_pairs={{0,3}}")); EXPECT_THAT(send1->ToString(), HasSubstr("_xla_send_recv_pipeline=\"0\"")); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_permute_decomposer.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_permute_decomposer_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
50cd28dd-fb5f-48ac-8b47-b0f528cfc036
cpp
tensorflow/tensorflow
collective_transformation_reorderer
third_party/xla/xla/service/collective_transformation_reorderer.cc
third_party/xla/xla/service/collective_transformation_reorderer_test.cc
#include "xla/service/collective_transformation_reorderer.h" #include <cstdint> #include <optional> #include <utility> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/log/check.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/hlo_dce.h" #include "tsl/platform/statusor.h" namespace xla { namespace { struct CollectiveTransformation { HloInstruction* hlo; int64_t transformed_collective_dimension; }; std::optional<std::vector<CollectiveTransformation>> GetAllGatherTransformations(HloInstruction* all_gather) { std::vector<HloInstruction*> transformation_hlos; { HloInstruction* transformation_hlo = all_gather; bool found_unsupported_transformation = false; while (transformation_hlo->user_count() == 1 && !found_unsupported_transformation) { transformation_hlo = transformation_hlo->users()[0]; switch (transformation_hlo->opcode()) { case HloOpcode::kReshape: { transformation_hlos.push_back(transformation_hlo); break; } default: found_unsupported_transformation = true; } } } if (transformation_hlos.empty()) { return std::nullopt; } auto get_reshaped_all_gather_dimension = [](const Shape& all_gather_shape, int64_t all_gather_dimension, HloInstruction* transformation_hlo) -> std::optional<int64_t> { int64_t all_gather_num_strides = absl::c_accumulate( all_gather_shape.dimensions().subspan(0, all_gather_dimension), 1, [](int64_t product, int64_t dimension_size) { return product * dimension_size; }); int64_t reshaped_all_gather_dimension = 0; int64_t reshaped_num_strides = 1; while (reshaped_all_gather_dimension < transformation_hlo->shape().dimensions_size() && reshaped_num_strides < all_gather_num_strides) { reshaped_num_strides *= transformation_hlo->shape().dimensions(reshaped_all_gather_dimension); ++reshaped_all_gather_dimension; } if (reshaped_num_strides != all_gather_num_strides) { return std::nullopt; } if 
(transformation_hlo->shape().dimensions(reshaped_all_gather_dimension) != all_gather_shape.dimensions(all_gather_dimension)) { return std::nullopt; } return reshaped_all_gather_dimension; }; std::vector<CollectiveTransformation> transformations; HloAllGatherInstruction* all_gather_instruction = DynCast<HloAllGatherInstruction>(all_gather); Shape all_gather_shape = all_gather_instruction->shape(); int64_t all_gather_dimension = all_gather_instruction->all_gather_dimension(); CHECK(all_gather_instruction != nullptr); for (HloInstruction* transformation_hlo : transformation_hlos) { bool found_unsupported_transformation = false; switch (transformation_hlo->opcode()) { case HloOpcode::kReshape: { std::optional<int64_t> reshaped_all_gather_dimension = get_reshaped_all_gather_dimension( all_gather_shape, all_gather_dimension, transformation_hlo); if (reshaped_all_gather_dimension.has_value()) { transformations.push_back( {transformation_hlo, *reshaped_all_gather_dimension}); all_gather_shape = transformation_hlo->shape(); all_gather_dimension = *reshaped_all_gather_dimension; } else { found_unsupported_transformation = true; } break; } default: return std::nullopt; } if (found_unsupported_transformation) { break; } } if (transformations.empty()) { return std::nullopt; } return transformations; } std::vector<HloInstruction*> GetAllReduceTransformations( HloInstruction* all_reduce) { HloAllReduceInstruction* all_reduce_instruction = DynCast<HloAllReduceInstruction>(all_reduce); CHECK_NE(all_reduce_instruction, nullptr); if (all_reduce_instruction->constrain_layout()) { return {}; } std::vector<HloInstruction*> transformation_hlos; HloInstruction* transformation_hlo = all_reduce->mutable_operand(0); while (transformation_hlo->opcode() == HloOpcode::kReshape && transformation_hlo->user_count() == 1) { transformation_hlos.push_back(transformation_hlo); transformation_hlo = transformation_hlo->mutable_operand(0); } return transformation_hlos; } } absl::StatusOr<bool> 
CollectiveTransformationReorder::ReorderAllGatherTransformations( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { HloInstructionMap<std::vector<CollectiveTransformation>> all_gather_to_transformations; for (HloComputation* computation : module->MakeComputationPostOrder(execution_threads)) { for (HloInstruction* instruction : computation->MakeInstructionPostOrder()) { if (instruction->opcode() == HloOpcode::kAllGather) { if (instruction->operand_count() != 1) { continue; } std::optional<std::vector<CollectiveTransformation>> all_gather_transformations = GetAllGatherTransformations(instruction); if (all_gather_transformations.has_value()) { all_gather_to_transformations[instruction] = *std::move(all_gather_transformations); } } } } if (all_gather_to_transformations.empty()) { return false; } auto reshape_all_gather_operand = [](HloInstruction* all_gather_operand, int64_t original_all_gather_dimension, const CollectiveTransformation& transformation) { Shape reshaped_all_gather_operand_shape = transformation.hlo->shape(); int64_t operand_all_gather_dimension_size = all_gather_operand->shape().dimensions( original_all_gather_dimension); reshaped_all_gather_operand_shape.set_dimensions( transformation.transformed_collective_dimension, operand_all_gather_dimension_size); HloComputation* computation = all_gather_operand->parent(); return computation->AddInstruction(HloInstruction::CreateReshape( reshaped_all_gather_operand_shape, all_gather_operand)); }; for (auto& [instruction, transformations] : all_gather_to_transformations) { HloAllGatherInstruction* all_gather = DynCast<HloAllGatherInstruction>(instruction); int64_t all_gather_dimension = all_gather->all_gather_dimension(); int64_t original_all_gather_dimension_size = all_gather->shape().dimensions(all_gather_dimension); HloInstruction* all_gather_operand = instruction->mutable_operand(0); for (const CollectiveTransformation& transformation : transformations) { all_gather_operand 
= reshape_all_gather_operand( all_gather_operand, all_gather_dimension, transformation); all_gather_dimension = transformation.transformed_collective_dimension; } Shape new_all_gather_shape = all_gather_operand->shape(); new_all_gather_shape.set_dimensions(all_gather_dimension, original_all_gather_dimension_size); HloComputation* computation = all_gather_operand->parent(); HloInstruction* new_all_gather = computation->AddInstruction(HloInstruction::CreateAllGather( new_all_gather_shape, {all_gather_operand}, all_gather_dimension, all_gather->device_list(), all_gather->constrain_layout(), all_gather->channel_id(), all_gather->use_global_device_ids())); TF_RETURN_IF_ERROR( transformations.back().hlo->ReplaceAllUsesWith(new_all_gather)); if (computation->root_instruction() == transformations.back().hlo) { computation->set_root_instruction(new_all_gather); } } return true; } absl::StatusOr<bool> CollectiveTransformationReorder::ReorderAllReduceTransformations( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { HloInstructionMap<std::vector<HloInstruction*>> all_reduce_to_transformations; for (HloComputation* computation : module->MakeComputationPostOrder(execution_threads)) { for (HloInstruction* instruction : computation->MakeInstructionPostOrder()) { if (instruction->opcode() == HloOpcode::kAllReduce) { if (instruction->user_count() != 1 || computation->root_instruction() == instruction) { continue; } std::vector<HloInstruction*> reshapes = GetAllReduceTransformations(instruction); if (reshapes.empty()) { continue; } all_reduce_to_transformations[instruction] = std::move(reshapes); } } } if (all_reduce_to_transformations.empty()) { return false; } for (auto& [inst, reshapes] : all_reduce_to_transformations) { HloComputation* computation = inst->parent(); HloAllReduceInstruction* all_reduce = DynCast<HloAllReduceInstruction>(inst); CHECK(!reshapes.empty()); HloInstruction* cur_operand = reshapes.back()->mutable_operand(0); 
HloInstruction* new_all_reduce = computation->AddInstruction(HloInstruction::CreateAllReduce( cur_operand->shape(), {cur_operand}, all_reduce->to_apply(), all_reduce->device_list(), all_reduce->constrain_layout(), all_reduce->channel_id(), all_reduce->use_global_device_ids())); cur_operand = new_all_reduce; for (int64_t i = reshapes.size() - 1; i >= 0; --i) { cur_operand = computation->AddInstruction( HloInstruction::CreateReshape(reshapes[i]->shape(), cur_operand)); } TF_RETURN_IF_ERROR( computation->ReplaceInstruction(all_reduce, cur_operand)); } return true; } absl::StatusOr<bool> CollectiveTransformationReorder::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { TF_ASSIGN_OR_RETURN(bool ag_changed, ReorderAllGatherTransformations( module, execution_threads)); TF_ASSIGN_OR_RETURN(bool ar_changed, ReorderAllReduceTransformations( module, execution_threads)); if (ag_changed || ar_changed) { HloDCE dce; TF_RETURN_IF_ERROR(dce.Run(module, execution_threads).status()); } return ag_changed || ar_changed; } }
// Unit tests for the CollectiveTransformationReorder pass, which hoists
// reshapes across all-gather/all-reduce instructions when safe.
#include "xla/service/collective_transformation_reorderer.h"

#include <memory>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace {

namespace op = xla::testing::opcode_matchers;

// Test fixture: provides a helper that runs the pass with an empty set of
// execution threads.
class CollectiveTransformationReordererTest : public HloTestBase {
 public:
  absl::StatusOr<bool> RunCollectiveTransformationReorderer(HloModule* module) {
    CollectiveTransformationReorder reorderer;
    return reorderer.Run(module, {});
  }
};

// A reshape that only splits dimensions after the gathered dimension is moved
// before the all-gather; the gather dimension (1) is preserved.
TEST_F(CollectiveTransformationReordererTest,
       ReshapeWithinShardAfterAllGatherDim) {
  absl::string_view hlo_string = R"( HloModule module ENTRY entry { param = bf16[8,4,1024] parameter(0) all-gather = bf16[8,32,1024] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1 ROOT reshape = bf16[8,32,8,128] reshape(all-gather) } )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunCollectiveTransformationReorderer(module.get()));
  EXPECT_TRUE(changed);
  // The root becomes all-gather(reshape(param)) instead of
  // reshape(all-gather(param)).
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::AllGather(op::Reshape(op::Parameter())));
  HloInstruction* all_gather = module->entry_computation()->root_instruction();
  EXPECT_THAT(all_gather->dimensions(), ::testing::ElementsAre(1));
}

// A reshape that only merges dimensions before the gathered dimension is also
// hoisted; the gather dimension index is remapped (here to 1).
TEST_F(CollectiveTransformationReordererTest,
       ReshapeWithinShardBeforeAllGatherDim) {
  absl::string_view hlo_string = R"( HloModule module ENTRY entry { param = bf16[8,32,8,4,1024] parameter(0) all-gather = bf16[8,32,8,32,1024] all-gather(param), dimensions={3}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1 ROOT reshape = bf16[2048,32,1024] reshape(all-gather) } )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunCollectiveTransformationReorderer(module.get()));
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::AllGather(op::Reshape(op::Parameter())));
  HloInstruction* all_gather = module->entry_computation()->root_instruction();
  EXPECT_THAT(all_gather->dimensions(), ::testing::ElementsAre(1));
}

// Reshape changes dimensions on both sides of the gathered dimension; still
// hoistable because the gathered dimension itself is untouched.
TEST_F(CollectiveTransformationReordererTest,
       ReshapeWithinShardBeforeAndAfterAllGatherDim) {
  absl::string_view hlo_string = R"( HloModule module ENTRY entry { param = bf16[8,32,8,4,1024] parameter(0) all-gather = bf16[8,32,8,32,1024] all-gather(param), dimensions={3}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1 ROOT reshape = bf16[2048,32,8,128] reshape(all-gather) } )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunCollectiveTransformationReorderer(module.get()));
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::AllGather(op::Reshape(op::Parameter())));
  HloInstruction* all_gather = module->entry_computation()->root_instruction();
  EXPECT_THAT(all_gather->dimensions(), ::testing::ElementsAre(1));
}

// Negative case: the reshape merges the gathered dimension across shard
// boundaries, so the pass must not fire.
TEST_F(CollectiveTransformationReordererTest, ReshapeAcrossShards) {
  absl::string_view hlo_string = R"( HloModule module ENTRY entry { param = bf16[8,1,8,128] parameter(0) all-gather = bf16[8,8,8,128] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1 ROOT reshape = bf16[64,8,128] reshape(all-gather) } )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunCollectiveTransformationReorderer(module.get()));
  EXPECT_FALSE(changed);
}

// Negative case: the reshape merges the gathered dimension with the following
// dimension.
TEST_F(CollectiveTransformationReordererTest, MergeAllGatherDimensionWithNext) {
  absl::string_view hlo_string = R"( HloModule module ENTRY entry { param = bf16[8,8,16,16] parameter(0) all-gather = bf16[64,8,16,16] all-gather(param), dimensions={0}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1 ROOT reshape = bf16[512,16,16] reshape(all-gather) } )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunCollectiveTransformationReorderer(module.get()));
  EXPECT_FALSE(changed);
}

// Negative case: the reshape merges the gathered dimension with the preceding
// dimension.
TEST_F(CollectiveTransformationReordererTest,
       MergeAllGatherDimensionWithPrevious) {
  absl::string_view hlo_string = R"( HloModule module ENTRY entry { param = bf16[8,8,16,16] parameter(0) all-gather = bf16[8,64,16,16] all-gather(param), dimensions={1}, replica_groups={{0,1,2,3,4,5,6,7}}, channel_id=1 ROOT reshape = bf16[512,16,16] reshape(all-gather) } )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunCollectiveTransformationReorderer(module.get()));
  EXPECT_FALSE(changed);
}

// An all-reduce fed by a single reshape: the all-reduce is moved before the
// reshape, so the root reads reshape(all-reduce(param)).
TEST_F(CollectiveTransformationReordererTest, AllReduceSingleReshape) {
  absl::string_view hlo_string = R"( HloModule module add { a = bf16[] parameter(0) b = bf16[] parameter(1) ROOT s = bf16[] add(a, b) } ENTRY entry { param = bf16[16384,6144] parameter(0) reshape = bf16[1,16384,6144] reshape(param) all-reduce = bf16[1,16384,6144] all-reduce(reshape), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=add constant = s32[] constant(0) ROOT dynamic-slice = bf16[1,16384,384] dynamic-slice(all-reduce, constant, constant, constant), dynamic_slice_sizes={1,16384,384} } )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunCollectiveTransformationReorderer(module.get()));
  EXPECT_TRUE(changed);
  // Re-verify the transformed module. NOTE(review): the two bool args are
  // presumably (layout_sensitive, allow_mixed_precision) — confirm against
  // the HloVerifier constructor.
  TF_ASSERT_OK(HloVerifier(false, true)
                   .Run(module.get())
                   .status());
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::DynamicSlice(op::Reshape(op::AllReduce(op::Parameter())),
                               op::Constant(), op::Constant(),
                               op::Constant()));
}

// An all-reduce fed by a chain of two reshapes: both reshapes are re-built on
// top of the hoisted all-reduce.
TEST_F(CollectiveTransformationReordererTest, AllReduceTwoReshapes) {
  absl::string_view hlo_string = R"( HloModule module add { a = bf16[] parameter(0) b = bf16[] parameter(1) ROOT s = bf16[] add(a, b) } ENTRY entry { param = bf16[16384,3072,2] parameter(0) reshape.1 = bf16[16384,6144] reshape(param) reshape.2 = bf16[1,16384,6144] reshape(reshape.1) all-reduce = bf16[1,16384,6144] all-reduce(reshape.2), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=add constant = s32[] constant(0) ROOT dynamic-slice = bf16[1,16384,384] dynamic-slice(all-reduce, constant, constant, constant), dynamic_slice_sizes={1,16384,384} } )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunCollectiveTransformationReorderer(module.get()));
  EXPECT_TRUE(changed);
  TF_ASSERT_OK(HloVerifier(false, true)
                   .Run(module.get())
                   .status());
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::DynamicSlice(op::Reshape(op::Reshape(op::AllReduce(op::Parameter()))),
                       op::Constant(), op::Constant(), op::Constant()));
}

// Negative case: the reshape feeding the all-reduce has a second user (a
// copy), so hoisting would change what that user observes.
TEST_F(CollectiveTransformationReordererTest, AllReduceReshapeWithTwoUsers) {
  absl::string_view hlo_string = R"( HloModule module add { a = bf16[] parameter(0) b = bf16[] parameter(1) ROOT s = bf16[] add(a, b) } ENTRY entry { param = bf16[16384,6144] parameter(0) reshape = bf16[1,16384,6144] reshape(param) all-reduce = bf16[1,16384,6144] all-reduce(reshape), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=add constant = s32[] constant(0) dynamic-slice = bf16[1,16384,384] dynamic-slice(all-reduce, constant, constant, constant), dynamic_slice_sizes={1,16384,384} copy = bf16[1,16384,6144] copy(reshape) ROOT tuple = (bf16[1,16384,6144], bf16[1,16384,384]) tuple(copy, dynamic-slice) } )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunCollectiveTransformationReorderer(module.get()));
  EXPECT_FALSE(changed);
}

// Negative case: the all-reduce itself has two users, so it is not a
// single-user candidate for reordering.
TEST_F(CollectiveTransformationReordererTest, AllReduceWithTwoUsersReshape) {
  absl::string_view hlo_string = R"( HloModule module add { a = bf16[] parameter(0) b = bf16[] parameter(1) ROOT s = bf16[] add(a, b) } ENTRY entry { param = bf16[16384,6144] parameter(0) reshape = bf16[1,16384,6144] reshape(param) all-reduce = bf16[1,16384,6144] all-reduce(reshape), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=add constant = s32[] constant(0) dynamic-slice = bf16[1,16384,384] dynamic-slice(all-reduce, constant, constant, constant), dynamic_slice_sizes={1,16384,384} copy = bf16[1,16384,6144] copy(all-reduce) ROOT tuple = (bf16[1,16384,6144], bf16[1,16384,384]) tuple(copy, dynamic-slice) } )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunCollectiveTransformationReorderer(module.get()));
  EXPECT_FALSE(changed);
}

// Negative case: a layout-constrained all-reduce must not be moved.
TEST_F(CollectiveTransformationReordererTest, AllReduceConstrainLayout) {
  absl::string_view hlo_string = R"( HloModule module add { a = bf16[] parameter(0) b = bf16[] parameter(1) ROOT s = bf16[] add(a, b) } ENTRY entry { param = bf16[16384,6144] parameter(0) reshape = bf16[1,16384,6144] reshape(param) all-reduce = bf16[1,16384,6144] all-reduce(reshape), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, constrain_layout=true, to_apply=add constant = s32[] constant(0) ROOT dynamic-slice = bf16[1,16384,384] dynamic-slice(all-reduce, constant, constant, constant), dynamic_slice_sizes={1,16384,384} } )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunCollectiveTransformationReorderer(module.get()));
  EXPECT_FALSE(changed);
}

}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_transformation_reorderer.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_transformation_reorderer_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
df627059-2ef8-47c7-880a-5779a2e8d0a0
cpp
tensorflow/tensorflow
hlo_unstacker
third_party/xla/xla/service/hlo_unstacker.cc
third_party/xla/xla/service/hlo_unstacker_test.cc
#include "xla/service/hlo_unstacker.h"

#include <algorithm>
#include <cstdint>
#include <deque>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>

#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/map_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/tuple_util.h"
#include "xla/service/while_loop_unroller.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace {

// Kinds of fusion patterns the unstacker recognizes around a stacked operand.
// "DS" presumably refers to dynamic-slice-based fusions (see the
// Get*DSFusionPattern helpers later in this file) — TODO(review): confirm.
enum class PatternType {
  DSFusionNoBitcastPattern,
  DSFusionPattern,
  NestedDSFusionPattern,
  Other,
};

// Human-readable name of a PatternType, used in the logging below.
// The switch covers every enumerator, so there is no fall-through return.
static std::string PatternTypeToString(PatternType pattern_type) {
  switch (pattern_type) {
    case PatternType::DSFusionNoBitcastPattern:
      return "DSFusionNoBitcastPattern";
    case PatternType::DSFusionPattern:
      return "DSFusionPattern";
    case PatternType::NestedDSFusionPattern:
      return "NestedDSFusionPattern";
    case PatternType::Other:
      return "Other";
  }
}

// Describes a single matched unstacking opportunity.
struct PatternInfo {
  PatternType type;
  // Instructions covered by this pattern match.
  std::vector<const HloInstruction*> unstacked_instrs;
  // The fusion instruction at the root of the matched pattern.
  const HloInstruction* instr;
  // Tuple shape the stacked operand is unstacked into.
  Shape unstacked_shape;
  // Fusion computation used to materialize each slice; may be null for
  // patterns that do not need one.
  HloComputation* unstacking_computation;
  // Multi-line debug description; omits the computation when it is null.
  std::string ToString() const {
    if (unstacking_computation == nullptr) {
      return absl::StrCat("type: \n\t", PatternTypeToString(type), "\n",
                          "instr: \n\t", instr->name(), "\n", "shape: \n\t",
                          unstacked_shape.ToString(true));
    } else {
      return absl::StrCat("type: \n\t", PatternTypeToString(type), "\n",
                          "instr: \n\t", instr->name(), "\n", "shape: \n\t",
unstacked_shape.ToString(true), "\n", "comp: \n", unstacking_computation->name()); } } }; struct UnstackerMetadata { static absl::StatusOr<UnstackerMetadata> Create( HloModule* module, std::function<bool(HloInstruction*)> unfuse_slice) { UnstackerMetadata metadata; TF_ASSIGN_OR_RETURN( bool prepared, WhileLoopUnroller::PrepareModuleForUnrolling(module, {})); if (prepared) { VLOG(3) << "Prepared module: " << module->name() << " for unstacking."; } std::vector<std::pair<HloInstruction*, WhileLoopConfig>> loops = WhileLoopUnroller::GetUnrollableLoops(module, {}, std::nullopt); for (const auto& [instr, while_loop_config] : loops) { metadata.unrollable_loop_bodies[instr->while_body()] = while_loop_config; metadata.bodies[instr->while_body()] = instr; } metadata.unfuse_slice = unfuse_slice; return metadata; } absl::flat_hash_map<HloComputation*, WhileLoopConfig> unrollable_loop_bodies; absl::flat_hash_map<const HloComputation*, HloInstruction*> bodies; std::vector< std::pair<std::function<std::optional<PatternInfo>( const UnstackerMetadata&, const HloInstruction*, int64_t)>, std::function<absl::Status(HloInstruction*, const Shape&)>>> custom_handlers; std::function<bool(HloInstruction*)> unfuse_slice; }; class UnstackerTransformer { public: explicit UnstackerTransformer(const UnstackerMetadata& metadata) : metadata_(metadata) {} std::vector<const HloInstruction*> HandleInstruction( const HloInstruction* instr, int64_t changed_idx) { if (instr->opcode() != HloOpcode::kFusion) { return {}; } VLOG(3) << "HandleInstruction(" << instr->shape().ToString() << instr->name() << ", " << changed_idx << ")"; for (const auto& [custom_pattern, custom_handler] : metadata_.custom_handlers) { std::optional<PatternInfo> stacked_user = custom_pattern(metadata_, instr, changed_idx); if (!stacked_user.has_value()) { continue; } PatternInfo& pattern_info = stacked_user.value(); pattern_type_ = pattern_info.type; VLOG(3) << "PatternInfo:" << "\n" << pattern_info.ToString(); if 
(pattern_info.unstacking_computation != nullptr && unstacking_computation_ != nullptr) { if (!absl::EqualsIgnoreCase( pattern_info.unstacking_computation->ToString( HloPrintOptions::Fingerprint()), unstacking_computation_->ToString( HloPrintOptions::Fingerprint()))) { VLOG(3) << "Seen multiple unstacking computations, cannot handle: " << "\n previous computations: \n" << unstacking_computation_->ToString( HloPrintOptions::Fingerprint()) << "\n current computations: \n" << pattern_info.unstacking_computation->ToString( HloPrintOptions::Fingerprint()); return {}; } } if (pattern_info.unstacking_computation != nullptr) { unstacking_computation_ = pattern_info.unstacking_computation; } unstacked_shape_ = std::make_unique<Shape>(pattern_info.unstacked_shape); unstacked_instrs_.push_back(instr); std::function<absl::Status()> unstack_wrapper = [&custom_handler = custom_handler, pattern_info]() mutable -> absl::Status { HloInstruction* mutable_dynamic_slicing_fusion = const_cast<HloInstruction*>(pattern_info.instr); return custom_handler(mutable_dynamic_slicing_fusion, pattern_info.unstacked_shape.tuple_shapes(0)); }; body_changes_.push_back(unstack_wrapper); return pattern_info.unstacked_instrs; } return {}; } const UnstackerMetadata& GetMetadata() const { return metadata_; } std::vector<const HloInstruction*>& GetUnstackedInstructions() { return unstacked_instrs_; } const Shape* GetUnstackedShape() const { return unstacked_shape_.get(); } HloComputation* GetUnstackingComputation() const { return unstacking_computation_; } std::vector<std::function<void(const UnstackerTransformer&)>>& GetLoopChanges() { return loop_changes_; } std::vector<std::function<absl::Status()>>& GetBodyChanges() { return body_changes_; } absl::flat_hash_map<HloInstruction*, std::vector<int64_t>>& GetOperandChanges() { return operand_changes_; } void AddOperandChange(HloInstruction* instr, int64_t index) { operand_changes_[instr].push_back(index); } void AddLoopChange( std::function<void(const 
UnstackerTransformer&)> loop_change) { loop_changes_.push_back(loop_change); } PatternType GetPatternType() const { return pattern_type_; } private: PatternType pattern_type_; const UnstackerMetadata& metadata_; std::unique_ptr<Shape> unstacked_shape_ = nullptr; HloComputation* unstacking_computation_ = nullptr; std::vector<std::function<void(const UnstackerTransformer&)>> loop_changes_; std::vector<std::function<absl::Status()>> body_changes_; absl::flat_hash_map<HloInstruction*, std::vector<int64_t>> operand_changes_; std::vector<const HloInstruction*> unstacked_instrs_; }; bool CanUnstackWhileOperand(const HloInstruction* while_instr, UnstackerTransformer& unstacker, int64_t index); bool UnstackWhileOperandAtIndex( const UnstackerMetadata& metadata, HloInstruction* while_instr, int64_t index, std::vector<const HloInstruction*>& unstacked_instructions); bool PropagateGteShapeChange(HloInstruction* gte, UnstackerTransformer& unstacker) { VLOG(5) << "PropagateGteShapeChange(" << gte->name() << ")"; std::vector<const HloInstruction*> handled_instrs; absl::flat_hash_map<HloInstruction*, int64_t> visited; std::deque<HloInstruction*> worklist; worklist.push_back(gte); visited.insert({gte, gte->tuple_index()}); while (!worklist.empty()) { HloInstruction* changed_instr_to_propagate = worklist.front(); int64_t changed_operand_index = FindOrDie(visited, changed_instr_to_propagate); worklist.pop_front(); for (HloInstruction* user : changed_instr_to_propagate->users()) { if (ContainsKey(visited, user)) { continue; } if (user->opcode() == HloOpcode::kGetTupleElement) { if (user->tuple_index() != changed_operand_index) { continue; } visited.insert({user, changed_operand_index}); worklist.push_back(user); } else if (user->opcode() == HloOpcode::kTuple) { int64_t use_index = user->operand_index(changed_instr_to_propagate); visited.insert({user, {use_index}}); worklist.push_back(user); } else if (user->opcode() == HloOpcode::kWhile) { bool changed_nested_while = 
CanUnstackWhileOperand(user, unstacker, changed_operand_index); if (!changed_nested_while) { return false; } visited.insert({user, changed_operand_index}); worklist.push_back(user); } else { if (absl::c_find(handled_instrs, user) != handled_instrs.end()) { continue; } if (user->IsCustomCall("DynamicGte") || user->IsCustomCall("DynamicTuple")) { continue; } int64_t use_index = user->operand_index(changed_instr_to_propagate); std::vector<const HloInstruction*> curr_handled_instrs = unstacker.HandleInstruction(user, use_index); if (curr_handled_instrs.empty()) { VLOG(3) << "Custom unstacker not found for " << user->name(); return false; } for (const HloInstruction* instr : curr_handled_instrs) { for (HloInstruction* handled_instr_user : instr->users()) { if (user->shape() == gte->shape()) { visited.insert({handled_instr_user, changed_operand_index}); worklist.push_back(handled_instr_user); } } handled_instrs.push_back(instr); } } } } for (const auto& [instr, index] : visited) { unstacker.AddOperandChange(instr, index); } return true; } bool CanPropagateGteShapeChangesInComputation( const HloComputation* comp, const HloInstruction* operand, UnstackerTransformer& shape_transformer, int64_t idx) { VLOG(3) << "Propagating shape change of index " << idx << " in : " << comp->name(); for (HloInstruction* instr : comp->MakeInstructionPostOrder()) { if (instr->opcode() == HloOpcode::kGetTupleElement && instr->tuple_index() == idx) { if (instr->operand(0) != operand) { continue; } bool can_propagate = PropagateGteShapeChange(instr, shape_transformer); if (!can_propagate) { VLOG(3) << "Failed to propagate shape change for " << instr->name(); return false; } } } VLOG(3) << "Finish propagating shape change of index " << idx << " in: " << comp->name(); return true; } void UnstackWhileInput(const UnstackerTransformer& unstacker, HloInstruction* while_instr, int64_t index) { VLOG(3) << "Unstacking while input: " << while_instr->name() << " at " << index; const Shape* new_shape = 
unstacker.GetUnstackedShape(); HloComputation* unstacking_computation = unstacker.GetUnstackingComputation(); const Shape& slice_shape = new_shape->tuple_shapes(0); HloInstruction* old_while_input = while_instr->while_init()->mutable_operand(index); if (old_while_input->shape().IsTuple()) { VLOG(3) << "Input is already unstacked: " << old_while_input->name(); return; } std::vector<HloInstruction*> slices; if (old_while_input->IsCustomCall("AllocateBuffer")) { for (int64_t i = 0; i < new_shape->tuple_shapes_size(); ++i) { slices.push_back(while_instr->AddInstruction( HloInstruction::CreateCustomCall(slice_shape, {}, "AllocateBuffer"))); } } else { for (int64_t i = 0; i < new_shape->tuple_shapes_size(); ++i) { HloInstruction* root_instr = unstacking_computation->root_instruction(); HloInstruction* slice = nullptr; if (unstacker.GetPatternType() == PatternType::DSFusionPattern || unstacker.GetPatternType() == PatternType::NestedDSFusionPattern || unstacker.GetPatternType() == PatternType::DSFusionNoBitcastPattern) { HloInstruction* dynamic_slice = nullptr; if (unstacker.GetPatternType() == PatternType::DSFusionPattern || unstacker.GetPatternType() == PatternType::NestedDSFusionPattern) { dynamic_slice = root_instr->mutable_operand(0); } else if (unstacker.GetPatternType() == PatternType::DSFusionNoBitcastPattern) { dynamic_slice = root_instr; } std::vector<int64_t> new_start_indices; new_start_indices.reserve(dynamic_slice->shape().rank()); std::vector<int64_t> new_limit_indices; new_limit_indices.reserve(dynamic_slice->shape().rank()); std::vector<int64_t> new_strides; new_strides.reserve(dynamic_slice->shape().rank()); new_start_indices.push_back(i); new_limit_indices.push_back(i + 1); new_strides.push_back(1); for (int64_t j = 1; j < dynamic_slice->shape().rank(); ++j) { new_start_indices.push_back(0); new_limit_indices.push_back( dynamic_slice->mutable_operand(0)->shape().dimensions(j)); new_strides.push_back(1); } slice = 
while_instr->AddInstruction(HloInstruction::CreateSlice( dynamic_slice->shape(), old_while_input, new_start_indices, new_limit_indices, new_strides)); } if (slice == nullptr || !unstacker.GetMetadata().unfuse_slice(slice)) { std::vector<HloInstruction*> operands = { old_while_input, while_instr->AddInstruction(MakeScalarConstantWithShape( unstacking_computation->parameter_instruction(1)->shape(), i))}; slice = while_instr->AddInstruction(HloInstruction::CreateFusion( slice_shape, HloInstruction::FusionKind::kLoop, operands, while_instr->GetModule()->AddEmbeddedComputation( unstacking_computation->Clone()), "hoisted")); } slices.push_back(slice); } } HloInstruction* new_operand_element = while_instr->AddInstruction(HloInstruction::CreateTuple(slices)); HloInstruction* new_while_init = TupleUtil::ReplaceTupleWith(new_operand_element, while_instr->while_init(), {index}, false) .value(); CHECK_OK(while_instr->ReplaceOperandWithDifferentShape(0, new_while_init)); } bool CanUnstackWhileOperand(const HloInstruction* while_instr, UnstackerTransformer& unstacker, int64_t index) { VLOG(5) << "ReplaceWhileOperandShape: " << while_instr->name() << " at " << index; bool body_changes_collected = CanPropagateGteShapeChangesInComputation( while_instr->while_body(), while_instr->while_body()->parameter_instruction(0), unstacker, index); if (!body_changes_collected) { return false; } bool condition_changes_collected = CanPropagateGteShapeChangesInComputation( while_instr->while_condition(), while_instr->while_condition()->parameter_instruction(0), unstacker, index); if (!condition_changes_collected) { return false; } bool parent_changes_collected = CanPropagateGteShapeChangesInComputation( while_instr->parent(), while_instr, unstacker, index); if (!parent_changes_collected) { VLOG(3) << "Failed: parent_changes_collected"; return false; } HloInstruction* root_operand = while_instr->while_body()->root_instruction()->mutable_operand(index); if (root_operand == nullptr) { return false; 
} HloInstruction* gte_operand = nullptr; if (Match(root_operand, match::GetTupleElement(match::Op(&gte_operand)))) { if (Match(gte_operand, match::While())) { VLOG(3) << "Faced a gte originating from loop: " << root_operand->ToString(); bool loop_feeding_root_changes_collected = CanUnstackWhileOperand( root_operand->operand(0), unstacker, root_operand->tuple_index()); if (!loop_feeding_root_changes_collected) { VLOG(3) << "Failed: loop " << root_operand->operand(0)->name() << " output at " << index << " is not unstackable"; return false; } } else if (!Match(gte_operand, match::Parameter().WithParameterNum(0))) { VLOG(3) << "Failed: root operand of while_body at " << index << " is not a parameter"; return false; } } auto loop_change = [=](const UnstackerTransformer& unstacker, HloInstruction* loop, int64_t idx) mutable { Shape old_shape = ShapeUtil::MakeStaticShape( loop->while_body()->parameter_instruction(0)->shape()); ShapeUtil::UpdateTupleShape(*unstacker.GetUnstackedShape(), idx, &old_shape); loop->while_body()->ReplaceParameter( 0, HloInstruction::CreateParameter(0, old_shape, "unstacked")); loop->while_condition()->ReplaceParameter( 0, HloInstruction::CreateParameter(0, old_shape, "unstacked")); CHECK_NE(unstacker.GetUnstackingComputation(), nullptr); UnstackWhileInput(unstacker, loop, idx); *loop->mutable_shape() = old_shape; }; auto loop_change_wrapper = [&loop_change, while_instr, index](const UnstackerTransformer& unstacker) { HloInstruction* mutable_loop = const_cast<HloInstruction*>(while_instr); loop_change(unstacker, mutable_loop, index); }; unstacker.AddLoopChange(loop_change_wrapper); return true; } bool UnstackWhileOperandAtIndex( const UnstackerMetadata& metadata, HloInstruction* while_instr, int64_t index, std::vector<const HloInstruction*>& unstacked_instructions) { UnstackerTransformer unstacker = UnstackerTransformer(metadata); bool can_unstack = CanUnstackWhileOperand(while_instr, unstacker, index); if (!can_unstack) { VLOG(3) << "Unstacking 
failed for " << while_instr->name() << " at " << index; return false; } if (unstacker.GetUnstackedShape() == nullptr) { VLOG(3) << "Failed: unstacked shape is null"; return false; } if (unstacker.GetUnstackingComputation() == nullptr) { VLOG(3) << "Failed: unstacking computation is null"; return false; } for (auto& [instr, indices] : unstacker.GetOperandChanges()) { switch (instr->opcode()) { case HloOpcode::kGetTupleElement: VLOG(3) << "Changing shape of: " << instr->name(); *instr->mutable_shape() = *unstacker.GetUnstackedShape(); break; case HloOpcode::kTuple: { for (int64_t index : indices) { VLOG(3) << "Changing shape of: " << instr->name() << " at " << index; *instr->mutable_shape()->mutable_tuple_shapes(index) = *unstacker.GetUnstackedShape(); } break; } case HloOpcode::kWhile: for (int64_t index : indices) { VLOG(3) << "Changing shape of: " << instr->name() << " at " << index; ShapeUtil::UpdateTupleShape(*unstacker.GetUnstackedShape(), index, instr->mutable_shape()); } break; default: LOG(FATAL) << "Unsupported opcode: " << instr->name(); } } for (const auto& body_change : unstacker.GetBodyChanges()) { CHECK_OK(body_change()); } for (auto& loop_change : unstacker.GetLoopChanges()) { loop_change(unstacker); } for (const HloInstruction* instr : unstacker.GetUnstackedInstructions()) { unstacked_instructions.push_back(instr); } return true; } Shape MakeUnstackedShapeFromSlice(const Shape& slice_shape, int64_t layers) { std::vector<Shape> shapes; shapes.reserve(layers); for (int64_t i = 0; i < layers; ++i) { shapes.push_back(slice_shape); } return ShapeUtil::MakeTupleShape(shapes); } std::optional<WhileLoopConfig> IsFusionInsideUnrollableLoopWithNumParameter( const UnstackerMetadata& metadata, const HloInstruction* instr, int64_t num_fusion_params) { if (instr->opcode() != HloOpcode::kFusion) { return std::nullopt; } if (instr->fused_parameters().size() != num_fusion_params) { VLOG(3) << "Fusion has different number of parameters"; return std::nullopt; } if 
(!metadata.unrollable_loop_bodies.contains(instr->parent())) {  // NOTE(review): tail of IsFusionInsideUnrollableLoopWithNumParameter; its opening lies before this chunk.
    VLOG(5) << "Fusion not inside unrollable while body, " << instr->name()
            << " inside " << instr->parent()->name();
    return std::nullopt;
  }
  // The parent computation is a known-unrollable while body; hand back its
  // recorded loop configuration.
  return metadata.unrollable_loop_bodies.at(instr->parent());
}

// Scans the fusion's computation (post-order) for a dynamic-slice of fusion
// parameter `stacked_operand_idx` that is effectively static w.r.t. the
// enclosing unrollable while loop and whose dynamic dimension is the
// most-major one (dynamic index 0). Returns nullptr when the fusion is not
// inside such a loop or no matching slice exists.
HloInstruction* GetMostMajorEffectivelyStaticDynamicSliceInFusion(
    const UnstackerMetadata& metadata, const HloInstruction* instr,
    int64_t num_fusion_params, int64_t stacked_operand_idx) {
  std::optional<WhileLoopConfig> while_instr_config =
      IsFusionInsideUnrollableLoopWithNumParameter(metadata, instr,
                                                   num_fusion_params);
  if (!while_instr_config.has_value()) {
    return nullptr;
  }
  for (HloInstruction* fused_instr :
       instr->fused_instructions_computation()->MakeInstructionPostOrder()) {
    std::optional<int64_t> dynamic_index =
        MatchEffectivelyStaticDynamicSliceInsideLoop(
            fused_instr,
            instr->fused_instructions_computation()->parameter_instruction(
                stacked_operand_idx),
            while_instr_config.value());
    // Only a slice whose dynamic dimension is the most-major one qualifies.
    if (dynamic_index.has_value() && dynamic_index.value() == 0) {
      return fused_instr;
    }
  }
  return nullptr;
}

// Like GetMostMajorEffectivelyStaticDynamicSliceInFusion, but matches any
// shape-covering dynamic-index instruction of the given `opcode`
// (e.g. dynamic-slice or dynamic-update-slice) on the stacked operand.
HloInstruction* GetMostMajorShapeCoveringDynamicIndexInFusion(
    const UnstackerMetadata& metadata, const HloInstruction* instr,
    HloOpcode opcode, int64_t num_fusion_params, int64_t stacked_operand_idx) {
  std::optional<WhileLoopConfig> while_instr_config =
      IsFusionInsideUnrollableLoopWithNumParameter(metadata, instr,
                                                   num_fusion_params);
  if (!while_instr_config.has_value()) {
    return nullptr;
  }
  for (HloInstruction* fused_instr :
       instr->fused_instructions_computation()->MakeInstructionPostOrder()) {
    if (fused_instr->opcode() != opcode) {
      continue;
    }
    std::optional<int64_t> dynamic_index =
        MatchShapeCoveringDynamicIndexInstruction(
            fused_instr,
            instr->fused_instructions_computation()->parameter_instruction(
                stacked_operand_idx),
            opcode, while_instr_config.value());
    if (dynamic_index.has_value() && dynamic_index.value() == 0) {
      return fused_instr;
    }
  }
  return nullptr;
}

// Recognizes a two-parameter fusion whose root is
// bitcast(dynamic-slice(stacked, i, 0, ...)). On a match, fills a PatternInfo
// whose unstacked shape replicates the slice shape `num_layers` times and
// whose unstacking computation is the fusion body itself.
std::optional<PatternInfo> GetDSFusionPattern(const UnstackerMetadata& metadata,
                                              const HloInstruction* instr,
                                              int64_t stacked_operand_idx) {
  VLOG(3) << "Checking DSFusion";
  HloInstruction* shape_covering_instr =
      GetMostMajorEffectivelyStaticDynamicSliceInFusion(metadata, instr, 2,
                                                        stacked_operand_idx);
  if (shape_covering_instr == nullptr) {
    return std::nullopt;
  }
  HloInstruction* bitcast_operand = nullptr;
  if (Match(instr->fused_instructions_computation()->root_instruction(),
            match::Bitcast(match::Op(&bitcast_operand)))) {
    // The bitcast must consume the matched dynamic-slice directly.
    if (bitcast_operand == shape_covering_instr) {
      PatternInfo pattern_info;
      pattern_info.type = PatternType::DSFusionPattern;
      pattern_info.instr = instr;
      const Shape& slice_shape = shape_covering_instr->shape();
      const int64_t num_layers = instr->operand(0)->shape().dimensions(0);
      pattern_info.unstacked_shape =
          MakeUnstackedShapeFromSlice(slice_shape, num_layers);
      pattern_info.unstacking_computation =
          instr->fused_instructions_computation();
      pattern_info.unstacked_instrs.push_back(instr);
      return pattern_info;
    }
  }
  return std::nullopt;
}

// Rewrites a matched DSFusion: replaces the fusion with a "DynamicGte"
// custom-call (dynamic tuple-element access, presumably lowered later) plus a
// bitcast back to the fusion's original result shape.
absl::Status UnstackDSFusionPattern(
    HloInstruction* mutable_dynamic_slicing_fusion, const Shape& slice_shape) {
  HloComputation* parent_loop = mutable_dynamic_slicing_fusion->parent();

  HloInstruction* stacked = mutable_dynamic_slicing_fusion->mutable_operand(0);
  HloInstruction* offset = mutable_dynamic_slicing_fusion->mutable_operand(1);

  HloInstruction* new_operand =
      parent_loop->AddInstruction(HloInstruction::CreateCustomCall(
          slice_shape, {stacked, offset}, "DynamicGte"));

  HloInstruction* bitcast = mutable_dynamic_slicing_fusion->AddInstruction(
      HloInstruction::CreateBitcast(mutable_dynamic_slicing_fusion->shape(),
                                    new_operand));

  return mutable_dynamic_slicing_fusion->ReplaceAllUsesWithDifferentShape(
      bitcast);
}

// Variant of GetDSFusionPattern where the dynamic-slice itself is the fusion
// root (no trailing bitcast).
std::optional<PatternInfo> GetDSFusionNoBitcastPattern(
    const UnstackerMetadata& metadata, const HloInstruction* instr,
    int64_t stacked_operand_idx) {
  VLOG(3) << "Checking DSFusionNoBitcast";
  HloInstruction* shape_covering_instr =
      GetMostMajorEffectivelyStaticDynamicSliceInFusion(metadata, instr, 2,
                                                        stacked_operand_idx);
  if (shape_covering_instr == nullptr) {
    return std::nullopt;
  }
  if (instr->fused_instructions_computation()->root_instruction() !=
      shape_covering_instr) {
    return std::nullopt;
  }
  PatternInfo pattern_info;
  pattern_info.type = PatternType::DSFusionNoBitcastPattern;
  pattern_info.instr = instr;
  const Shape& slice_shape = shape_covering_instr->shape();
  const int64_t num_layers = instr->operand(0)->shape().dimensions(0);
  pattern_info.unstacked_shape =
      MakeUnstackedShapeFromSlice(slice_shape, num_layers);
  pattern_info.unstacking_computation =
      instr->fused_instructions_computation();
  pattern_info.unstacked_instrs.push_back(instr);
  return pattern_info;
}

// Rewrites a matched DSFusionNoBitcast: the fusion is replaced directly by a
// "DynamicGte" custom-call; no bitcast is needed since the root was the slice.
absl::Status UnstackDSFusionNoBitcastPattern(
    HloInstruction* mutable_dynamic_slicing_fusion, const Shape& slice_shape) {
  HloComputation* parent_loop = mutable_dynamic_slicing_fusion->parent();

  HloInstruction* stacked = mutable_dynamic_slicing_fusion->mutable_operand(0);
  HloInstruction* offset = mutable_dynamic_slicing_fusion->mutable_operand(1);

  HloInstruction* new_operand =
      parent_loop->AddInstruction(HloInstruction::CreateCustomCall(
          slice_shape, {stacked, offset}, "DynamicGte"));

  return mutable_dynamic_slicing_fusion->ReplaceAllUsesWithDifferentShape(
      new_operand);
}

// Recognizes a three-parameter fusion whose root is a shape-covering
// dynamic-update-slice of the stacked operand with update operand
// bitcast(parameter). The unstacked shape is derived from the update operand.
std::optional<PatternInfo> GetDUSFusionPattern(
    const UnstackerMetadata& metadata, const HloInstruction* instr,
    int64_t stacked_operand_idx) {
  VLOG(3) << "Checking DUSFusion";
  HloInstruction* shape_covering_instr =
      GetMostMajorShapeCoveringDynamicIndexInFusion(
          metadata, instr, HloOpcode::kDynamicUpdateSlice, 3,
          stacked_operand_idx);
  if (shape_covering_instr == nullptr) {
    return std::nullopt;
  }
  if (Match(shape_covering_instr->operand(1),
            match::Bitcast(match::Parameter()))) {
    if (shape_covering_instr->parent()->root_instruction() ==
        shape_covering_instr) {
      PatternInfo pattern_info;
      pattern_info.type = PatternType::Other;
      pattern_info.instr = instr;
pattern_info.unstacked_shape = MakeUnstackedShapeFromSlice(  // NOTE(review): tail of GetDUSFusionPattern; its opening lies on the previous chunk.
          instr->operand(2)->shape(),
          instr->operand(0)->shape().dimensions(0));
      // No unstacking computation: the stacked operand is consumed in place.
      pattern_info.unstacking_computation = nullptr;
      pattern_info.unstacked_instrs.push_back(instr);
      return pattern_info;
    }
  }
  return std::nullopt;
}

// Rewrites a matched DUSFusion: replaces the fusion with a "DynamicTuple"
// custom-call that writes `update` into the stacked value at `offset`, and
// reroutes all users to it.
absl::Status UnstackDUSFusionPattern(
    HloInstruction* mutable_dynamic_update_slicing_fusion,
    const Shape& slice_shape) {
  HloComputation* parent_loop = mutable_dynamic_update_slicing_fusion->parent();

  HloInstruction* stacked =
      mutable_dynamic_update_slicing_fusion->mutable_operand(0);
  HloInstruction* offset =
      mutable_dynamic_update_slicing_fusion->mutable_operand(1);
  HloInstruction* update =
      mutable_dynamic_update_slicing_fusion->mutable_operand(2);

  HloInstruction* new_operand =
      parent_loop->AddInstruction(HloInstruction::CreateCustomCall(
          stacked->shape(), {stacked, update, offset}, "DynamicTuple"));

  for (HloInstruction* user : mutable_dynamic_update_slicing_fusion->users()) {
    TF_RETURN_IF_ERROR(
        mutable_dynamic_update_slicing_fusion->ReplaceUseWithDifferentShape(
            user, new_operand));
  }
  return absl::OkStatus();
}

// Like GetDUSFusionPattern, but the update operand is
// bitcast(pad(parameter, constant)); the unstacked shape is derived from the
// pad result rather than the raw parameter.
std::optional<PatternInfo> GetDUSFusionWithPadPattern(
    const UnstackerMetadata& metadata, const HloInstruction* instr,
    int64_t stacked_operand_idx) {
  VLOG(3) << "Checking DUSFusionWithPad";
  HloInstruction* shape_covering_instr =
      GetMostMajorShapeCoveringDynamicIndexInFusion(
          metadata, instr, HloOpcode::kDynamicUpdateSlice, 3,
          stacked_operand_idx);
  if (shape_covering_instr == nullptr) {
    return std::nullopt;
  }
  if (Match(
          shape_covering_instr->operand(1),
          match::Bitcast(match::Pad(match::Parameter(), match::Constant())))) {
    if (shape_covering_instr->parent()->root_instruction() ==
        shape_covering_instr) {
      const HloInstruction* pad_instr =
          shape_covering_instr->operand(1)->operand(0);
      PatternInfo pattern_info;
      pattern_info.type = PatternType::Other;
      pattern_info.instr = instr;
      pattern_info.unstacked_shape = MakeUnstackedShapeFromSlice(
          pad_instr->shape(),
          shape_covering_instr->operand(0)->shape().dimensions(0));
      pattern_info.unstacking_computation = nullptr;
      pattern_info.unstacked_instrs.push_back(instr);
      return pattern_info;
    }
  }
  return std::nullopt;
}

// Rewrites a matched DUSFusionWithPad: re-roots the fusion at the pad so the
// fusion now produces the padded update, then feeds it into a "DynamicTuple"
// custom-call together with the stacked operand and offset. Operand positions
// are recovered from the parameter numbers seen at the old root.
absl::Status UnstackDUSFusionWithPadPattern(
    HloInstruction* mutable_dynamic_update_slicing_fusion,
    const Shape& slice_shape) {
  HloComputation* parent_loop = mutable_dynamic_update_slicing_fusion->parent();
  HloComputation* fused_computation =
      mutable_dynamic_update_slicing_fusion->fused_instructions_computation();

  HloInstruction* stacked =
      mutable_dynamic_update_slicing_fusion->mutable_operand(
          fused_computation->root_instruction()
              ->mutable_operand(0)
              ->parameter_number());
  HloInstruction* offset =
      mutable_dynamic_update_slicing_fusion->mutable_operand(
          fused_computation->root_instruction()
              ->mutable_operand(2)
              ->parameter_number());

  HloInstruction* pad_instr = fused_computation->root_instruction()
                                  ->mutable_operand(1)
                                  ->mutable_operand(0);
  // Re-root the fusion at the pad and shrink the fusion's shape accordingly.
  fused_computation->set_root_instruction(pad_instr, true);
  *mutable_dynamic_update_slicing_fusion->mutable_shape() = pad_instr->shape();

  HloInstruction* new_operand =
      parent_loop->AddInstruction(HloInstruction::CreateCustomCall(
          stacked->shape(),
          {stacked, mutable_dynamic_update_slicing_fusion, offset},
          "DynamicTuple"));

  for (HloInstruction* user : mutable_dynamic_update_slicing_fusion->users()) {
    // Skip the custom-call itself: the fusion is now one of its operands.
    if (user != new_operand) {
      TF_RETURN_IF_ERROR(
          mutable_dynamic_update_slicing_fusion->ReplaceUseWithDifferentShape(
              user, new_operand));
    }
  }
  return absl::OkStatus();
}

// Recognizes a fusion of the form reduce(add(dynamic-slice(...), broadcast of
// a constant)). Builds a fresh "unstack_add" computation that reproduces the
// slice+reduce (without the add) for use as the unstacking computation.
std::optional<PatternInfo> GetDSFusionWithAddPattern(
    const UnstackerMetadata& metadata, const HloInstruction* instr,
    int64_t stacked_operand_idx) {
  VLOG(3) << "Checking DSFusionWithAdd";
  HloInstruction* shape_covering_instr =
      GetMostMajorShapeCoveringDynamicIndexInFusion(
          metadata, instr, HloOpcode::kDynamicSlice, 2, stacked_operand_idx);
  if (shape_covering_instr == nullptr) {
    return std::nullopt;
  }
  HloComputation* fused_computation = instr->fused_instructions_computation();
  HloInstruction* fusion_root = fused_computation->root_instruction();
  HloInstruction* add_operand;
  if (Match(fusion_root,
            match::Reduce(match::Add(match::Op(&add_operand),
                                     match::Broadcast(match::Constant())),
                          match::Constant()))) {
    if (add_operand == shape_covering_instr) {
      const int64_t num_layers = instr->operand(0)->shape().dimensions(0);
      PatternInfo pattern_info;
      pattern_info.type = PatternType::Other;
      pattern_info.instr = instr;
      pattern_info.unstacked_shape =
          MakeUnstackedShapeFromSlice(instr->shape(), num_layers);
      // Build the unstacking computation: p0 = stacked input, p1 = dynamic
      // index; it slices p0 at p1 and reduces with the original reducer.
      HloComputation::Builder builder("unstack_add");
      HloInstruction* p0 =
          builder.AddInstruction(HloInstruction::CreateParameter(
              0, fused_computation->parameter_instruction(0)->shape(), "p0"));
      HloInstruction* p1 =
          builder.AddInstruction(HloInstruction::CreateParameter(
              1, fused_computation->parameter_instruction(1)->shape(), "p1"));
      HloInstruction* zero =
          builder.AddInstruction(MakeScalarConstantWithShape(p1->shape(), 0));
      std::vector<HloInstruction*> slice_starts;
      slice_starts.reserve(shape_covering_instr->shape().rank());
      // Dynamic index on the most-major dimension, zero elsewhere.
      slice_starts.push_back(p1);
      for (int64_t i = 0; i < shape_covering_instr->shape().rank() - 1; i++) {
        slice_starts.push_back(zero);
      }
      HloInstruction* slice =
          builder.AddInstruction(HloInstruction::CreateDynamicSlice(
              shape_covering_instr->shape(), p0, slice_starts,
              shape_covering_instr->dynamic_slice_sizes()));
      HloInstruction* zero_reduce =
          builder.AddInstruction(MakeScalarConstantWithShape(
              ShapeUtil::MakeScalarShape(slice->shape().element_type()), 0));
      HloInstruction* reduce =
          builder.AddInstruction(HloInstruction::CreateReduce(
              instr->shape(), slice, zero_reduce, fusion_root->dimensions(),
              fused_computation->root_instruction()->to_apply()));
      HloComputation* unstack_add =
          instr->GetModule()->AddEmbeddedComputation(builder.Build());
      unstack_add->set_root_instruction(reduce);
      pattern_info.unstacking_computation = unstack_add;
      pattern_info.unstacked_instrs.push_back(instr);
      return pattern_info;
}  // NOTE(review): tail of GetDSFusionWithAddPattern; its opening lies on the previous chunk.
  }
  return std::nullopt;
}

// Rewrites a matched DSFusionWithAdd: replaces the fusion with
// "DynamicGte" + broadcast(1) + add, restoring the add that the unstacking
// computation (built in GetDSFusionWithAddPattern) intentionally omits.
absl::Status UnstackDSFusionWithAddPattern(
    HloInstruction* mutable_dynamic_slice_with_add_fusion,
    const Shape& slice_shape) {
  HloComputation* parent_loop = mutable_dynamic_slice_with_add_fusion->parent();
  HloInstruction* stacked =
      mutable_dynamic_slice_with_add_fusion->mutable_operand(0);
  HloInstruction* offset =
      mutable_dynamic_slice_with_add_fusion->mutable_operand(1);
  HloInstruction* new_operand =
      parent_loop->AddInstruction(HloInstruction::CreateCustomCall(
          slice_shape, {stacked, offset}, "DynamicGte"));
  HloInstruction* one = parent_loop->AddInstruction(MakeScalarConstantWithShape(
      ShapeUtil::MakeScalarShape(slice_shape.element_type()), 1));
  HloInstruction* broadcast = parent_loop->AddInstruction(
      HloInstruction::CreateBroadcast(slice_shape, one, {}));
  HloInstruction* add = mutable_dynamic_slice_with_add_fusion->AddInstruction(
      HloInstruction::CreateBinary(new_operand->shape(), HloOpcode::kAdd,
                                   new_operand, broadcast));
  TF_RETURN_IF_ERROR(
      mutable_dynamic_slice_with_add_fusion->ReplaceAllUsesWith(add));
  return absl::OkStatus();
}

// Recognizes the nested case: the stacked parameter of an outer fusion is fed
// (as its sole use) into an inner fusion that dynamic-slices it along the
// most-major dimension. The PatternInfo targets the inner fusion.
std::optional<PatternInfo> GetNestedDSFusionPattern(
    const UnstackerMetadata& metadata, const HloInstruction* instr,
    int64_t stacked_operand_idx) {
  if (instr->opcode() != HloOpcode::kFusion) {
    return std::nullopt;
  }
  if (!metadata.unrollable_loop_bodies.contains(instr->parent())) {
    VLOG(5) << "Instruction not inside unrollable while body, "
            << instr->name() << " inside " << instr->parent()->name();
    return std::nullopt;
  }
  WhileLoopConfig while_instr_config =
      metadata.unrollable_loop_bodies.at(instr->parent());
  VLOG(3) << "Checking NestedDSFusionPattern";
  HloInstruction* inner_fusion_user = nullptr;
  // Find the inner fusion that is the single user of the stacked parameter.
  for (HloInstruction* fused_instr :
       instr->fused_instructions_computation()->MakeInstructionPostOrder()) {
    if (Match(fused_instr, match::Parameter(stacked_operand_idx))) {
      if (fused_instr->user_count() != 1) {
        return std::nullopt;
      }
      if (Match(fused_instr->users()[0],
                match::Fusion(match::Op(), match::Op()))) {
        inner_fusion_user = fused_instr->users()[0];
        break;
      }
    }
  }
  if (inner_fusion_user == nullptr) {
    return std::nullopt;
  }
  // Inside the inner fusion, look for an effectively-static dynamic-slice of
  // its first parameter on the most-major dimension.
  for (HloInstruction* inner_fusion_instr :
       inner_fusion_user->fused_instructions_computation()
           ->MakeInstructionPostOrder()) {
    if (!Match(inner_fusion_instr, match::DynamicSlice())) {
      continue;
    }
    std::optional<int64_t> dynamic_index =
        MatchEffectivelyStaticDynamicSliceInsideLoop(
            inner_fusion_instr,
            inner_fusion_user->fused_instructions_computation()
                ->parameter_instruction(0),
            while_instr_config);
    if (dynamic_index.has_value() && dynamic_index.value() == 0) {
      const int64_t num_layers =
          inner_fusion_user->operand(0)->shape().dimensions(0);
      PatternInfo pattern_info;
      pattern_info.type = PatternType::NestedDSFusionPattern;
      pattern_info.instr = inner_fusion_user;
      pattern_info.unstacked_shape =
          MakeUnstackedShapeFromSlice(inner_fusion_instr->shape(), num_layers);
      pattern_info.unstacking_computation =
          inner_fusion_user->fused_instructions_computation();
      pattern_info.unstacked_instrs.push_back(inner_fusion_user);
      return pattern_info;
    }
  }
  return std::nullopt;
}

// Rewrites a matched NestedDSFusion: the inner slicing fusion is replaced by a
// bitcast of a new sliced parameter, and the outer fusion's stacked operand is
// swapped for a "DynamicGte" custom-call producing a single slice.
absl::Status UnstackNestedDSFusionPattern(
    HloInstruction* mutable_dynamic_slicing_fusion, const Shape& slice_shape) {
  // `mutable_dynamic_slicing_fusion` lives inside the outer fusion; recover
  // that outer fusion instruction.
  HloInstruction* parent_fusion =
      mutable_dynamic_slicing_fusion->parent()->FusionInstruction();

  // The stacked operand reaches the inner fusion through a parameter of the
  // outer fusion computation.
  HloInstruction* stacked_in_ds_fusion =
      mutable_dynamic_slicing_fusion->mutable_operand(0);
  CHECK_EQ(stacked_in_ds_fusion->opcode(), HloOpcode::kParameter);
  int64_t stacked_param_number = stacked_in_ds_fusion->parameter_number();
  HloInstruction* stacked =
      parent_fusion->mutable_operand(stacked_param_number);

  HloInstruction* offset_in_ds_fusion =
      mutable_dynamic_slicing_fusion->mutable_operand(1);
  CHECK_EQ(offset_in_ds_fusion->opcode(), HloOpcode::kParameter);
  HloInstruction* offset =
      parent_fusion->mutable_operand(offset_in_ds_fusion->parameter_number());

  // Narrow the outer fusion's stacked parameter to a single slice.
  HloInstruction* sliced_param =
      parent_fusion->fused_instructions_computation()->ReplaceParameter(
          stacked_param_number,
          HloInstruction::CreateParameter(stacked_param_number, slice_shape,
                                          "sliced"));

  HloInstruction* bitcast = mutable_dynamic_slicing_fusion->AddInstruction(
      HloInstruction::CreateBitcast(mutable_dynamic_slicing_fusion->shape(),
                                    sliced_param));
  HloInstruction* bitcast_fusion =
      mutable_dynamic_slicing_fusion->AddInstruction(
          HloInstruction::CreateFusion(mutable_dynamic_slicing_fusion->shape(),
                                       HloInstruction::FusionKind::kLoop,
                                       bitcast));

  TF_RETURN_IF_ERROR(
      mutable_dynamic_slicing_fusion->ReplaceAllUsesWith(bitcast_fusion));

  // Feed a "DynamicGte" of the stacked operand into the narrowed parameter.
  HloInstruction* new_operand =
      parent_fusion->AddInstruction(HloInstruction::CreateCustomCall(
          slice_shape, {stacked, offset}, "DynamicGte"));
  return parent_fusion->ReplaceOperandWithDifferentShape(
      sliced_param->parameter_number(), new_operand);
}

// Recognizes the combined pattern where the stacked value has exactly two
// users: this dynamic-slice fusion (root bitcast(dynamic-slice)) and a second
// user that itself matches GetDUSFusionPattern. Both users are recorded as
// unstacked instructions.
std::optional<PatternInfo> GetDSAndDUSPattern(const UnstackerMetadata& metadata,
                                              const HloInstruction* instr,
                                              int64_t stacked_operand_idx) {
  VLOG(3) << "Checking DSAndDUSPattern";
  if (instr->opcode() != HloOpcode::kFusion) {
    return std::nullopt;
  }
  const HloInstruction* stacked = instr->operand(stacked_operand_idx);
  if (stacked->user_count() != 2) {
    return std::nullopt;
  }
  HloInstruction* shape_covering_ds_instr =
      GetMostMajorShapeCoveringDynamicIndexInFusion(
          metadata, instr, HloOpcode::kDynamicSlice, 2, stacked_operand_idx);
  if (shape_covering_ds_instr == nullptr) {
    return std::nullopt;
  }
  HloInstruction* bitcast_operand = nullptr;
  if (!Match(instr->fused_instructions_computation()->root_instruction(),
             match::Bitcast(match::Op(&bitcast_operand)))) {
    return std::nullopt;
  }
  if (bitcast_operand != shape_covering_ds_instr) {
    return std::nullopt;
  }
  // The stacked value's other user must be a DUS fusion.
  if (!GetDUSFusionPattern(metadata, stacked->users()[1],
                           stacked->users()[1]->operand_index(stacked))) {
    return std::nullopt;
  }
  PatternInfo pattern_info;
  pattern_info.type = PatternType::Other;
  pattern_info.instr = instr;
  const Shape& slice_shape = instr->shape();
  const int64_t num_layers = instr->operand(0)->shape().dimensions(0);
  pattern_info.unstacked_shape =
MakeUnstackedShapeFromSlice(slice_shape, num_layers);  // NOTE(review): tail of GetDSAndDUSPattern; its opening lies on the previous chunk.
  pattern_info.unstacking_computation = instr->fused_instructions_computation();
  // Both users of the stacked value are rewritten together.
  pattern_info.unstacked_instrs.push_back(instr);
  pattern_info.unstacked_instrs.push_back(stacked->users()[1]);
  return pattern_info;
}

// Rewrites a matched DSAndDUS pattern: updates the enclosing tuple shape for
// the stacked element, replaces the DS fusion with a "DynamicGte" custom-call,
// and delegates the companion DUS fusion to UnstackDUSFusionPattern.
absl::Status UnstackDSAndDUSPattern(HloInstruction* mutable_dynamic_slice,
                                    const Shape& slice_shape) {
  HloInstruction* stacked_gte = mutable_dynamic_slice->mutable_operand(0);
  int64_t stacked_gte_index = stacked_gte->tuple_index();
  HloComputation* parent = stacked_gte->parent();
  // Keep the computation's root tuple shape consistent with the GTE's shape.
  ShapeUtil::UpdateTupleShape(stacked_gte->shape(), stacked_gte_index,
                              parent->root_instruction()->mutable_shape());

  HloComputation* parent_loop = mutable_dynamic_slice->parent();
  HloInstruction* stacked = mutable_dynamic_slice->mutable_operand(0);
  HloInstruction* offset = mutable_dynamic_slice->mutable_operand(1);
  HloInstruction* new_operand =
      parent_loop->AddInstruction(HloInstruction::CreateCustomCall(
          slice_shape, {stacked, offset}, "DynamicGte"));
  TF_RETURN_IF_ERROR(
      mutable_dynamic_slice->ReplaceAllUsesWithDifferentShape(new_operand));

  // The second user of the stacked GTE is the companion DUS fusion.
  HloInstruction* mutable_dynamic_update_slice = stacked_gte->users()[1];
  TF_RETURN_IF_ERROR(
      UnstackDUSFusionPattern(mutable_dynamic_update_slice, slice_shape));
  return absl::OkStatus();
}

// Recognizes a fusion whose root is reduce(dynamic-slice(stacked, ...)) with
// an add reducer (root of to_apply is add(param, param)).
std::optional<PatternInfo> GetReduceFusionPattern(
    const UnstackerMetadata& metadata, const HloInstruction* instr,
    int64_t stacked_operand_idx) {
  VLOG(3) << "Checking ReduceFusion";
  HloInstruction* shape_covering_instr =
      GetMostMajorShapeCoveringDynamicIndexInFusion(
          metadata, instr, HloOpcode::kDynamicSlice, 2, stacked_operand_idx);
  if (shape_covering_instr == nullptr) {
    return std::nullopt;
  }
  HloInstruction* reduce_operand = nullptr;
  HloInstruction* fusion_root =
      instr->fused_instructions_computation()->root_instruction();
  if (Match(fusion_root, match::Reduce(match::Op(&reduce_operand),
                                       match::ConstantScalar())) &&
      Match(fusion_root->to_apply()->root_instruction(),
            match::Add(match::Parameter(), match::Parameter()))) {
    if (reduce_operand == shape_covering_instr) {
      PatternInfo pattern_info;
      pattern_info.type = PatternType::Other;
      pattern_info.instr = instr;
      const Shape& slice_shape = instr->shape();
      const int64_t num_layers = instr->operand(0)->shape().dimensions(0);
      pattern_info.unstacked_shape =
          MakeUnstackedShapeFromSlice(slice_shape, num_layers);
      pattern_info.unstacking_computation =
          instr->fused_instructions_computation();
      pattern_info.unstacked_instrs.push_back(instr);
      return pattern_info;
    }
  }
  return std::nullopt;
}

// Rewrites a matched ReduceFusion: the whole fusion becomes a "DynamicGte"
// custom-call on the (already unstacked) operand.
absl::Status UnstackReduceFusionPattern(HloInstruction* mutable_reduce_fusion,
                                        const Shape& slice_shape) {
  HloComputation* parent_loop = mutable_reduce_fusion->parent();

  HloInstruction* stacked = mutable_reduce_fusion->mutable_operand(0);
  HloInstruction* offset = mutable_reduce_fusion->mutable_operand(1);

  HloInstruction* new_operand =
      parent_loop->AddInstruction(HloInstruction::CreateCustomCall(
          slice_shape, {stacked, offset}, "DynamicGte"));

  return mutable_reduce_fusion->ReplaceAllUsesWithDifferentShape(new_operand);
}

};  // NOTE(review): likely closes an enclosing scope opened before this chunk — confirm against full file.

// Entry point of the pass: registers all pattern/handler pairs, attempts to
// unstack every eligible operand of every entry-computation while loop, and
// finally fully unrolls the loops whose bodies were changed.
absl::StatusOr<bool> HloUnstacker::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  TF_ASSIGN_OR_RETURN(auto metadata,
                      UnstackerMetadata::Create(module, unfuse_slice_));
  // Handler order matters: more specific patterns are registered first
  // (e.g. DSAndDUS before the plain DS/DUS patterns).
  metadata.custom_handlers.push_back(
      std::make_pair(GetDSAndDUSPattern, UnstackDSAndDUSPattern));
  metadata.custom_handlers.push_back(
      std::make_pair(GetDSFusionPattern, UnstackDSFusionPattern));
  metadata.custom_handlers.push_back(
      std::make_pair(GetDUSFusionPattern, UnstackDUSFusionPattern));
  metadata.custom_handlers.push_back(std::make_pair(
      GetDUSFusionWithPadPattern, UnstackDUSFusionWithPadPattern));
  metadata.custom_handlers.push_back(
      std::make_pair(GetDSFusionWithAddPattern, UnstackDSFusionWithAddPattern));
  metadata.custom_handlers.push_back(
      std::make_pair(GetReduceFusionPattern, UnstackReduceFusionPattern));
  metadata.custom_handlers.push_back(
      std::make_pair(GetNestedDSFusionPattern, UnstackNestedDSFusionPattern));
  metadata.custom_handlers.push_back(std::make_pair(
      GetDSFusionNoBitcastPattern, UnstackDSFusionNoBitcastPattern));

  // Collect entry-computation while loops whose init and body root are tuples.
  std::vector<HloInstruction*> entry_loops;
  for (HloInstruction* instr :
       module->entry_computation()->MakeInstructionPostOrder()) {
    if (Match(instr, match::While(match::Tuple())) &&
        Match(instr->while_body()->root_instruction(), match::Tuple())) {
      entry_loops.push_back(instr);
    }
  }

  bool unstacked = false;
  std::vector<const HloInstruction*> unstacked_instructions;
  for (HloInstruction* loop : entry_loops) {
    for (int64_t i = 0; i < loop->shape().tuple_shapes_size(); ++i) {
      // Nested tuples are not candidates for unstacking.
      if (loop->while_init()->operand(i)->shape().IsTuple()) {
        continue;
      }
      VLOG(3) << "Attempting to unstack " << loop->name() << " at " << i
              << " = "
              << loop->while_init()->operand(i)->shape().ToString(true)
              << loop->while_init()->operand(i)->ToShortString();
      unstacked |=
          UnstackWhileOperandAtIndex(metadata, loop, i, unstacked_instructions);
      VLOG(3) << "###################";
    }
  }
  if (!unstacked) {
    return false;
  }
  TF_RETURN_IF_ERROR(module->RemoveUnusedComputations());

  // Gather the (deduplicated) set of loops touched by unstacking.
  std::vector<HloInstruction*> loops_to_unroll;
  for (const HloInstruction* instr : unstacked_instructions) {
    HloInstruction* loop = metadata.bodies[instr->parent()];
    if (std::find(loops_to_unroll.begin(), loops_to_unroll.end(), loop) ==
        loops_to_unroll.end()) {
      loops_to_unroll.push_back(loop);
    }
  }
  // Unroll in reverse discovery order; each unroll must succeed since
  // unstacking only fires inside known-unrollable loops.
  for (int64_t i = loops_to_unroll.size() - 1; i >= 0; --i) {
    HloInstruction* loop = loops_to_unroll[i];
    TF_ASSIGN_OR_RETURN(UnrollResult unroll_result,
                        WhileLoopUnroller::UnrollAndReturnReplacement(
                            loop, -1, false, true, false));
    bool unrolled = unroll_result.unrolled;
    CHECK(unrolled);
  }
  VLOG(3) << "after unstacking \n" << module->ToString();
  return true;
}

}  // NOTE(review): closes enclosing namespace opened before this chunk.
#include "xla/service/hlo_unstacker.h"

#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>

#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace {

using UnstackerTest = HloTestBase;

// Counts the instructions with the given opcode in the module's entry
// computation.
int64_t GetInstrCountWithOpcodeInEntry(HloModule* module, HloOpcode opcode) {
  int64_t instr_with_opcode_count = 0;
  for (HloInstruction* instr :
       module->entry_computation()->MakeInstructionPostOrder()) {
    if (instr->opcode() == opcode) {
      instr_with_opcode_count++;
    }
  }
  return instr_with_opcode_count;
}

// A while loop whose body slices a stacked s8[3,128,128] operand via a
// bitcast(dynamic-slice) fusion should be unstacked: the fusion disappears
// and per-layer slices appear in the entry computation.
TEST_F(UnstackerTest, UnstackDSFusionPattern) {
  std::string hlo_string = R"(
  HloModule SimpleLoop
  %fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {
    %param_0.51117 = s8[3,128,128] parameter(0)
    p1 = s32[] parameter(1)
    %constant.85694 = s32[] constant(0)
    %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
    ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
  }

  %while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
    wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
    i = s32[] get-tuple-element(wide_p), index=0
    p0 = bf16[8,128] get-tuple-element(wide_p), index=1
    p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
    one = s32[] constant(1)
    inc = s32[] add(i, one)
    %fusion.67830 = s8[128,128] fusion(s8[3,128,128] p1, i), kind=kLoop, calls=%fused_computation.slice
    conv = bf16[8,128] convolution(bf16[8,128] p0, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
    ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, conv, p1)
  }

  %while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
    wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
    i = s32[] get-tuple-element(wide_p), index=0
    %constant.12857 = s32[] constant(3)
    ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
  }

  ENTRY main {
    p0 = s8[3,128,128] parameter(0)
    p1 = bf16[8,128] parameter(1)
    init = s32[] constant(0)
    while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
    while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
    while_use = s8[3,128,128] get-tuple-element(while.out), index=2
    ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
  }
  )";

  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto original = module->Clone();
  TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
  EXPECT_TRUE(unstacked);
  // One slice per loop iteration (trip count 3); no fusions remain.
  EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 3);
  EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kFusion),
            0);
  // Check numerical equivalence against the original module.
  EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
                                      std::nullopt, false));
}

// If the stacked operand has an additional consumer (a multiply/tuple fusion
// feeding the loop carry), the pass must NOT unstack it.
TEST_F(UnstackerTest, NotUnstackDSFusionPattern) {
  std::string hlo_string = R"(
  HloModule SimpleLoop
  %fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {
    %param_0.51117 = s8[3,128,128] parameter(0)
    p1 = s32[] parameter(1)
    %constant.85694 = s32[] constant(0)
    %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
    ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
  }

  %fused_computation.tuple {
    %param_0.51117 = s8[3,128,128] parameter(0)
    mult = multiply(param_0.51117, param_0.51117)
    ROOT out = tuple(param_0.51117, mult)
  }

  %while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
    wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
    i = s32[] get-tuple-element(wide_p), index=0
    p0 = bf16[8,128] get-tuple-element(wide_p), index=1
    p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
    one = s32[] constant(1)
    inc = s32[] add(i, one)
    %fusion.67830 = s8[128,128] fusion(s8[3,128,128] p1, i), kind=kLoop, calls=%fused_computation.slice
    conv = bf16[8,128] convolution(bf16[8,128] p0, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
    fusion_mult = (s8[3,128,128], s8[3,128,128]) fusion(s8[3,128,128] p1), kind=kLoop, calls=%fused_computation.tuple
    mult = s8[3,128,128] get-tuple-element(fusion_mult), index=1
    ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, conv, mult)
  }

  %while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
    wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
    i = s32[] get-tuple-element(wide_p), index=0
    %constant.12857 = s32[] constant(3)
    ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
  }

  ENTRY main {
    p0 = s8[3,128,128] parameter(0)
    p1 = bf16[8,128] parameter(1)
    init = s32[] constant(0)
    while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
    while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
    while_use = s8[3,128,128] get-tuple-element(while.out), index=2
    ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
  }
  )";

  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto original = module->Clone();
  TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
  EXPECT_FALSE(unstacked);
}

// A fusion of the form reduce(dynamic-slice(stacked)) with an add reducer
// matches the ReduceFusion pattern and should be unstacked.
TEST_F(UnstackerTest, UnstackReduceFusionPattern) {
  std::string hlo_string = R"(
  HloModule SimpleLoop
  dynamic-slice.609.reduce_sub_computation {
    lhs.53 = s8[] parameter(0)
    rhs.53 = s8[] parameter(1)
    ROOT add.3090 = s8[] add(lhs.53, rhs.53)
  }

  fused_computation.1096.clone {
    param_0.5572 = s8[3,128,128] parameter(0)
    param_1.6711 = s32[]{:T(128)} parameter(1)
    constant.12008 = s32[]{:T(128)} constant(0)
    dynamic-slice.1545 = s8[1,128,128] dynamic-slice(param_0.5572, param_1.6711, constant.12008, constant.12008), dynamic_slice_sizes={1,128, 128}
    constant.12009 = s8[] constant(-0)
    ROOT reduce.919 = s8[128,128] reduce(dynamic-slice.1545, constant.12009), dimensions={0}, to_apply=dynamic-slice.609.reduce_sub_computation
  }

  %while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
    wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
    i = s32[] get-tuple-element(wide_p), index=0
    p0 = bf16[8,128] get-tuple-element(wide_p), index=1
    p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
    one = s32[] constant(1)
    inc = s32[] add(i, one)
    %fusion.67830 = s8[128,128] fusion(s8[3,128,128] p1, i), kind=kLoop, calls=%fused_computation.1096.clone
    conv = bf16[8,128] convolution(bf16[8,128] p0, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
    ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, conv, p1)
  }

  %while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
    wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
    i = s32[] get-tuple-element(wide_p), index=0
    %constant.12857 = s32[] constant(3)
    ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
  }

  ENTRY main {
    p0 = s8[3,128,128] parameter(0)
    p1 = bf16[8,128] parameter(1)
    init = s32[] constant(0)
    while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
    while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
    while_use = s8[3,128,128] get-tuple-element(while.out), index=2
    ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
  }
  )";

  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto original = module->Clone();
  TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
  EXPECT_TRUE(unstacked);
  EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
                                      std::nullopt, false));
} TEST_F(UnstackerTest, UnstackDSFusionPatternNoBitcast) { std::string hlo_string = R"( HloModule SimpleLoop %fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[1,128,128] { %param_0.51117 = s8[3,128,128] parameter(0) p1 = s32[] parameter(1) %constant.85694 = s32[] constant(0) ROOT %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128} } %while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) { wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 p0 = bf16[8,128] get-tuple-element(wide_p), index=1 p1 = s8[3,128,128] get-tuple-element(wide_p), index=2 one = s32[] constant(1) inc = s32[] add(i, one) %fusion.67830 = s8[1,128,128] fusion(s8[3,128,128] p1, i), kind=kLoop, calls=%fused_computation.slice bitcast.102 = s8[128,128] bitcast(s8[1,128,128] %fusion.67830) conv = bf16[8,128] convolution(bf16[8,128] p0, s8[128,128] bitcast.102), dim_labels=bf_io->bf ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, conv, p1) } %while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] { wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 %constant.12857 = s32[] constant(3) ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT } ENTRY main { p0 = s8[3,128,128] parameter(0) p1 = bf16[8,128] parameter(1) init = s32[] constant(0) while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0) while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body while_use = s8[3,128,128] get-tuple-element(while.out), index=2 ROOT out = bf16[8,128] get-tuple-element(while.out), index=1 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); auto original = 
module->Clone(); TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get())); EXPECT_TRUE(unstacked); EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 3); EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kFusion), 0); EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original), std::nullopt, false)); } TEST_F(UnstackerTest, UnstackDSFusionPatternNoBitcastKeepFused) { std::string hlo_string = R"( HloModule SimpleLoop %fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[1,128,128] { %param_0.51117 = s8[3,128,128] parameter(0) p1 = s32[] parameter(1) %constant.85694 = s32[] constant(0) ROOT %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128} } %while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) { wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 p0 = bf16[8,128] get-tuple-element(wide_p), index=1 p1 = s8[3,128,128] get-tuple-element(wide_p), index=2 one = s32[] constant(1) inc = s32[] add(i, one) %fusion.67830 = s8[1,128,128] fusion(s8[3,128,128] p1, i), kind=kLoop, calls=%fused_computation.slice bitcast.102 = s8[128,128] bitcast(s8[1,128,128] %fusion.67830) conv = bf16[8,128] convolution(bf16[8,128] p0, s8[128,128] bitcast.102), dim_labels=bf_io->bf ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, conv, p1) } %while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] { wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 %constant.12857 = s32[] constant(3) ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT } ENTRY main { p0 = s8[3,128,128] parameter(0) p1 = bf16[8,128] parameter(1) init = s32[] constant(0) while.input = (s32[], bf16[8,128], s8[3,128,128]) 
tuple(init, p1, p0) while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body while_use = s8[3,128,128] get-tuple-element(while.out), index=2 ROOT out = bf16[8,128] get-tuple-element(while.out), index=1 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); auto original = module->Clone(); auto unfuse = [](HloInstruction* instruction) { return false; }; TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker(unfuse).Run(module.get())); EXPECT_TRUE(unstacked); EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 0); EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kFusion), 3); EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original), std::nullopt, false)); } TEST_F(UnstackerTest, UnstackDSFusionPatternWithDifferentLayout) { std::string hlo_string = R"( HloModule SimpleLoop %fused_computation.30.clone (param_0.153: bf16[32,4,64,64,3], param_1.123: s32[]) -> bf16[64,4,64,3] { %param_0.153 = bf16[32,4,64,64,3]{2,1,4,3,0} parameter(0) %param_1.123 = s32[]{:T(128)} parameter(1) %constant.227 = s32[]{:T(128)} constant(0) %dynamic-slice.5 = bf16[1,4,64,64,3]{2,1,4,3,0} dynamic-slice(bf16[32,4,64,64,3]{2,1,4,3,0} %param_0.153, s32[]{:T(128)} %param_1.123, s32[]{:T(128)} %constant.227, s32[]{:T(128)} %constant.227, s32[]{:T(128)} %constant.227, s32[]{:T(128)} %constant.227), dynamic_slice_sizes={1,4,64,64,3} ROOT %bitcast.102 = bf16[64,4,64,3]{0,1,3,2} bitcast(bf16[1,4,64,64,3]{2,1,4,3,0} %dynamic-slice.5) } %while.body (wide_param: (s32[], bf16[8,128], bf16[32,4,64,64,3])) -> (s32[], bf16[8,128], bf16[32,4,64,64,3]) { wide_p = (s32[], bf16[8,128], bf16[32,4,64,64,3]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 p0 = bf16[8,128] get-tuple-element(wide_p), index=1 p1 = bf16[32,4,64,64,3]{2,1,4,3,0} get-tuple-element(wide_p), index=2 one = s32[] constant(1) inc = s32[] add(i, one) %fusion.67830 = 
bf16[64,4,64,3]{0,1,3,2} fusion(p1, i), kind=kLoop, calls=%fused_computation.30.clone ROOT out = (s32[], bf16[8,128], bf16[32,4,64,64,3]) tuple(inc, p0, p1) } %while.cond (wide_param: (s32[], bf16[8,128], bf16[32,4,64,64,3])) -> pred[] { wide_p = (s32[], bf16[8,128], bf16[32,4,64,64,3]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 %constant.12857 = s32[] constant(32) ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT } ENTRY main { p0 = bf16[32,4,64,64,3] parameter(0) p1 = bf16[8,128] parameter(1) init = s32[] constant(0) while.input = (s32[], bf16[8,128], bf16[32,4,64,64,3]) tuple(init, p1, p0) while.out = (s32[], bf16[8,128], bf16[32,4,64,64,3]) while(while.input), condition=%while.cond , body=%while.body while_use = bf16[32,4,64,64,3] get-tuple-element(while.out), index=2 ROOT out = bf16[8,128] get-tuple-element(while.out), index=1 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); auto original = module->Clone(); TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get())); EXPECT_TRUE(unstacked); EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 32); EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kFusion), 0); EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original), std::nullopt)); }
// The slice fusion is nested inside an outer kOutput fusion (convolution):
// the pass must discover the dynamic-slice pattern through the fusion chain.
TEST_F(UnstackerTest, UnstackNestedDSFusionPattern) { std::string hlo_string = R"( HloModule SimpleLoop %fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] { %param_0.51117 = s8[3,128,128] parameter(0) p1 = s32[] parameter(1) %constant.85694 = s32[] constant(0) %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128} ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040) } %fused_computation.inner (param_0.34523: bf16[8,128], param_1.30691: 
s8[3,128,128], p2: s32[]) -> bf16[8,128] { %param_0.34523 = bf16[8,128] parameter(0) %param_1.30691 = s8[3,128,128] parameter(1) p2 = s32[] parameter(2) %fusion.67830 = s8[128,128] fusion(s8[3,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf } %while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) { wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 p0 = bf16[8,128] get-tuple-element(wide_p), index=1 p1 = s8[3,128,128] get-tuple-element(wide_p), index=2 one = s32[] constant(1) inc = s32[] add(i, one) fusion.conv = bf16[8,128] fusion(p0, p1, i), kind=kOutput, calls=%fused_computation.inner ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, fusion.conv, p1) } %while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] { wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 %constant.12857 = s32[] constant(3) ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT } ENTRY main { p0 = s8[3,128,128] parameter(0) p1 = bf16[8,128] parameter(1) init = s32[] constant(0) while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0) while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body while_use = s8[3,128,128] get-tuple-element(while.out), index=2 ROOT out = bf16[8,128] get-tuple-element(while.out), index=1 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); auto original = module->Clone(); TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get())); EXPECT_TRUE(unstacked); EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 3); 
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original), std::nullopt, false)); }
// Same nested pattern, but the slice index is a value derived from the
// induction variable (i * 2) rather than the induction variable itself.
TEST_F(UnstackerTest, UnstackNestedDSFusionPatternWithDynamicIndex) { std::string hlo_string = R"( HloModule SimpleLoop %fused_computation.slice (param_0.51117: s8[6,128,128], p1: s32[]) -> s8[128,128] { %param_0.51117 = s8[6,128,128] parameter(0) p1 = s32[] parameter(1) %constant.85694 = s32[] constant(0) %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[6,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128} ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040) } %fused_computation.inner (param_0.34523: bf16[8,128], param_1.30691: s8[6,128,128], p2: s32[]) -> bf16[8,128] { %param_0.34523 = bf16[8,128] parameter(0) %param_1.30691 = s8[6,128,128] parameter(1) p2 = s32[] parameter(2) %fusion.67830 = s8[128,128] fusion(s8[6,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf } %while.body (wide_param: (s32[], bf16[8,128], s8[6,128,128])) -> (s32[], bf16[8,128], s8[6,128,128]) { wide_p = (s32[], bf16[8,128], s8[6,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 p0 = bf16[8,128] get-tuple-element(wide_p), index=1 p1 = s8[6,128,128] get-tuple-element(wide_p), index=2 one = s32[] constant(1) inc = s32[] add(i, one) two = s32[] constant(2) mult = s32[] multiply(i, two) fusion.conv = bf16[8,128] fusion(p0, p1, mult), kind=kOutput, calls=%fused_computation.inner ROOT out = (s32[], bf16[8,128], s8[6,128,128]) tuple(inc, fusion.conv, p1) } %while.cond (wide_param: (s32[], bf16[8,128], s8[6,128,128])) -> pred[] { wide_p = (s32[], bf16[8,128], s8[6,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 %constant.12857 = s32[] constant(3) ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), 
direction=LT } ENTRY main { p0 = s8[6,128,128] parameter(0) p1 = bf16[8,128] parameter(1) init = s32[] constant(0) while.input = (s32[], bf16[8,128], s8[6,128,128]) tuple(init, p1, p0) while.out = (s32[], bf16[8,128], s8[6,128,128]) while(while.input), condition=%while.cond , body=%while.body while_use = s8[6,128,128] get-tuple-element(while.out), index=2 ROOT out = bf16[8,128] get-tuple-element(while.out), index=1 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); auto original = module->Clone(); TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get())); EXPECT_TRUE(unstacked); EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original), std::nullopt, false)); }
// Two stacked operands (p1, p2) sliced by two sibling nested fusions in the
// same loop body; both are unstacked, giving 4 + 4 = 8 entry slices.
TEST_F(UnstackerTest, UnstackNestedDSFusionPatternWithMultipleIndex) { std::string hlo_string = R"( HloModule SimpleLoop %fused_computation.slice.1 (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] { %param_0.51117 = s8[4,128,128] parameter(0) p1 = s32[] parameter(1) %constant.85694 = s32[] constant(0) %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128} ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040) } %fused_computation.slice.2 (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] { %param_0.51117 = s8[4,128,128] parameter(0) p1 = s32[] parameter(1) %constant.85694 = s32[] constant(0) %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128} ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040) } %fused_computation.inner.1 (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] { %param_0.34523 = bf16[8,128] parameter(0) %param_1.30691 = s8[4,128,128] parameter(1) p2 = s32[] parameter(2) %fusion.67830 = 
s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice.1 ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf } %fused_computation.inner.2 (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] { %param_0.34523 = bf16[8,128] parameter(0) %param_1.30691 = s8[4,128,128] parameter(1) p2 = s32[] parameter(2) %fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice.2 ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf } %while.body (wide_param: (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) { wide_p = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 p0 = bf16[8,128] get-tuple-element(wide_p), index=1 p1 = s8[4,128,128] get-tuple-element(wide_p), index=2 p2 = s8[4,128,128] get-tuple-element(wide_p), index=3 one = s32[] constant(1) inc = s32[] add(i, one) fusion.conv.1 = bf16[8,128] fusion(p0, p1, i), kind=kOutput, calls=%fused_computation.inner.1 fusion.conv.2 = bf16[8,128] fusion(p0, p2, i), kind=kOutput, calls=%fused_computation.inner.2 plus = bf16[8,128] add(fusion.conv.1, fusion.conv.2) ROOT out = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) tuple(inc, plus, p1, p2) } %while.cond (wide_param: (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128])) -> pred[] { wide_p = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 %constant.12857 = s32[] constant(4) ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT } ENTRY main { p0 = s8[4,128,128] parameter(0) p1 = s8[4,128,128] parameter(1) p2 = bf16[8,128] parameter(2) init = s32[] constant(0) while.input = (s32[], bf16[8,128], 
s8[4,128,128], s8[4,128,128]) tuple(init, p2, p0, p1) while.out = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) while(while.input), condition=%while.cond , body=%while.body ROOT out = bf16[8,128] get-tuple-element(while.out), index=1 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); auto original = module->Clone(); TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get())); EXPECT_TRUE(unstacked); EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 8); EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original), std::nullopt, false)); }
// Nested pattern with the inner fusion's operands permuted (stacked operand
// first, index second, activation last); the pass must not rely on operand
// order. NOTE(review): test name has a typo — "Differe" vs "Different".
TEST_F(UnstackerTest, UnstackNestedDSFusionPatternWithDiffereOperandsOrder) { std::string hlo_string = R"( HloModule SimpleLoop %fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] { %param_0.51117 = s8[3,128,128] parameter(0) p1 = s32[] parameter(1) %constant.85694 = s32[] constant(0) %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128} ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040) } %fused_computation.inner (param_1.30691: s8[3,128,128], p2: s32[], param_0.34523: bf16[8,128]) -> bf16[8,128] { %param_0.34523 = bf16[8,128] parameter(2) %param_1.30691 = s8[3,128,128] parameter(0) p2 = s32[] parameter(1) %fusion.67830 = s8[128,128] fusion(s8[3,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf } %while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) { wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 p0 = bf16[8,128] get-tuple-element(wide_p), index=1 p1 = s8[3,128,128] get-tuple-element(wide_p), index=2 one = 
s32[] constant(1) inc = s32[] add(i, one) fusion.conv = bf16[8,128] fusion(p1, i, p0), kind=kOutput, calls=%fused_computation.inner ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, fusion.conv, p1) } %while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] { wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 %constant.12857 = s32[] constant(3) ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT } ENTRY main { p0 = s8[3,128,128] parameter(0) p1 = bf16[8,128] parameter(1) init = s32[] constant(0) while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0) while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body while_use = s8[3,128,128] get-tuple-element(while.out), index=2 ROOT out = bf16[8,128] get-tuple-element(while.out), index=1 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); auto original = module->Clone(); TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get())); EXPECT_TRUE(unstacked); EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 3); EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original), std::nullopt, false)); }
// Two distinct fusion computations slice the SAME stacked operand with
// identical shapes; the operand is still unstacked once (3 entry slices).
TEST_F(UnstackerTest, UnstackNestedDSFusionPatternWithSameUnstackingComps) { std::string hlo_string = R"( HloModule SimpleLoop %fused_computation.slice.1 (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] { %param_0.51117 = s8[3,128,128] parameter(0) p1 = s32[] parameter(1) %constant.85694 = s32[] constant(0) %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128} ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040) } %fused_computation.slice.2 (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] { 
%param_0.51117 = s8[3,128,128] parameter(0) p1 = s32[] parameter(1) %constant.85694 = s32[] constant(0) %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128} ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040) } %fused_computation.inner.1 (param_0.34523: bf16[8,128], param_1.30691: s8[3,128,128], p2: s32[]) -> bf16[8,128] { %param_0.34523 = bf16[8,128] parameter(0) %param_1.30691 = s8[3,128,128] parameter(1) p2 = s32[] parameter(2) %fusion.67830 = s8[128,128] fusion(s8[3,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice.1 ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf } %fused_computation.inner.2 (param_0.34523: bf16[8,128], param_1.30691: s8[3,128,128], p2: s32[]) -> bf16[8,128] { %param_0.34523 = bf16[8,128] parameter(0) %param_1.30691 = s8[3,128,128] parameter(1) p2 = s32[] parameter(2) %fusion.67830 = s8[128,128] fusion(s8[3,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice.2 ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf } %while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) { wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 p0 = bf16[8,128] get-tuple-element(wide_p), index=1 p1 = s8[3,128,128] get-tuple-element(wide_p), index=2 one = s32[] constant(1) inc = s32[] add(i, one) fusion.conv1 = bf16[8,128] fusion(p0, p1, i), kind=kOutput, calls=%fused_computation.inner.1 fusion.conv2 = bf16[8,128] fusion(p0, p1, i), kind=kOutput, calls=%fused_computation.inner.2 add = bf16[8,128] add(fusion.conv1, fusion.conv2) ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, add, p1) } %while.cond (wide_param: (s32[], bf16[8,128], 
s8[3,128,128])) -> pred[] { wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 %constant.12857 = s32[] constant(3) ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT } ENTRY main { p0 = s8[3,128,128] parameter(0) p1 = bf16[8,128] parameter(1) init = s32[] constant(0) while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0) while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body while_use = s8[3,128,128] get-tuple-element(while.out), index=2 ROOT out = bf16[8,128] get-tuple-element(while.out), index=1 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); auto original = module->Clone(); TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get())); EXPECT_TRUE(unstacked); EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 3); EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original), std::nullopt, false)); }
// Negative case: the two slice fusions on the same operand produce DIFFERENT
// shapes (one keeps the leading 1-dim, the other bitcasts it away), so the
// pass must refuse to unstack and report no change.
TEST_F(UnstackerTest, NotUnstackNestedDSFusionPatternWithSameUnstackingComps) { std::string hlo_string = R"( HloModule SimpleLoop %fused_computation.slice.1 (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[1,128,128] { %param_0.51117 = s8[3,128,128] parameter(0) p1 = s32[] parameter(1) %constant.85694 = s32[] constant(0) ROOT %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128} } %fused_computation.slice.2 (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] { %param_0.51117 = s8[3,128,128] parameter(0) p1 = s32[] parameter(1) %constant.85694 = s32[] constant(0) %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128} ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] 
%dynamic-slice.22040) } %while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) { wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 p0 = bf16[8,128] get-tuple-element(wide_p), index=1 p1 = s8[3,128,128] get-tuple-element(wide_p), index=2 one = s32[] constant(1) inc = s32[] add(i, one) %fusion.67831 = s8[128,128] fusion(p1, i), kind=kLoop, calls=%fused_computation.slice.2 %fusion.67830 = s8[1,128,128] fusion(p1, i), kind=kLoop, calls=%fused_computation.slice.1 %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %fusion.67830) ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, p0, p1) } %while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] { wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 %constant.12857 = s32[] constant(3) ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT } ENTRY main { p0 = s8[3,128,128] parameter(0) p1 = bf16[8,128] parameter(1) init = s32[] constant(0) while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0) while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body while_use = s8[3,128,128] get-tuple-element(while.out), index=2 ROOT out = bf16[8,128] get-tuple-element(while.out), index=1 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get())); EXPECT_FALSE(unstacked); }
// The slicing happens inside an inner while loop that is itself inside an
// outer while loop; the stacked weight is threaded through both loop tuples
// (4 entry slices after unstacking).
TEST_F(UnstackerTest, UnstackNestedDSFusionPatternSingleNestedLoop) { std::string hlo_string = R"( HloModule SimpleLoop %fused_computation.slice (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] { %param_0.51117 = s8[4,128,128] parameter(0) p1 = s32[] parameter(1) %constant.85694 = s32[] constant(0) %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] 
%param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128} ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040) } %fused_computation.inner (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] { %param_0.34523 = bf16[8,128] parameter(0) %param_1.30691 = s8[4,128,128] parameter(1) p2 = s32[] parameter(2) %fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf } %while.body.inner (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) { wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 inner_param_0 = bf16[8,128] get-tuple-element(wide_p), index=1 inner_param_1 = s8[4,128,128] get-tuple-element(wide_p), index=2 one = s32[] constant(1) inc = s32[] add(i, one) fusion.conv = bf16[8,128] fusion(inner_param_0, inner_param_1, i), kind=kOutput, calls=%fused_computation.inner ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(inc, fusion.conv, inner_param_1) } %while.cond.inner (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] { wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 %constant.12857 = s32[] constant(4) ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT } %while.body (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) { wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 param0 = bf16[8,128] get-tuple-element(wide_p), index=1 param1 = s8[4,128,128] get-tuple-element(wide_p), index=2 one = s32[] constant(2) zero = s32[] constant(0) mult = s32[] multiply(i, one) inner.in = (s32[], bf16[8,128], s8[4,128,128]) tuple(zero, 
param0, param1) inner.out = (s32[], bf16[8,128], s8[4,128,128]) while(inner.in), condition=%while.cond.inner, body=%while.body.inner fusion.conv.inner = bf16[8,128] get-tuple-element(inner.out), index=1 ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(mult, fusion.conv.inner, param1) } %while.cond (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] { wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 %constant.12857 = s32[] constant(20) add = s32[] add(%constant.12857, %constant.12857) ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, add), direction=LT } ENTRY main { weight = s8[4,128,128] parameter(0) p1 = bf16[8,128] parameter(1) init = s32[] constant(1) while.input = (s32[], bf16[8,128], s8[4,128,128]) tuple(init, p1, weight) while.out = (s32[], bf16[8,128], s8[4,128,128]) while(while.input), condition=%while.cond , body=%while.body ROOT out = bf16[8,128] get-tuple-element(while.out), index=1 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); auto original = module->Clone(); TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get())); EXPECT_TRUE(unstacked); EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 4); EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original), std::nullopt, false)); }
// Two sibling outer loops, each containing its own nested inner loop that
// slices the same stacked weight; both loop nests are unstacked
// (4 + 4 = 8 entry slices expected at the end of this test).
TEST_F(UnstackerTest, UnstackNestedDSFusionPatternTwoNestedLoops) { std::string hlo_string = R"( HloModule SimpleLoop %fused_computation.slice1 (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] { %param_0.51117 = s8[4,128,128] parameter(0) p1 = s32[] parameter(1) %constant.85694 = s32[] constant(0) %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128} ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040) } %fused_computation.inner1 (param_0.34523: 
bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] { %param_0.34523 = bf16[8,128] parameter(0) %param_1.30691 = s8[4,128,128] parameter(1) p2 = s32[] parameter(2) %fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice1 ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf } %while.body.inner1 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) { wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 inner_param_0 = bf16[8,128] get-tuple-element(wide_p), index=1 inner_param_1 = s8[4,128,128] get-tuple-element(wide_p), index=2 one = s32[] constant(1) inc = s32[] add(i, one) fusion.conv = bf16[8,128] fusion(inner_param_0, inner_param_1, i), kind=kOutput, calls=%fused_computation.inner1 ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(inc, fusion.conv, inner_param_1) } %while.cond.inner1 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] { wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 %constant.12857 = s32[] constant(4) ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT } %while.body1 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) { wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 param0 = bf16[8,128] get-tuple-element(wide_p), index=1 param1 = s8[4,128,128] get-tuple-element(wide_p), index=2 one = s32[] constant(2) zero = s32[] constant(0) mult = s32[] multiply(i, one) inner.in.1 = (s32[], bf16[8,128], s8[4,128,128]) tuple(zero, param0, param1) inner.out.1 = (s32[], bf16[8,128], s8[4,128,128]) while(inner.in.1), condition=%while.cond.inner1, body=%while.body.inner1 fusion.conv.inner = bf16[8,128] get-tuple-element(inner.out.1), index=1 
ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(mult, fusion.conv.inner, param1) } %while.cond1 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] { wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 %constant.12857 = s32[] constant(20) add = s32[] add(%constant.12857, %constant.12857) ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, add), direction=LT } %fused_computation.slice2 (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] { %param_0.51117 = s8[4,128,128] parameter(0) p1 = s32[] parameter(1) %constant.85694 = s32[] constant(0) %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128} ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040) } %fused_computation.inner2 (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] { %param_0.34523 = bf16[8,128] parameter(0) %param_1.30691 = s8[4,128,128] parameter(1) p2 = s32[] parameter(2) %fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice2 ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf } %while.body.inner2 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) { wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 inner_param_0 = bf16[8,128] get-tuple-element(wide_p), index=1 inner_param_1 = s8[4,128,128] get-tuple-element(wide_p), index=2 one = s32[] constant(1) inc = s32[] add(i, one) fusion.conv = bf16[8,128] fusion(inner_param_0, inner_param_1, i), kind=kOutput, calls=%fused_computation.inner2 ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(inc, fusion.conv, inner_param_1) } %while.cond.inner2 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] { 
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 %constant.12857 = s32[] constant(4) ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT } %while.body2 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) { wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 param0 = bf16[8,128] get-tuple-element(wide_p), index=1 param1 = s8[4,128,128] get-tuple-element(wide_p), index=2 one = s32[] constant(2) zero = s32[] constant(0) mult = s32[] multiply(i, one) inner.in.2 = (s32[], bf16[8,128], s8[4,128,128]) tuple(zero, param0, param1) inner.out.2 = (s32[], bf16[8,128], s8[4,128,128]) while(inner.in.2), condition=%while.cond.inner2, body=%while.body.inner2 fusion.conv.inner = bf16[8,128] get-tuple-element(inner.out.2), index=1 ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(mult, fusion.conv.inner, param1) } %while.cond2 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] { wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0) i = s32[] get-tuple-element(wide_p), index=0 %constant.12857 = s32[] constant(20) add = s32[] add(%constant.12857, %constant.12857) ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, add), direction=LT } ENTRY main { weight = s8[4,128,128] parameter(0) p1 = bf16[8,128] parameter(1) init = s32[] constant(1) while.input = (s32[], bf16[8,128], s8[4,128,128]) tuple(init, p1, weight) while.out = (s32[], bf16[8,128], s8[4,128,128]) while(while.input), condition=%while.cond1 , body=%while.body1 init2 = s32[] get-tuple-element(while.out), index=0 second.while.input = (s32[], bf16[8,128], s8[4,128,128]) tuple(init2, p1, weight) second.while.out = (s32[], bf16[8,128], s8[4,128,128]) while(second.while.input), condition=%while.cond2 , body=%while.body2 out = bf16[8,128] get-tuple-element(while.out), index=1 second.out = bf16[8,128] get-tuple-element(second.while.out), 
index=1 ROOT result = bf16[8,128] add(out, second.out) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); auto original = module->Clone(); TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get())); EXPECT_TRUE(unstacked); EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 8); EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original), std::nullopt, false)); }
// A dynamic-slice fusion paired with a dynamic-update-slice fusion on the
// same stacked buffer inside one loop body; both sides must be unstacked
// consistently.
TEST_F(UnstackerTest, UnstackDSAndDUSPattern) { std::string hlo_string = R"( HloModule SimpleLoop %fused_computation.slice (param_0.51117: s32[4,3], offset: s32[]) -> s32[3] { %param_0.51117 = s32[4,3] parameter(0) offset = s32[] parameter(1) zero = s32[] constant(0) %dynamic-slice.22040 = s32[1,3] dynamic-slice(s32[4,3] %param_0.51117, offset, zero), dynamic_slice_sizes={1,3} ROOT %bitcast.31250 = s32[3] bitcast(s32[1,3] %dynamic-slice.22040) } %fused_computation.update.slice (param_0.51117: s32[4,3], p1: s32[], p2: s32[3]) -> s32[4,3] { %param_0.51117 = s32[4,3] parameter(0) %p1 = s32[] parameter(1) %p2 = s32[3] parameter(2) %zero = s32[] constant(0) %bitcast.31250 = s32[1,3] bitcast(%p2) ROOT output_dus = s32[4,3]{1,0} dynamic-update-slice(%param_0.51117, %bitcast.31250, %p1, zero) } SimpleLoop.body { loop_var.1 = (s32[], s32[4,3]) parameter(0) get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0 get-tuple-element.2 = s32[4,3] get-tuple-element(loop_var.1), index=1 zero = s32[] constant(0) some_const = s32[3] constant({0,1,2}) constant.1 = s32[] constant(1) idx = s32[] add(get-tuple-element.1, constant.1) ds = s32[3]{0} fusion(get-tuple-element.2, get-tuple-element.1), kind=kLoop, calls=%fused_computation.slice update = s32[3] add(ds, ds) dus = s32[3] dynamic-update-slice(ds, update, zero) output = s32[4,3] fusion(get-tuple-element.2, get-tuple-element.1, dus), kind=kLoop, calls=%fused_computation.update.slice ROOT tuple = (s32[], s32[4,3]) tuple(idx, output) } SimpleLoop.condition 
{ loop_var.1 = (s32[], s32[4,3]) parameter(0) get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0 constant.2 = s32[] constant(4) ROOT less-than = pred[] compare(get-tuple-element.1, constant.2), direction=LT } ENTRY SimpleLoop { reference = s32[4,3] parameter(0) zero = s32[] constant(0) zero1 = s32[] constant(0) one = s32[] constant(1) tuple.1 = (s32[], s32[4,3]) tuple(zero, reference) while = (s32[], s32[4,3]) while(tuple.1), condition=SimpleLoop.condition, body=SimpleLoop.body ROOT out = s32[] get-tuple-element(while), index=0 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); auto original = module->Clone(); TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get())); EXPECT_TRUE(unstacked); EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original), std::nullopt, false)); }
// DS/DUS pattern where the stacked buffers flow through a nested inner while
// loop and one operand originates from an AllocateBuffer custom call in the
// outer loop body.
TEST_F(UnstackerTest, UnstackDSAndDUSPatternNestedLoop) { std::string hlo_string = R"( HloModule SimpleLoop %fused_computation.slice (param_0.51117: bf16[4,1,8,257,128], offset: s32[]) -> bf16[1,8,257,128] { %param_0.51117 = bf16[4,1,8,257,128] parameter(0) offset = s32[] parameter(1) zero = s32[] constant(0) %dynamic-slice.22040 = bf16[1,1,8,257,128] dynamic-slice(bf16[4,1,8,257,128] %param_0.51117, offset, zero, zero, zero, zero), dynamic_slice_sizes={1,1,8,257,128} ROOT %bitcast.31250 = bf16[1,8,257,128] bitcast(%dynamic-slice.22040) } %fused_computation.slice.2 (param_0.51117: bf16[4,1,8,257,128], offset: s32[]) -> bf16[1,8,257,128] { %param_0.51117 = bf16[4,1,8,257,128] parameter(0) offset = s32[] parameter(1) zero = s32[] constant(0) %dynamic-slice.22040 = bf16[1,1,8,257,128] dynamic-slice(bf16[4,1,8,257,128] %param_0.51117, offset, zero, zero, zero, zero), dynamic_slice_sizes={1,1,8,257,128} ROOT %bitcast.31250 = bf16[1,8,257,128] bitcast(%dynamic-slice.22040) } inner.body { loop_var.1 = (s32[], bf16[4,1,8,257,128], bf16[4,1,8,257,128]) parameter(0) get-tuple-element.1 = s32[] 
get-tuple-element(loop_var.1), index=0 get-tuple-element.2 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), index=1 get-tuple-element.3 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), index=2 sliced = bf16[1,8,257,128] fusion(get-tuple-element.2, get-tuple-element.1), kind=kLoop, calls=%fused_computation.slice sliced.2 = bf16[1,8,257,128] fusion(get-tuple-element.3, get-tuple-element.1), kind=kLoop,calls=%fused_computation.slice.2 temp = bf16[1,8,257,128] add(sliced, sliced.2) one = s32[] constant(1) idx = s32[] add(get-tuple-element.1, one) ROOT out = tuple(idx, get-tuple-element.2, get-tuple-element.3) } inner.condition { loop_var.1 = (s32[], bf16[4,1,8,257,128], bf16[4,1,8,257,128]) parameter(0) get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0 constant.2 = s32[] constant(4) ROOT less-than = pred[] compare(get-tuple-element.1, constant.2), direction=LT } outer.body { loop_var.1 = (s32[], bf16[4,1,8,257,128], bf16[4,1,8,257,128]) parameter(0) get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0 get-tuple-element.2 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), index=1 get-tuple-element.3 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), index=2 zero = s32[] constant(0) buffer = bf16[4,1,8,257,128] custom-call(), custom_call_target="AllocateBuffer" inner.input = tuple(zero, buffer, get-tuple-element.2) inner = while(inner.input), condition=inner.condition, body=inner.body out1 = bf16[4,1,8,257,128] get-tuple-element(inner), index=1 one = s32[] constant(1) idx = s32[] add(get-tuple-element.1, one) ROOT tuple = (s32[], bf16[4,1,8,257,128], bf16[4,1,8,257,128]) tuple(idx, out1, get-tuple-element.3) } outer.condition { loop_var.1 = (s32[], bf16[4,1,8,257,128], bf16[4,1,8,257,128]) parameter(0) get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0 constant.2 = s32[] constant(4) mul = s32[] multiply(get-tuple-element.1, constant.2) ROOT less-than = pred[] compare(get-tuple-element.1, mul), direction=LT } 
ENTRY SimpleLoop { param1 = bf16[4,1,8,257,128] parameter(0) param2 = bf16[4,1,8,257,128] parameter(1) zero = s32[] constant(0) zero1 = s32[] constant(0) one = s32[] constant(1) tuple.1 = tuple(zero, param1, param2) while = while(tuple.1), condition=outer.condition, body=outer.body ROOT out = s32[] get-tuple-element(while), index=0 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); auto original = module->Clone(); TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get())); EXPECT_TRUE(unstacked); EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original), std::nullopt, false)); }
// DS/DUS pattern where one loop's output feeds a subsequent loop.
// NOTE(review): this test's body continues beyond this chunk of the file.
TEST_F(UnstackerTest, UnstackDSAndDUSPatternLoopFeedingLoop) { std::string hlo_string = R"( HloModule SimpleLoop %fused_computation.update.slice (param_0.51117: bf16[4,1,8,257,128], p1: s32[], param_0.51118: bf16[1,8,257,128]) -> bf16[4,1,8,257,128] { %param_0.51117 = bf16[4,1,8,257,128] parameter(0) p1 = s32[] parameter(1) %param_0.51118 = bf16[1,8,257,128] parameter(2) bitcast = bf16[1,1,8,257,128] bitcast(param_0.51118) %constant.85694 = s32[] constant(0) ROOT %dynamic-update-slice.22040 = bf16[4,1,8,257,128] dynamic-update-slice(bf16[4,1,8,257,128] %param_0.51117, bitcast, p1, s32[] %constant.85694, s32[] %constant.85694, s32[] %constant.85694, s32[] %constant.85694) } %fused_computation.slice (param_0.51117: bf16[4,1,8,257,128], offset:s32[]) -> bf16[1,8,257,128] { %param_0.51117 = bf16[4,1,8,257,128] parameter(0) offset = s32[] parameter(1) zero = s32[] constant(0) %dynamic-slice.22040 = bf16[1,1,8,257,128] dynamic-slice(bf16[4,1,8,257,128] %param_0.51117, offset, zero, zero, zero, zero), dynamic_slice_sizes={1,1,8,257,128} ROOT %bitcast.31250 = bf16[1,8,257,128] bitcast(%dynamic-slice.22040) } first.body { loop_var.1 = (s32[], bf16[4,1,8,257,128]) parameter(0) get-tuple-element.1 = s32[] get-tuple-element(loop_var.1),index=0 get-tuple-element.2 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), 
index=1 constant = bf16[1,8,257,128] constant({...}) sliced = bf16[1,8,257,128] fusion(get-tuple-element.2, get-tuple-element.1), kind=kLoop, calls=%fused_computation.slice tmp = bf16[1,8,257,128] add(sliced, sliced) one = s32[] constant(1) idx = s32[] add(get-tuple-element.1, one) ROOT out = tuple(idx, get-tuple-element.2) } first.condition { loop_var.1 = (s32[], bf16[4,1,8,257,128]) parameter(0) get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0 constant.2 = s32[] constant(4) ROOT less-than = pred[] compare(get-tuple-element.1, constant.2), direction=LT } next.body { loop_var.1 = (s32[], bf16[4,1,8,257,128]) parameter(0) get-tuple-element.1 = s32[] get-tuple-element(loop_var.1),index=0 get-tuple-element.2 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), index=1 constant = bf16[1,8,257,128] constant({...}) update.sliced = bf16[4,1,8,257,128] fusion(get-tuple-element.2, get-tuple-element.1, constant), kind=kLoop, calls=%fused_computation.update.slice one = s32[] constant(1) idx = s32[] add(get-tuple-element.1, one) ROOT out = tuple(idx, update.sliced) } next.condition { loop_var.1 = (s32[], bf16[4,1,8,257,128]) parameter(0) get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0 constant.2 = s32[] constant(4) ROOT less-than = pred[] compare(get-tuple-element.1, constant.2), direction=LT } ENTRY SimpleLoop { param1 = bf16[4,1,8,257,128] parameter(0) param2 = bf16[4,1,8,257,128] parameter(1) zero = s32[] constant(0) zero1 = s32[] constant(0) one = s32[] constant(1) tuple.1 = tuple(zero, param1) while = while(tuple.1), condition=first.condition, body=first.body while.out = bf16[4,1,8,257,128] get-tuple-element(while), index=1 next.input = tuple(zero, while.out) next = while(next.input), condition=next.condition, body=next.body ROOT out = s32[] get-tuple-element(next), index=0 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool unstacked, 
HloUnstacker().Run(module.get())); EXPECT_TRUE(unstacked); } TEST_F(UnstackerTest, UnstackDUSFusionWithPadPatternLoopFeedingLoop) { std::string hlo_string = R"( HloModule SimpleLoop fused_computation.75.clone { param_0.5713 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} parameter(0) param_2.4396 = bf16[1,8,257,128]{3,2,1,0:T(8,128)(2,1)} parameter(2) constant.12166 = bf16[]{:T(256)} constant(0) pad.496 = bf16[1,8,513,128]{3,2,1,0:T(8,128)(2,1)} pad(param_2.4396, constant.12166), padding=0_0x0_0x0_256x0_0 bitcast.1262 = bf16[1,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} bitcast(pad.496) param_1.6823 = s32[]{:T(128)} parameter(1) constant.12165 = s32[]{:T(128)} constant(0) ROOT dynamic-update-slice.193 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} dynamic-update-slice(param_0.5713, bitcast.1262, param_1.6823, constant.12165, constant.12165, constant.12165, constant.12165) } fused_computation.1 { param_0.5712 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}parameter(0) param_1.6822 = s32[]{:T(128)} parameter(1) constant.12164 = s32[]{:T(128)} constant(0) dynamic-slice.1597 = bf16[1,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} dynamic-slice(param_0.5712, param_1.6822, constant.12164, constant.12164, constant.12164, constant.12164), dynamic_slice_sizes={1,1,8,513,128} ROOT bitcast.1261 = bf16[1,8,513,128]{3,2,1,0:T(8,128)(2,1)} bitcast(dynamic-slice.1597) } first.body { wide.param.29 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) parameter(0) get-tuple-element.12177 = s32[]{:T(128)} get-tuple-element(wide.param.29), index=0 constant.12144..sunk.2 = s32[]{:T(128)} constant(1) add.4517 = s32[]{:T(128)} add(get-tuple-element.12177, constant.12144..sunk.2) get-tuple-element.12178 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} get-tuple-element(wide.param.29), index=1 fusion.2381 = bf16[1,8,513,128]{3,2,1,0:T(8,128)(2,1)} fusion(get-tuple-element.12178, get-tuple-element.12177), kind=kLoop, calls=fused_computation.1 tmp = bf16[1,8,513,128]{3,2,1,0:T(8,128)(2,1)} 
add(fusion.2381, fusion.2381) ROOT tuple.949 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) tuple(add.4517, get-tuple-element.12178) } first.cond { wide.param.28 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) parameter(0) get-tuple-element.12167 = s32[]{:T(128)} get-tuple-element(wide.param.28), index=0 constant.12162 = s32[]{:T(128)} constant(2) ROOT compare.1815 = pred[]{:T(512)} compare(get-tuple-element.12167, constant.12162), direction=LT } wide.region_54.2652.clone_spmd { wide.param.29 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) parameter(0) get-tuple-element.12177 = s32[]{:T(128)} get-tuple-element(wide.param.29), index=0 constant.12144..sunk.2 = s32[]{:T(128)} constant(1) add.4517 = s32[]{:T(128)} add(get-tuple-element.12177, constant.12144..sunk.2) get-tuple-element.12178 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} get-tuple-element(wide.param.29), index=1 update = bf16[1,8,257,128]{3,2,1,0:T(8,128)(2,1)} constant({...}) fusion.2382 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} fusion(get-tuple-element.12178, get-tuple-element.12177, update), kind=kLoop, calls=fused_computation.75.clone ROOT tuple.949 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) tuple(add.4517, fusion.2382) } wide.region_55.2732.clone_spmd { wide.param.28 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) parameter(0) get-tuple-element.12167 = s32[]{:T(128)} get-tuple-element(wide.param.28), index=0 constant.12162 = s32[]{:T(128)} constant(2) ROOT compare.1815 = pred[]{:T(512)} compare(get-tuple-element.12167, constant.12162), direction=LT } ENTRY main { p0 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} parameter(0) init = s32[]{:T(128)} constant(0) first.input = tuple(init, p0) first.out = while(first.input), condition=first.cond , body=first.body o1 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} get-tuple-element(first.out), index=1 input = tuple(init, o1) out = while(input), 
condition=wide.region_55.2732.clone_spmd , body=wide.region_54.2652.clone_spmd ROOT res = s32[]{:T(128)} get-tuple-element(out), index=0 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get())); EXPECT_TRUE(unstacked); } TEST_F(UnstackerTest, UnstackDUSFusionWithAddPattern) { std::string hlo_string = R"( HloModule SimpleLoop add.2771.reduce_sub_computation { lhs.44 = bf16[] parameter(0) rhs.44 = bf16[] parameter(1) ROOT add.3079 = bf16[] add(lhs.44, rhs.44) } fused_computation.75.clone { param_0.31658 = bf16[2,4096]{1,0:T(8,128)(2,1)} parameter(0) param_1.26202 = s32[]{:T(128)} parameter(1) constant.47557 = s32[]{:T(128)} constant(0) dynamic-slice.12289 = bf16[1,4096]{1,0:T(2,128)(2,1)} dynamic-slice(param_0.31658, param_1.26202, constant.47557), dynamic_slice_sizes={1,4096} constant.47559 = bf16[]{:T(256)} constant(1) broadcast.39214 = bf16[1,4096]{1,0:T(2,128)(2,1)} broadcast(constant.47559), dimensions={} add.13176 = bf16[1,4096]{1,0:T(2,128)(2,1)} add(dynamic-slice.12289, broadcast.39214) constant.47558 = bf16[] constant(-0) ROOT reduce.8210 = bf16[4096]{0:T(1024)(128)(2,1)} reduce(add.13176, constant.47558), dimensions={0}, to_apply=add.2771.reduce_sub_computation } first.body { wide.param.29 = (s32[]{:T(128)}, bf16[2,4096]{1,0:T(8,128)(2,1)}) parameter(0) get-tuple-element.12177 = s32[]{:T(128)} get-tuple-element(wide.param.29), index=0 constant.12144..sunk.2 = s32[]{:T(128)} constant(1) add.4517 = s32[]{:T(128)} add(get-tuple-element.12177, constant.12144..sunk.2) get-tuple-element.12178 = bf16[2,4096]{1,0:T(8,128)(2,1)} get-tuple-element(wide.param.29), index=1 fusion.2381 = bf16[4096]{0:T(1024)(128)(2,1)} fusion(get-tuple-element.12178, get-tuple-element.12177), kind=kLoop, calls=fused_computation.75.clone tmp = bf16[4096]{0:T(1024)(128)(2,1)} add(fusion.2381, fusion.2381) ROOT tuple.949 = (s32[]{:T(128)}, 
bf16[2,4096]{1,0:T(8,128)(2,1)}) tuple(add.4517, get-tuple-element.12178) } first.cond { wide.param.28 = (s32[]{:T(128)}, bf16[2,4096]{1,0:T(8,128)(2,1)}) parameter(0) get-tuple-element.12167 = s32[]{:T(128)} get-tuple-element(wide.param.28), index=0 constant.12162 = s32[]{:T(128)} constant(2) ROOT compare.1815 = pred[]{:T(512)} compare(get-tuple-element.12167, constant.12162), direction=LT } ENTRY main { p0 = bf16[2,4096]{1,0:T(8,128)(2,1)} parameter(0) init = s32[]{:T(128)} constant(0) first.input = tuple(init, p0) first.out = while(first.input), condition=first.cond , body=first.body ROOT o1 = s32[]{:T(128)} get-tuple-element(first.out), index=0 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); auto original = module->Clone(); TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get())); EXPECT_TRUE(unstacked); EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original), std::nullopt, false)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_unstacker.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_unstacker_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
9bda38a9-493d-4316-85e9-9599ccb99c9a
cpp
tensorflow/tensorflow
dynamic_padder
third_party/xla/xla/service/dynamic_padder.cc
third_party/xla/xla/service/dynamic_padder_test.cc
#include "xla/service/dynamic_padder.h" #include <cstdint> #include <functional> #include <iterator> #include <set> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/functional/function_ref.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/comparison_util.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/dynamic_parameter_binding.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/literal_util.h" #include "xla/service/call_graph.h" #include "xla/service/dynamic_dimension_inference.h" #include "xla/service/dynamic_window_utils.h" #include "xla/service/hlo_creation_utils.h" #include "xla/service/hlo_dce.h" #include "xla/service/pattern_matcher.h" #include "xla/service/shape_inference.h" #include "xla/service/tuple_util.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/tsl/lib/monitoring/gauge.h" #include "xla/util.h" #include "xla/window_util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace { auto* dynamic_padding_gauge = tsl::monitoring::Gauge<bool, 0>::New( "/tensorflow/core/use_dynamic_padding_gauge", "Tracks if dynamic padder is used."); absl::StatusOr<HloInstruction*> ChooseIdentityValue(HloInstruction* inst, int64_t operand_number) { if (inst->IsElementwise()) { return nullptr; } if (inst->opcode() == HloOpcode::kSelectAndScatter || 
inst->IsCustomCall("DynamicSelectAndScatterSamePadding")) { if (operand_number == 1) { return inst->mutable_operand(2); } TF_RET_CHECK(operand_number == 0); HloComputation* select = inst->called_computations()[0]; if (Match(select->root_instruction(), match::Compare(match::Parameter(), match::Parameter()) .WithComparisonDirection(ComparisonDirection::kGe))) { return inst->AddInstruction(HloInstruction::CreateConstant( LiteralUtil::MinValue(inst->operand(0)->shape().element_type()))); } else { return Unimplemented( "Only select and scatter with `max` as select function is " "supported, got %s", select->ToString()); } } switch (inst->opcode()) { case HloOpcode::kReduce: { auto* reduce = Cast<HloReduceInstruction>(inst); TF_RET_CHECK(operand_number < reduce->input_count()) << "Only data operand with dynamic dimension is valid."; int64_t init_value_index = reduce->input_count() + operand_number; return inst->mutable_operand(init_value_index); } case HloOpcode::kReduceWindow: { auto* reduce_window = Cast<HloReduceWindowInstruction>(inst); TF_RET_CHECK(operand_number < reduce_window->input_count()) << "Only data operand with dynamic dimension is valid."; int64_t init_value_index = reduce_window->input_count() + operand_number; return inst->mutable_operand(init_value_index); } case HloOpcode::kConvolution: case HloOpcode::kDot: { PrimitiveType ptype = inst->operand(0)->shape().element_type(); return inst->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::Zero(ptype))); } case HloOpcode::kPad: return inst->mutable_operand(1); case HloOpcode::kScatter: { if (operand_number != 1) { return nullptr; } PrimitiveType indices_ptype = inst->operand(operand_number)->shape().element_type(); return inst->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::MaxValue(indices_ptype))); } case HloOpcode::kParameter: case HloOpcode::kGather: case HloOpcode::kDynamicSlice: case HloOpcode::kDynamicUpdateSlice: case HloOpcode::kGetDimensionSize: case 
HloOpcode::kSetDimensionSize: case HloOpcode::kConcatenate: case HloOpcode::kReshape: case HloOpcode::kReverse: case HloOpcode::kTuple: case HloOpcode::kAllReduce: case HloOpcode::kReduceScatter: case HloOpcode::kBroadcast: case HloOpcode::kTranspose: case HloOpcode::kSort: case HloOpcode::kSlice: case HloOpcode::kDomain: return nullptr; case HloOpcode::kCustomCall: return nullptr; default: return UnimplementedStrCat("Unimplemented padding for instruction: ", inst->ToString()); } } absl::StatusOr<bool> ReplaceGetSize( HloInstruction* instr, DynamicDimensionInference* dynamic_dimension_inference) { if (instr->opcode() != HloOpcode::kGetDimensionSize) { return false; } HloComputation* computation = instr->parent(); TF_ASSIGN_OR_RETURN(auto legal_shape, ShapeInference::InferGetDimensionSizeShape( instr->operand(0)->shape(), instr->dimension())); TF_RET_CHECK(ShapeUtil::Equal(instr->shape(), legal_shape)) << "instr->shape() " << instr->shape().ToString() << " , " << "legal_shape " << legal_shape.ToString(); TF_RET_CHECK(ShapeUtil::HasPrimitiveType(instr->shape(), S32)); HloInstruction* operand = instr->mutable_operand(0); int64_t dim = instr->dimension(); HloInstruction* dynamic_size = dynamic_dimension_inference->GetDynamicSize(operand, {}, dim); if (dynamic_size != nullptr) { TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(dynamic_size)); dynamic_dimension_inference->ReplaceAllDynamicDimensionUsesWith( instr, dynamic_size); } else { int32_t size = instr->operand(0)->shape().dimensions(dim); HloInstruction* new_instr = computation->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(size))); TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(new_instr)); dynamic_dimension_inference->ReplaceAllDynamicDimensionUsesWith(instr, new_instr); } return true; } absl::StatusOr<bool> ReplaceSetSize(HloInstruction* instr) { if (instr->opcode() != HloOpcode::kSetDimensionSize) { return false; } TF_RET_CHECK(Shape::Equal().IgnoreDynamicDimension()( instr->shape(), 
instr->operand(0)->shape())) << "instr->shape() " << instr->shape().ToString() << " , " << "instruction operand shape " << instr->operand(0)->shape(); HloInstruction* operand = instr->mutable_operand(0); TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(operand)); return true; } absl::StatusOr<bool> ReplaceSetBound(HloInstruction* instr) { if (instr->opcode() != HloOpcode::kCustomCall || instr->custom_call_target() != "SetBound") { return false; } TF_RET_CHECK(Shape::Equal().IgnoreDynamicDimension()( instr->shape(), instr->operand(0)->shape())) << "instr->shape() " << instr->shape().ToString() << " , " << "instruction operand shape " << instr->operand(0)->shape(); HloInstruction* operand = instr->mutable_operand(0); TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(operand)); return true; } bool ShouldSkipPadOnOperand( const HloInstruction* inst, int64_t operand_num, int64_t dimension, const absl::flat_hash_set<absl::string_view>& execution_threads) { switch (inst->opcode()) { case HloOpcode::kConvolution: { if (operand_num == 0) { if (dimension == inst->convolution_dimension_numbers().input_batch_dimension()) { return true; } const auto& spatial_dims = inst->convolution_dimension_numbers().input_spatial_dimensions(); for (int64_t spatial_dim = 0; spatial_dim < spatial_dims.size(); ++spatial_dim) { if (spatial_dims[spatial_dim] == dimension && inst->window().dimensions(spatial_dim).size() == 1) { return true; } } } return operand_num == 1 && (dimension == inst->convolution_dimension_numbers() .kernel_output_feature_dimension()); } case HloOpcode::kDot: { if (operand_num == 0) { return !absl::c_linear_search( inst->dot_dimension_numbers().lhs_contracting_dimensions(), dimension); } return !absl::c_linear_search( inst->dot_dimension_numbers().rhs_contracting_dimensions(), dimension); } case HloOpcode::kReduce: return !absl::c_linear_search(inst->dimensions(), dimension); case HloOpcode::kSelectAndScatter: case HloOpcode::kReduceWindow: return 
inst->window().dimensions(dimension).size() == 1; case HloOpcode::kAsyncStart: if (!HloInstruction::IsThreadIncluded(inst->async_execution_thread(), execution_threads)) { return true; } return false; default: return false; } } HloInstruction* PadWithScalar(HloInstruction* inst, int64_t dim, HloInstruction* dynamic_size, HloInstruction* padding_scalar) { CHECK(inst != nullptr && dynamic_size != nullptr && padding_scalar != nullptr); const Shape mask_shape = ShapeUtil::MakeShape(xla::S32, inst->shape().dimensions()); const Shape pred_shape = ShapeUtil::MakeShape(xla::PRED, inst->shape().dimensions()); HloInstruction* iota = inst->AddInstruction(HloInstruction::CreateIota(mask_shape, dim)); HloInstruction* broadcasted_effective_size = inst->AddInstruction( HloInstruction::CreateBroadcast(mask_shape, dynamic_size, {})); HloInstruction* pred = inst->AddInstruction(HloInstruction::CreateCompare( pred_shape, iota, broadcasted_effective_size, ComparisonDirection::kLt)); HloInstruction* broadcasted_identity_value = inst->AddInstruction(HloInstruction::CreateBroadcast( ShapeUtil::MakeStaticShape(inst->shape()), padding_scalar, {})); HloInstruction* padded = inst->AddInstruction(HloInstruction::CreateTernary( ShapeUtil::MakeStaticShape(inst->shape()), HloOpcode::kSelect, pred, inst, broadcasted_identity_value)); return padded; } HloInstruction* GenerateBinaryMask( HloInstruction* reshape, int64_t input_dim, absl::Span<const int64_t> output_dims, absl::Span<HloInstruction*> output_dynamic_dims, HloInstruction* one, HloInstruction* zero, bool split_input) { Shape input_shape = split_input ? reshape->operand(0)->shape() : reshape->shape(); Shape output_shape = split_input ? 
reshape->shape() : reshape->operand(0)->shape(); const Shape mask_input_shape = ShapeUtil::MakeShape(xla::S32, {input_shape.dimensions(input_dim)}); const Shape pred_input_shape = ShapeUtil::MakeShape(xla::PRED, {input_shape.dimensions(input_dim)}); HloInstruction* pred_true = reshape->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true))); HloInstruction* input_shape_pred_mask = reshape->AddInstruction( HloInstruction::CreateBroadcast(pred_input_shape, pred_true, {})); bool need_rewrite = false; HloInstruction* iota = reshape->AddInstruction(HloInstruction::CreateIota(mask_input_shape, 0)); for (int64_t i = 1; i < output_dims.size(); ++i) { if (output_dynamic_dims[output_dims[i]] != nullptr) { need_rewrite = true; break; } } if (!need_rewrite) { return nullptr; } for (int64_t i = output_dims.size() - 1; i > 0; i--) { const int64_t output_dim = output_dims[i]; HloInstruction* dynamic_size = output_dynamic_dims[output_dim]; HloInstruction* static_output_dim_size = reshape->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>( output_shape.dimensions(output_dim)))); HloInstruction* broadcasted_static_output_dim_size = reshape->AddInstruction(HloInstruction::CreateBroadcast( mask_input_shape, static_output_dim_size, {})); if (dynamic_size != nullptr) { HloInstruction* dim_index = reshape->AddInstruction(HloInstruction::CreateBinary( mask_input_shape, HloOpcode::kRemainder, iota, broadcasted_static_output_dim_size)); HloInstruction* broadcasted_effective_size = reshape->AddInstruction( HloInstruction::CreateBroadcast(mask_input_shape, dynamic_size, {})); HloInstruction* selected = reshape->AddInstruction(HloInstruction::CreateCompare( pred_input_shape, dim_index, broadcasted_effective_size, ComparisonDirection::kLt)); input_shape_pred_mask = reshape->AddInstruction( HloInstruction::CreateBinary(pred_input_shape, HloOpcode::kAnd, input_shape_pred_mask, selected)); } iota = reshape->AddInstruction( 
HloInstruction::CreateBinary(mask_input_shape, HloOpcode::kDivide, iota, broadcasted_static_output_dim_size)); } HloInstruction* broadcasted_one = reshape->AddInstruction( HloInstruction::CreateBroadcast(mask_input_shape, one, {})); HloInstruction* broadcasted_zero = reshape->AddInstruction( HloInstruction::CreateBroadcast(mask_input_shape, zero, {})); return reshape->AddInstruction(HloInstruction::CreateTernary( mask_input_shape, HloOpcode::kSelect, input_shape_pred_mask, broadcasted_one, broadcasted_zero)); } absl::StatusOr<bool> RewriteDynamicReshapeSplitInput( HloInstruction* reshape, int64_t input_dim, absl::Span<const int64_t> output_dims, absl::Span<HloInstruction*> output_dynamic_dims, DynamicDimensionInference* dynamic_dimension_inference) { VLOG(2) << "Reshaping input dim " << input_dim << " to " << VectorString(output_dims); const Shape operand_shape = reshape->operand(0)->shape(); TF_RET_CHECK(output_dims.size() > 1); const Shape mask_input_shape = ShapeUtil::MakeShape(xla::S32, {operand_shape.dimensions(input_dim)}); const Shape pred_input_shape = ShapeUtil::MakeShape(xla::PRED, {operand_shape.dimensions(input_dim)}); HloInstruction* zero = reshape->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::Zero(S32))); HloInstruction* one = reshape->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::One(S32))); HloInstruction* input_shape_binary_mask = GenerateBinaryMask(reshape, input_dim, output_dims, output_dynamic_dims, one, zero, true); if (input_shape_binary_mask == nullptr) { VLOG(2) << "No need to rewrite"; return false; } auto embedded_builder = HloComputation::Builder("add"); { auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(S32, {}), "lhs")); auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter( 1, ShapeUtil::MakeShape(S32, {}), "rhs")); embedded_builder.AddInstruction( HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs)); } HloComputation* 
add = reshape->GetModule()->AddEmbeddedComputation(embedded_builder.Build()); Window cumsum_window; WindowDimension* dim = cumsum_window.add_dimensions(); dim->set_size(operand_shape.dimensions(input_dim)); dim->set_stride(1); dim->set_padding_low(operand_shape.dimensions(input_dim) - 1); dim->set_padding_high(0); dim->set_window_dilation(1); dim->set_base_dilation(1); HloInstruction* cumsum = reshape->AddInstruction(HloInstruction::CreateReduceWindow( mask_input_shape, input_shape_binary_mask, zero, cumsum_window, add)); HloInstruction* broadcast_ones = reshape->AddInstruction( HloInstruction::CreateBroadcast(mask_input_shape, one, {})); cumsum = reshape->AddInstruction(HloInstruction::CreateBinary( mask_input_shape, HloOpcode::kSubtract, cumsum, broadcast_ones)); GatherDimensionNumbers gather_dim_numbers; for (int64_t i = 0; i < operand_shape.dimensions_size(); ++i) { if (i != input_dim) { gather_dim_numbers.add_offset_dims(i); } } gather_dim_numbers.add_start_index_map(input_dim); gather_dim_numbers.set_index_vector_dim(1); gather_dim_numbers.add_collapsed_slice_dims(input_dim); HloInstruction* operand_static_dim_size = reshape->AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateR0<int32_t>(operand_shape.dimensions(input_dim)))); HloInstruction* operand_static = reshape->AddInstruction(HloInstruction::CreateSetDimensionSize( operand_shape, reshape->mutable_operand(0), operand_static_dim_size, input_dim)); std::vector<int64_t> slice_sizes(operand_shape.dimensions().begin(), operand_shape.dimensions().end()); slice_sizes[input_dim] = 1; HloInstruction* gather = reshape->AddInstruction(HloInstruction::CreateGather( ShapeUtil::MakeShape(operand_shape.element_type(), operand_shape.dimensions()), operand_static, cumsum, gather_dim_numbers, slice_sizes, true)); TF_RETURN_IF_ERROR(reshape->ReplaceOperandWith(0, gather)); HloInstruction* reshape_dynamic = reshape; auto users = reshape->users(); for (int64_t output_dim : output_dims) { HloInstruction* 
output_dynamic_size = dynamic_dimension_inference->GetDynamicSize(reshape, {}, output_dim); if (output_dynamic_size != nullptr) { reshape_dynamic = reshape->AddInstruction(HloInstruction::CreateSetDimensionSize( reshape->shape(), reshape_dynamic, output_dynamic_size, output_dim)); } } for (auto* user : users) { TF_RETURN_IF_ERROR(reshape->ReplaceUseWith(user, reshape_dynamic)); } TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize( reshape, reshape_dynamic, {})); return true; } absl::StatusOr<bool> RewriteDynamicReshapeCombineInput( HloInstruction* reshape, absl::Span<const int64_t> input_dims, int64_t output_dim, absl::Span<HloInstruction*> input_dynamic_dims, DynamicDimensionInference* dynamic_dimension_inference) { HloInstruction* zero = reshape->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::Zero(S32))); HloInstruction* one = reshape->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::One(S32))); const Shape output_shape = reshape->shape(); const Shape input_shape = reshape->operand(0)->shape(); const Shape mask_output_shape = ShapeUtil::MakeShape(xla::S32, {output_shape.dimensions(output_dim)}); HloInstruction* output_shape_binary_mask = GenerateBinaryMask(reshape, output_dim, input_dims, input_dynamic_dims, one, zero, false); if (output_shape_binary_mask == nullptr) { VLOG(2) << "No need to rewrite"; return false; } HloInstruction* iota = reshape->AddInstruction(HloInstruction::CreateIota(mask_output_shape, 0)); HloComputation::Builder comp_builder("compare"); HloInstruction* lhs_key = comp_builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeScalarShape(S32), "lhs_key")); HloInstruction* rhs_key = comp_builder.AddInstruction(HloInstruction::CreateParameter( 1, ShapeUtil::MakeScalarShape(S32), "rhs_key")); comp_builder.AddInstruction(HloInstruction::CreateParameter( 2, ShapeUtil::MakeScalarShape(S32), "lhs_value")); comp_builder.AddInstruction(HloInstruction::CreateParameter( 3, 
ShapeUtil::MakeScalarShape(S32), "rhs_value")); comp_builder.AddInstruction( HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), lhs_key, rhs_key, ComparisonDirection::kGt)); HloComputation* compare = reshape->GetModule()->AddEmbeddedComputation(comp_builder.Build()); HloInstruction* sort = reshape->AddInstruction(HloInstruction::CreateSort( ShapeUtil::MakeTupleShape({mask_output_shape, mask_output_shape}), 0, {output_shape_binary_mask, iota}, compare, true)); HloInstruction* gather_indices = reshape->AddInstruction( HloInstruction::CreateGetTupleElement(mask_output_shape, sort, 1)); GatherDimensionNumbers gather_dim_numbers; for (int64_t i = 0; i < output_shape.dimensions_size(); ++i) { if (i != output_dim) { gather_dim_numbers.add_offset_dims(i); } } gather_dim_numbers.add_start_index_map(output_dim); gather_dim_numbers.set_index_vector_dim(1); gather_dim_numbers.add_collapsed_slice_dims(output_dim); HloInstruction* static_dim_size = reshape->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>( reshape->shape().dimensions(output_dim)))); Shape reshape_static_shape = reshape->shape(); reshape_static_shape.set_dynamic_dimension(output_dim, false); HloInstruction* reshape_static = reshape->AddInstruction(HloInstruction::CreateSetDimensionSize( reshape_static_shape, reshape, static_dim_size, output_dim)); std::vector<int64_t> gather_slice_sizes(output_shape.dimensions().begin(), output_shape.dimensions().end()); gather_slice_sizes[output_dim] = 1; HloInstruction* gather = reshape->AddInstruction(HloInstruction::CreateGather( output_shape, reshape_static, gather_indices, gather_dim_numbers, gather_slice_sizes, true)); HloInstruction* output_dynamic_size = dynamic_dimension_inference->GetDynamicSize(reshape, {}, output_dim); TF_RET_CHECK(output_dynamic_size != nullptr); gather = reshape->AddInstruction(HloInstruction::CreateSetDimensionSize( gather->shape(), gather, output_dynamic_size, output_dim)); auto users = reshape->users(); 
  // --- Tail of RewriteDynamicReshapeCombineInput: replace uses (skipping the
  // helper instructions just created above), fix up a root reshape, and
  // forward dynamic-dimension info.
  for (auto* user : users) {
    if (user != reshape_static && user != output_dynamic_size) {
      TF_RETURN_IF_ERROR(reshape->ReplaceUseWith(user, gather));
    }
  }
  if (reshape == reshape->parent()->root_instruction()) {
    reshape->parent()->set_root_instruction(gather);
  }
  TF_RETURN_IF_ERROR(
      dynamic_dimension_inference->ForwardDynamicSize(reshape, gather, {}));
  return true;
}

// Dispatches the rewrite of one common-factor group of a dynamic reshape:
// either a single input dimension split into several output dimensions, or
// several input dimensions combined into a single output dimension.  Any
// other combination is a checked failure here (many-to-many groups are
// decomposed elsewhere before reaching this function).
absl::StatusOr<bool> RewriteDynamicReshapeSingleGroup(
    HloInstruction* reshape, absl::Span<const int64_t> input_dims,
    absl::Span<const int64_t> output_dims,
    absl::Span<HloInstruction*> input_dynamic_dims,
    absl::Span<HloInstruction*> output_dynamic_dims,
    DynamicDimensionInference* dynamic_dimension_inference) {
  VLOG(2) << "Rewriting dynamic reshape " << reshape->ToString()
          << " input dims: " << VectorString(input_dims)
          << " output dims: " << VectorString(output_dims);
  const Shape operand_shape = reshape->operand(0)->shape();
  const Shape output_shape = reshape->shape();
  if (input_dims.size() == 1) {
    int64_t input_dim = input_dims[0];
    // Size-1 dimensions need no rewrite (degenerate split).
    if (operand_shape.dimensions()[input_dim] == 1) {
      return false;
    }
    return RewriteDynamicReshapeSplitInput(reshape, input_dim, output_dims,
                                           output_dynamic_dims,
                                           dynamic_dimension_inference);
  }
  if (output_dims.size() == 1) {
    int64_t output_dim = output_dims[0];
    // Size-1 dimensions need no rewrite (degenerate combine).
    if (output_shape.dimensions()[output_dim] == 1) {
      return false;
    }
    return RewriteDynamicReshapeCombineInput(reshape, input_dims, output_dim,
                                             input_dynamic_dims,
                                             dynamic_dimension_inference);
  }
  // Many-to-many groups must not reach this point.
  TF_RET_CHECK(false);
  return false;
}

// Rewrites kReverse when any reversed dimension is dynamic.  Reversing the
// padded static buffer moves the padding area to the front; to undo this, the
// reversed result is padded on the high side (doubling each dynamic reversed
// dimension) and then dynamic-sliced starting at (static_bound - dynamic_size)
// in every dynamic reversed dimension.
absl::StatusOr<bool> RewriteReverse(
    HloInstruction* reverse,
    DynamicDimensionInference* dynamic_dimension_inference) {
  auto reverse_dims = reverse->dimensions();
  const Shape& reverse_shape = reverse->shape();
  std::set<int64_t> dynamic_reverse_dims;
  for (int64_t reverse_dim : reverse_dims) {
    HloInstruction* dynamic_size =
        dynamic_dimension_inference->GetDynamicSize(reverse, {}, reverse_dim);
    if (dynamic_size == nullptr) {
      continue;
    }
    dynamic_reverse_dims.insert(reverse_dim);
  }
  if (dynamic_reverse_dims.empty()) {
    // We only need to rewrite dimensions that are both dynamic and reversed.
    return false;
  }
  PaddingConfig padding;
  // Double each dynamic reversed dimension so the dynamic-slice below always
  // stays in bounds.
  Shape pad_shape = reverse_shape;
  for (int i = 0; i < reverse_shape.rank(); ++i) {
    auto dimension = padding.add_dimensions();
    if (dynamic_reverse_dims.count(i) > 0) {
      dimension->set_edge_padding_low(0);
      dimension->set_edge_padding_high(reverse_shape.dimensions(i));
      dimension->set_interior_padding(0);
      pad_shape.set_dimensions(i, 2 * pad_shape.dimensions(i));
    }
  }
  HloInstruction* cloned_reverse = reverse->AddInstruction(reverse->Clone());
  HloInstruction* zero = reverse->AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::Zero(pad_shape.element_type())));
  HloInstruction* pad = reverse->AddInstruction(
      HloInstruction::CreatePad(pad_shape, cloned_reverse, zero, padding));
  std::vector<HloInstruction*> start_indices;
  start_indices.reserve(reverse_shape.rank());
  for (int i = 0; i < reverse_shape.rank(); ++i) {
    if (dynamic_reverse_dims.count(i) > 0) {
      // Start at bound_size - dynamic_size so the leading (reversed) padding
      // is skipped.
      HloInstruction* bound_size =
          reverse->AddInstruction(HloInstruction::CreateConstant(
              LiteralUtil::CreateR0<int32_t>(reverse_shape.dimensions(i))));
      HloInstruction* dynamic_size =
          dynamic_dimension_inference->GetDynamicSize(reverse, {}, i);
      HloInstruction* start_offset =
          reverse->AddInstruction(HloInstruction::CreateBinary(
              ShapeUtil::MakeScalarShape(S32), HloOpcode::kSubtract, bound_size,
              dynamic_size));
      start_indices.push_back(start_offset);
    } else {
      HloInstruction* zero = reverse->AddInstruction(
          HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));
      start_indices.push_back(zero);
    }
  }
  HloInstruction* dynamic_reverse =
      reverse->AddInstruction(HloInstruction::CreateDynamicSlice(
          reverse_shape, pad, start_indices, reverse_shape.dimensions()));
  TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
      reverse, dynamic_reverse, {}));
  TF_RETURN_IF_ERROR(reverse->ReplaceAllUsesWith(dynamic_reverse));
  return true;
}

// Rewrites the input of a windowed op (convolution / reduce-window /
// select-and-scatter) whose SAME padding depends on a dynamic input size:
// statically pads by the maximum possible amount, then dynamic-slices off the
// surplus.  The signature continues on the next source line.
HloInstruction* RewriteInputWithDynamicPadding(
    HloInstruction* conv, HloInstruction* input, HloInstruction* padding_value,
    // (continuation of RewriteInputWithDynamicPadding's parameter list)
    absl::Span<HloInstruction*> padding_before, Window* input_window,
    absl::FunctionRef<int64_t(int64_t)> window_dim_to_shape_dim) {
  HloInstruction* zero_s32 = conv->AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));
  Shape padded_shape = input->shape();
  PaddingConfig padding_configs;
  for (int64_t i = 0; i < input->shape().rank(); ++i) {
    PaddingConfig::PaddingConfigDimension padding_dim;
    *padding_configs.add_dimensions() = padding_dim;
  }
  std::vector<HloInstruction*> start_indices(input->shape().rank(), zero_s32);
  for (int64_t dim_index = 0; dim_index < input_window->dimensions_size();
       ++dim_index) {
    // Only window dimensions with a computed dynamic "padding before" amount
    // need rewriting.
    if (padding_before[dim_index] == nullptr) {
      continue;
    }
    int64_t shape_dim = window_dim_to_shape_dim(dim_index);
    WindowDimension* window_dim = input_window->mutable_dimensions(dim_index);
    auto* padding_dim = padding_configs.mutable_dimensions(shape_dim);
    const int64_t dilated_window_size = window_util::DilatedBound(
        window_dim->size(), window_dim->window_dilation());
    // Pad low by the maximum possible amount (the dilated window size); the
    // dynamic-slice start below removes the part not actually needed
    // (dilated_window_size - padding_before).
    padding_dim->set_edge_padding_low(dilated_window_size);
    padding_dim->set_edge_padding_high(window_dim->padding_high() +
                                       window_dim->padding_low());
    padding_dim->set_interior_padding(window_dim->base_dilation() - 1);
    HloInstruction* slicing_start =
        conv->AddInstruction(HloInstruction::CreateBinary(
            ShapeUtil::MakeScalarShape(S32), HloOpcode::kSubtract,
            conv->AddInstruction(
                HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
                    padding_dim->edge_padding_low()))),
            padding_before[dim_index]));
    start_indices[shape_dim] = slicing_start;
    padded_shape.mutable_dimensions()[shape_dim] =
        window_dim->padding_low() +
        window_util::DilatedBound(padded_shape.dimensions(shape_dim),
                                  window_dim->base_dilation()) +
        window_dim->padding_high();
    // Padding and base dilation are now materialized in the input itself, so
    // clear them from the window.
    window_dim->clear_padding_high();
    window_dim->clear_padding_low();
    window_dim->set_base_dilation(1);
    input->mutable_shape()->set_dynamic_dimension(shape_dim, false);
  }
  HloInstruction* pad =
      MakePadHlo(input, padding_value, padding_configs).value();
  input = conv->AddInstruction(HloInstruction::CreateDynamicSlice(
      padded_shape, pad, start_indices, padded_shape.dimensions()));
  return input;
}

// Rewrites a dynamic input-gradient convolution custom call (operand 0: input
// sizes, operand 1: output gradients, operand 2: kernel) into a static
// kConvolution, zero-padding the dynamic spatial dimensions of the gradient.
absl::StatusOr<bool> RewriteDynamicConvolutionInputGrad(
    HloInstruction* custom_call_conv,
    DynamicDimensionInference* dynamic_dimension_inference) {
  HloInstruction* grad = custom_call_conv->mutable_operand(1);
  HloInstruction* kernel = custom_call_conv->mutable_operand(2);
  TF_RET_CHECK(kernel->shape().is_static());
  auto dnums = custom_call_conv->convolution_dimension_numbers();
  Window window = custom_call_conv->window();
  HloInstruction* zero =
      custom_call_conv->AddInstruction(HloInstruction::CreateConstant(
          LiteralUtil::Zero(custom_call_conv->shape().element_type())));
  std::vector<HloInstruction*> padding_before(
      dnums.input_spatial_dimensions_size(), nullptr);
  for (int64_t spatial_dim_index = 0;
       spatial_dim_index < dnums.input_spatial_dimensions_size();
       ++spatial_dim_index) {
    int64_t input_spatial_dim =
        dnums.input_spatial_dimensions(spatial_dim_index);
    HloInstruction* operand_dynamic_size =
        dynamic_dimension_inference->GetDynamicSize(
            custom_call_conv->mutable_operand(1), {}, input_spatial_dim);
    if (operand_dynamic_size == nullptr) {
      continue;
    }
    grad = PadWithScalar(grad, input_spatial_dim, operand_dynamic_size, zero);
    // Operand 0 carries the desired input sizes; extract this dimension's
    // size as an S32 scalar.
    HloInstruction* slice =
        custom_call_conv->AddInstruction(HloInstruction::CreateSlice(
            ShapeUtil::MakeShape(S32, {1}),
            custom_call_conv->mutable_operand(0), {input_spatial_dim},
            {input_spatial_dim + 1}, {1}));
    HloInstruction* dynamic_input_size = custom_call_conv->AddInstruction(
        HloInstruction::CreateReshape(ShapeUtil::MakeScalarShape(S32), slice));
    const WindowDimension& window_dim = window.dimensions(spatial_dim_index);
    // NOTE(review): base_dilation() is passed in the stride position of
    // GetWindowedInputGradSize — presumably because the forward stride shows
    // up as base dilation in the gradient; confirm against that helper.
    DynamicWindowDims dynamic_window_dims = GetWindowedInputGradSize(
        dynamic_input_size, window_dim.size(), window_dim.window_dilation(),
        window_dim.base_dilation(), custom_call_conv->padding_type());
    padding_before[spatial_dim_index] = dynamic_window_dims.padding_before;
  }
  if
  // --- Continuation of RewriteDynamicConvolutionInputGrad. ---
  (custom_call_conv->padding_type() == PaddingType::PADDING_SAME) {
    grad = RewriteInputWithDynamicPadding(
        custom_call_conv, grad, zero, absl::MakeSpan(padding_before), &window,
        [&](int64_t dim) { return dnums.input_spatial_dimensions(dim); });
  }
  // Drop the precision entry of operand 0 (the input-sizes operand), which
  // has no counterpart on the rewritten 2-operand convolution.
  PrecisionConfig precision_config;
  if (custom_call_conv->precision_config().operand_precision_size() == 3) {
    *precision_config.mutable_operand_precision() = {
        custom_call_conv->precision_config().operand_precision().begin() + 1,
        custom_call_conv->precision_config().operand_precision().end()};
  }
  // NOTE(review): `precision_config` built above is never used — the new
  // convolution is created with the original precision_config().  Looks
  // unintended; confirm against upstream.
  HloInstruction* static_conv =
      custom_call_conv->AddInstruction(HloInstruction::CreateConvolve(
          custom_call_conv->shape(), grad, kernel,
          custom_call_conv->feature_group_count(),
          custom_call_conv->batch_group_count(), window,
          custom_call_conv->convolution_dimension_numbers(),
          custom_call_conv->precision_config()));
  TF_RETURN_IF_ERROR(custom_call_conv->ReplaceAllUsesWith(static_conv));
  TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
      custom_call_conv, static_conv, {}));
  return true;
}

// Rewrites a dynamic forward-convolution custom call into a static
// kConvolution by zero-padding the dynamic spatial and input-feature
// dimensions of the input.
absl::StatusOr<bool> RewriteDynamicConvolutionForward(
    HloInstruction* custom_call_conv,
    DynamicDimensionInference* dynamic_dimension_inference) {
  HloInstruction* input = custom_call_conv->mutable_operand(0);
  HloInstruction* kernel = custom_call_conv->mutable_operand(1);
  Window window = custom_call_conv->window();
  auto dnums = custom_call_conv->convolution_dimension_numbers();
  HloInstruction* zero =
      custom_call_conv->AddInstruction(HloInstruction::CreateConstant(
          LiteralUtil::Zero(custom_call_conv->shape().element_type())));
  std::vector<HloInstruction*> padding_before(
      dnums.input_spatial_dimensions_size(), nullptr);
  for (int64_t spatial_dim_index = 0;
       spatial_dim_index < dnums.input_spatial_dimensions_size();
       ++spatial_dim_index) {
    int64_t input_spatial_dim =
        dnums.input_spatial_dimensions(spatial_dim_index);
    HloInstruction* operand_dynamic_size =
        dynamic_dimension_inference->GetDynamicSize(
            custom_call_conv->mutable_operand(0), {}, input_spatial_dim);
    if (operand_dynamic_size == nullptr) {
      continue;
    }
    input = PadWithScalar(input, input_spatial_dim, operand_dynamic_size, zero);
    const WindowDimension& window_dim = window.dimensions(spatial_dim_index);
    DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(
        operand_dynamic_size, window_dim.size(), window_dim.window_dilation(),
        window_dim.stride(), custom_call_conv->padding_type());
    padding_before[spatial_dim_index] = dynamic_window_dims.padding_before;
  }
  // Zero out padded channels of the feature dimension so they do not
  // contribute to the contraction.
  const int64_t input_feature_dim = dnums.input_feature_dimension();
  if (HloInstruction* input_feature_dynamic_size =
          dynamic_dimension_inference->GetDynamicSize(
              custom_call_conv->mutable_operand(0), {}, input_feature_dim)) {
    input = PadWithScalar(input, input_feature_dim, input_feature_dynamic_size,
                          zero);
  }
  if (custom_call_conv->padding_type() == PaddingType::PADDING_SAME) {
    input = RewriteInputWithDynamicPadding(
        custom_call_conv, input, zero, absl::MakeSpan(padding_before), &window,
        [&](int64_t dim) { return dnums.input_spatial_dimensions(dim); });
  }
  HloInstruction* static_conv =
      custom_call_conv->AddInstruction(HloInstruction::CreateConvolve(
          custom_call_conv->shape(), input, kernel,
          custom_call_conv->feature_group_count(),
          custom_call_conv->batch_group_count(), window,
          custom_call_conv->convolution_dimension_numbers(),
          custom_call_conv->precision_config()));
  TF_RETURN_IF_ERROR(custom_call_conv->ReplaceAllUsesWith(static_conv));
  TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
      custom_call_conv, static_conv, {}));
  return true;
}

// Rewrites a dynamic kernel-gradient convolution custom call (operand 0:
// activations, operand 1: gradients) into a static kConvolution.  Both
// operands must carry dynamic dimensions.
absl::StatusOr<bool> RewriteDynamicConvolutionKernelGrad(
    HloInstruction* custom_call_conv,
    DynamicDimensionInference* dynamic_dimension_inference) {
  HloInstruction* activations = custom_call_conv->mutable_operand(0);
  HloInstruction* gradients = custom_call_conv->mutable_operand(1);
  TF_RET_CHECK(dynamic_dimension_inference->HasDynamicDimension(activations));
  TF_RET_CHECK(dynamic_dimension_inference->HasDynamicDimension(gradients));
  Window
  // --- Continuation of RewriteDynamicConvolutionKernelGrad. ---
  window = custom_call_conv->window();
  auto dnums = custom_call_conv->convolution_dimension_numbers();
  HloInstruction* zero =
      custom_call_conv->AddInstruction(HloInstruction::CreateConstant(
          LiteralUtil::Zero(custom_call_conv->shape().element_type())));
  std::vector<HloInstruction*> padding_before(
      dnums.input_spatial_dimensions_size(), nullptr);
  for (int64_t spatial_dim_index = 0;
       spatial_dim_index < dnums.input_spatial_dimensions_size();
       ++spatial_dim_index) {
    int64_t input_spatial_dim =
        dnums.input_spatial_dimensions(spatial_dim_index);
    int64_t kernel_spatial_dim =
        dnums.kernel_spatial_dimensions(spatial_dim_index);
    HloInstruction* activations_dynamic_size =
        dynamic_dimension_inference->GetDynamicSize(
            custom_call_conv->mutable_operand(0), {}, input_spatial_dim);
    if (activations_dynamic_size != nullptr) {
      activations = PadWithScalar(activations, input_spatial_dim,
                                  activations_dynamic_size, zero);
    }
    // In a kernel-gradient convolution the gradients operand plays the kernel
    // role, hence its dynamic dimension is looked up on the kernel spatial
    // dimension.
    HloInstruction* gradients_dynamic_size =
        dynamic_dimension_inference->GetDynamicSize(
            custom_call_conv->mutable_operand(1), {}, kernel_spatial_dim);
    if (gradients_dynamic_size != nullptr) {
      gradients = PadWithScalar(gradients, kernel_spatial_dim,
                                gradients_dynamic_size, zero);
    }
    // Invariant: per spatial dimension, either both sides are dynamic or
    // neither is.
    if (activations_dynamic_size == nullptr ||
        gradients_dynamic_size == nullptr) {
      TF_RET_CHECK(activations_dynamic_size == nullptr &&
                   gradients_dynamic_size == nullptr);
      continue;
    }
    int64_t output_spatial_dim =
        dnums.output_spatial_dimensions(spatial_dim_index);
    const WindowDimension& window_dim = window.dimensions(spatial_dim_index);
    DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(
        activations_dynamic_size,
        custom_call_conv->shape().dimensions(output_spatial_dim),
        window_dim.stride(), window_dim.window_dilation(),
        custom_call_conv->padding_type());
    padding_before[spatial_dim_index] = dynamic_window_dims.padding_before;
  }
  // Zero out padded channels of the activations' feature dimension.
  const int64_t input_feature_dim = dnums.input_feature_dimension();
  if (HloInstruction* input_feature_dynamic_size =
          dynamic_dimension_inference->GetDynamicSize(
              custom_call_conv->mutable_operand(0), {}, input_feature_dim)) {
    activations = PadWithScalar(activations, input_feature_dim,
                                input_feature_dynamic_size, zero);
  }
  if (custom_call_conv->padding_type() == PaddingType::PADDING_SAME) {
    activations = RewriteInputWithDynamicPadding(
        custom_call_conv, activations, zero, absl::MakeSpan(padding_before),
        &window,
        [&](int64_t dim) { return dnums.input_spatial_dimensions(dim); });
  }
  HloInstruction* static_conv =
      custom_call_conv->AddInstruction(HloInstruction::CreateConvolve(
          custom_call_conv->shape(), activations, gradients,
          custom_call_conv->feature_group_count(),
          custom_call_conv->batch_group_count(), window,
          custom_call_conv->convolution_dimension_numbers(),
          custom_call_conv->precision_config()));
  TF_RETURN_IF_ERROR(custom_call_conv->ReplaceAllUsesWith(static_conv));
  TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
      custom_call_conv, static_conv, {}));
  return true;
}

// Rewrites a dynamic reduce-window with SAME padding: dynamic dimensions of
// the input are padded with the init value, making out-of-bound elements
// identities for the reduction, then a static reduce-window is emitted.
absl::StatusOr<bool> RewriteDynamicReduceWindowSamePadding(
    HloInstruction* hlo,
    DynamicDimensionInference* dynamic_dimension_inference) {
  if (hlo->shape().IsTuple()) {
    // Variadic (multi-output) reduce-window is not supported here.
    return Unimplemented("DynamicReduceWindowSamePadding not yet supported.");
  }
  HloInstruction* input = hlo->mutable_operand(0);
  HloInstruction* init = hlo->mutable_operand(1);
  int64_t rank = hlo->shape().rank();
  Window window = hlo->window();
  std::vector<HloInstruction*> padding_before(hlo->shape().rank(), nullptr);
  for (int64_t dim_index = 0; dim_index < rank; ++dim_index) {
    HloInstruction* operand_dynamic_size =
        dynamic_dimension_inference->GetDynamicSize(hlo->mutable_operand(0), {},
                                                    dim_index);
    if (operand_dynamic_size == nullptr) {
      continue;
    }
    const WindowDimension& window_dim = window.dimensions(dim_index);
    if (window_util::IsTrivialWindowDimension(window_dim)) {
      continue;
    }
    // Padding with the init value makes out-of-bound elements no-ops for the
    // reduce function.
    input = PadWithScalar(input, dim_index, operand_dynamic_size, init);
    DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(
        operand_dynamic_size, window_dim.size(), window_dim.window_dilation(),
        // --- Continuation of RewriteDynamicReduceWindowSamePadding. ---
        window_dim.stride(), PaddingType::PADDING_SAME);
    padding_before[dim_index] = dynamic_window_dims.padding_before;
  }
  input = RewriteInputWithDynamicPadding(
      hlo, input, init, absl::MakeSpan(padding_before), &window,
      [](int64_t dim) { return dim; });
  HloInstruction* rewritten =
      hlo->AddInstruction(HloInstruction::CreateReduceWindow(
          hlo->shape(), input, init, window, hlo->called_computations()[0]));
  TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(rewritten));
  TF_RETURN_IF_ERROR(
      dynamic_dimension_inference->ForwardDynamicSize(hlo, rewritten, {}));
  return true;
}

// Rewrites a dynamic select-and-scatter with SAME padding.  The input is
// padded with the select function's identity value and the source with the
// scatter init value; a static select-and-scatter is emitted and its result
// is padded/dynamic-sliced back into the original output shape.
absl::StatusOr<bool> RewriteDynamicSelectAndScatterSamePadding(
    HloInstruction* hlo,
    DynamicDimensionInference* dynamic_dimension_inference) {
  HloInstruction* input = hlo->mutable_operand(0);
  HloInstruction* source = hlo->mutable_operand(1);
  HloInstruction* init = hlo->mutable_operand(2);
  TF_ASSIGN_OR_RETURN(HloInstruction * input_padding_value,
                      ChooseIdentityValue(hlo, 0));
  int64_t rank = hlo->shape().rank();
  Window window = hlo->window();
  std::vector<HloInstruction*> padding_before(hlo->shape().rank(), nullptr);
  for (int64_t dim_index = 0; dim_index < rank; ++dim_index) {
    const WindowDimension& window_dim = window.dimensions(dim_index);
    if (window_util::IsTrivialWindowDimension(window_dim)) {
      continue;
    }
    HloInstruction* operand_dynamic_size =
        dynamic_dimension_inference->GetDynamicSize(hlo->mutable_operand(0), {},
                                                    dim_index);
    if (operand_dynamic_size == nullptr) {
      continue;
    }
    input = PadWithScalar(input, dim_index, operand_dynamic_size,
                          input_padding_value);
    HloInstruction* source_dynamic_size =
        dynamic_dimension_inference->GetDynamicSize(hlo->mutable_operand(1), {},
                                                    dim_index);
    if (source_dynamic_size == nullptr) {
      continue;
    }
    source = PadWithScalar(source, dim_index, source_dynamic_size, init);
    DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(
        operand_dynamic_size, window_dim.size(), window_dim.window_dilation(),
        window_dim.stride(), PaddingType::PADDING_SAME);
    padding_before[dim_index] = dynamic_window_dims.padding_before;
  }
  input = RewriteInputWithDynamicPadding(
      hlo, input, input_padding_value, absl::MakeSpan(padding_before), &window,
      [](int64_t dim) { return dim; });
  // The select-and-scatter result has the padded input shape; pad high by the
  // dilated window size and dynamic-slice starting at padding_before to
  // recover the original layout.
  HloInstruction* rewritten =
      hlo->AddInstruction(HloInstruction::CreateSelectAndScatter(
          input->shape(), input, hlo->called_computations()[0], window, source,
          init, hlo->called_computations()[1]));
  std::vector<HloInstruction*> start_indices(
      input->shape().rank(), hlo->AddInstruction(HloInstruction::CreateConstant(
                                 LiteralUtil::Zero(S32))));
  PaddingConfig padding_configs;
  for (int64_t dim_index = 0; dim_index < rank; ++dim_index) {
    PaddingConfig::PaddingConfigDimension padding_dim;
    if (padding_before[dim_index] != nullptr) {
      const WindowDimension& window_dim = window.dimensions(dim_index);
      const int64_t dilated_window_size = window_util::DilatedBound(
          window_dim.size(), window_dim.window_dilation());
      padding_dim.set_edge_padding_high(dilated_window_size);
      start_indices[dim_index] = padding_before[dim_index];
    }
    *padding_configs.add_dimensions() = padding_dim;
  }
  HloInstruction* padded = MakePadHlo(rewritten, init, padding_configs).value();
  rewritten = hlo->AddInstruction(HloInstruction::CreateDynamicSlice(
      hlo->shape(), padded, start_indices, hlo->shape().dimensions()));
  TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(rewritten));
  TF_RETURN_IF_ERROR(
      dynamic_dimension_inference->ForwardDynamicSize(hlo, rewritten, {}));
  return true;
}

// Rewrites a concatenate whose concat dimension is dynamic into a chain of
// dynamic-update-slices, advancing the write offset by each operand's
// (static or dynamic) size along the concat dimension.
absl::StatusOr<bool> RewriteDynamicConcat(
    HloInstruction* concat,
    DynamicDimensionInference* dynamic_dimension_inference) {
  const int64_t concat_dim = concat->concatenate_dimension();
  if (dynamic_dimension_inference->GetDynamicSize(concat, {}, concat_dim) ==
      nullptr) {
    // Only a dynamic concat dimension needs rewriting.
    return false;
  }
  // Start all offsets at zero; only offsets[concat_dim] advances below.
  std::vector<HloInstruction*> offsets;
  offsets.reserve(concat->shape().dimensions_size());
  for (int64_t i = 0; i < concat->shape().dimensions_size(); ++i) {
    offsets.push_back(concat->AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0))));
  }
  // --- Continuation of RewriteDynamicConcat: write each operand into the
  // result at the running offset along the concat dimension. ---
  HloInstruction* rewritten_concat = concat;
  auto prev_users = concat->users();
  for (int64_t i = 0; i < concat->operand_count(); ++i) {
    HloInstruction* operand = concat->mutable_operand(i);
    rewritten_concat =
        concat->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
            rewritten_concat->shape(), rewritten_concat, operand, offsets));
    HloInstruction* dynamic_size =
        dynamic_dimension_inference->GetDynamicSize(operand, {}, concat_dim);
    if (dynamic_size == nullptr) {
      // Static operand: advance by its static size along the concat dim.
      HloInstruction* static_size = concat->AddInstruction(
          HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
              operand->shape().dimensions(concat_dim))));
      offsets[concat_dim] = concat->AddInstruction(HloInstruction::CreateBinary(
          ShapeUtil::MakeScalarShape(S32), HloOpcode::kAdd, offsets[concat_dim],
          static_size));
    } else {
      // Dynamic operand: advance by its runtime size.
      offsets[concat_dim] = concat->AddInstruction(HloInstruction::CreateBinary(
          ShapeUtil::MakeScalarShape(S32), HloOpcode::kAdd, offsets[concat_dim],
          dynamic_size));
    }
  }
  TF_RETURN_IF_ERROR(concat->ReplaceUsesWith(prev_users, rewritten_concat));
  TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
      concat, rewritten_concat, {}));
  return true;
}

// Rewrites a sort with a dynamic sort dimension: appends a PRED "in-bounds"
// operand (iota < dynamic_size) and augments the comparator so out-of-bound
// elements always sort after in-bound ones.
absl::StatusOr<bool> RewriteDynamicSort(
    HloInstruction* hlo,
    DynamicDimensionInference* dynamic_dimension_inference) {
  HloInstruction* dynamic_size = nullptr;
  HloSortInstruction* sort = Cast<HloSortInstruction>(hlo);
  int64_t sort_dim = sort->sort_dimension();
  // Use the first operand that carries a dynamic size on the sort dimension.
  for (auto* operand : sort->operands()) {
    if (dynamic_size == nullptr) {
      dynamic_size =
          dynamic_dimension_inference->GetDynamicSize(operand, {}, sort_dim);
    }
  }
  if (dynamic_size == nullptr) {
    // Not a dynamic sort: nothing to do.
    return false;
  }
  Shape operand_shape =
      ShapeUtil::ChangeElementType(sort->operand(0)->shape(), S32);
  Shape broadcast_shape = ShapeUtil::MakeStaticShape(operand_shape);
  HloInstruction* iota = hlo->AddInstruction(
      HloInstruction::CreateIota(broadcast_shape, sort_dim));
  HloInstruction* dynamic_size_broadcasted = hlo->AddInstruction(
      HloInstruction::CreateBroadcast(broadcast_shape, dynamic_size, {}));
  // lt[i] == true iff position i along the sort dimension is in bounds.
  HloInstruction* lt = hlo->AddInstruction(HloInstruction::CreateCompare(
      ShapeUtil::ChangeElementType(broadcast_shape, PRED), iota,
      dynamic_size_broadcasted, ComparisonDirection::kLt));
  sort->AppendOperand(lt);
  // Clone the comparator with two extra PRED parameters for the new operand.
  const int64_t param_number_before_rewritten =
      sort->called_computations()[0]->num_parameters();
  auto new_param_0 = HloInstruction::CreateParameter(
      param_number_before_rewritten, ShapeUtil::MakeScalarShape(PRED),
      "inbound_lhs");
  auto new_param_1 = HloInstruction::CreateParameter(
      param_number_before_rewritten + 1, ShapeUtil::MakeScalarShape(PRED),
      "inbound_rhs");
  std::vector<const HloInstruction*> extra_parameters{new_param_0.get(),
                                                      new_param_1.get()};
  HloComputation* sort_comp = sort->GetModule()->AddEmbeddedComputation(
      sort->called_computations()[0]->CloneWithReplacements(
          nullptr, extra_parameters));
  auto inbound_lhs =
      sort_comp->parameter_instruction(param_number_before_rewritten);
  auto inbound_rhs =
      sort_comp->parameter_instruction(param_number_before_rewritten + 1);
  sort->ReplaceCalledComputations(
      [&](HloComputation* comp) { return sort_comp; });
  // New comparator root: inbound_lhs && (old_root || !inbound_rhs) — an
  // out-of-bound lhs never wins, and any lhs beats an out-of-bound rhs.
  auto out_of_bound_rhs = sort_comp->AddInstruction(HloInstruction::CreateUnary(
      ShapeUtil::MakeScalarShape(PRED), HloOpcode::kNot, inbound_rhs));
  auto sort_comp_or_out_of_bound_rhs =
      sort_comp->AddInstruction(HloInstruction::CreateBinary(
          ShapeUtil::MakeScalarShape(PRED), HloOpcode::kOr,
          sort_comp->root_instruction(), out_of_bound_rhs));
  auto new_root = sort_comp->AddInstruction(HloInstruction::CreateBinary(
      ShapeUtil::MakeScalarShape(PRED), HloOpcode::kAnd, inbound_lhs,
      sort_comp_or_out_of_bound_rhs));
  sort_comp->set_root_instruction(new_root);
  if (sort->shape().IsTuple()) {
    // Tuple-shaped sort: just add the new operand's shape to the result.
    *sort->mutable_shape()->add_tuple_shapes() =
        ShapeUtil::ChangeElementType(operand_shape, PRED);
  } else {
    // Single-operand sort becomes a 2-tuple: clone it with the widened shape
    // and hand element 0 to the original users (continued below).
    auto sort_users = sort->users();
    auto sort_clone = hlo->AddInstruction(sort->Clone());
    *sort_clone->mutable_shape() = ShapeUtil::MakeTupleShape(
        {sort->shape(), ShapeUtil::ChangeElementType(operand_shape, PRED)});
    // --- Continuation of RewriteDynamicSort (non-tuple branch). ---
    auto rewritten_sort = hlo->AddInstruction(
        HloInstruction::CreateGetTupleElement(sort->shape(), sort_clone, 0));
    for (HloInstruction* user : sort_users) {
      TF_RETURN_IF_ERROR(sort->ReplaceUseWith(user, rewritten_sort));
    }
    TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
        sort, rewritten_sort, {}));
    if (hlo->parent()->root_instruction() == sort) {
      hlo->parent()->set_root_instruction(rewritten_sort);
    }
  }
  return true;
}

// Rewrites a binary op whose operands disagree on a dynamic dimension size,
// implementing implicit degenerate (size-1) broadcasting at runtime: a side
// whose dynamic size is 1 while the other side's is larger gets its first
// slice along that dimension broadcast across it via a select.
absl::StatusOr<bool> RewriteDynamicBinaryOp(
    HloInstruction* binary,
    DynamicDimensionInference* dynamic_dimension_inference) {
  HloInstruction* operand_0 = binary->mutable_operand(0);
  HloInstruction* operand_1 = binary->mutable_operand(1);
  TF_RET_CHECK(operand_0->shape().rank() == operand_1->shape().rank());
  auto dims_0 = dynamic_dimension_inference->GetDynamicSizes(operand_0, {});
  auto dims_1 = dynamic_dimension_inference->GetDynamicSizes(operand_1, {});
  bool changed = false;
  for (int64_t i = 0; i < dims_0.size(); ++i) {
    HloInstruction* dim_0 = dims_0[i];
    HloInstruction* dim_1 = dims_1[i];
    // Rewrite only when both sides are dynamic on dim i with different size
    // instructions.
    if (dims_0[i] != dims_1[i] && dims_0[i] != nullptr &&
        dims_1[i] != nullptr) {
      changed = true;
      // Replaces `operand` by select(pred, broadcast(operand sliced to size 1
      // along dim i), operand): when pred is true the first slice is
      // broadcast over the whole dimension.
      auto rewrite_operand = [&](HloInstruction* pred,
                                 HloInstruction* operand) -> HloInstruction* {
        Shape static_shape = ShapeUtil::MakeStaticShape(operand->shape());
        pred = binary->AddInstruction(HloInstruction::CreateBroadcast(
            ShapeUtil::ChangeElementType(static_shape, PRED), pred, {}));
        Shape slice_shape = static_shape;
        slice_shape.set_dimensions(i, 1);
        std::vector<int64_t> start_indices(slice_shape.rank(), 0);
        std::vector<int64_t> strides(slice_shape.rank(), 1);
        HloInstruction* slice = binary->AddInstruction(
            HloInstruction::CreateSlice(slice_shape, operand, start_indices,
                                        slice_shape.dimensions(), strides));
        Shape reshape_shape = ShapeUtil::DeleteDimension(i, slice_shape);
        HloInstruction* reshape = binary->AddInstruction(
            HloInstruction::CreateReshape(reshape_shape, slice));
        std::vector<int64_t> broadcast_dims;
        broadcast_dims.reserve(static_shape.rank() - 1);
        for (int64_t j = 0; j < static_shape.rank(); ++j) {
          if (j != i) {
            broadcast_dims.push_back(j);
          }
        }
        HloInstruction* broadcast = binary->parent()->AddInstruction(
            HloInstruction::CreateBroadcast(static_shape, reshape,
                                            broadcast_dims),
            "implicit_broadcast");
        HloInstruction* select =
            binary->AddInstruction(HloInstruction::CreateTernary(
                static_shape, HloOpcode::kSelect, pred, broadcast, operand));
        return select;
      };
      HloInstruction* one = binary->AddInstruction(
          HloInstruction::CreateConstant(LiteralUtil::One(S32)));
      // lhs needs broadcasting iff dim_0 == 1 && dim_0 < dim_1.
      auto operand_0_needs_broadcast = binary->parent()->AddInstruction(
          HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), dim_0,
                                        dim_1, ComparisonDirection::kLt),
          "lhs_less_than_rhs");
      auto is_one = binary->parent()->AddInstruction(
          HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), dim_0,
                                        one, ComparisonDirection::kEq),
          "lhs_is_one");
      operand_0_needs_broadcast = binary->parent()->AddInstruction(
          HloInstruction::CreateBinary(ShapeUtil::MakeShape(PRED, {}),
                                       HloOpcode::kAnd, is_one,
                                       operand_0_needs_broadcast),
          "lhs_needs_implicit_broadcast");
      operand_0 = rewrite_operand(operand_0_needs_broadcast, operand_0);
      // Mirror of the test above for the rhs.
      auto operand_1_needs_broadcast = binary->parent()->AddInstruction(
          HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), dim_1,
                                        dim_0, ComparisonDirection::kLt),
          "rhs_less_than_lhs");
      is_one = binary->parent()->AddInstruction(
          HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), dim_1,
                                        one, ComparisonDirection::kEq),
          "rhs_is_one");
      // NOTE(review): the instruction name below says "lhs" on the rhs path —
      // presumably a copy-paste slip; the name is cosmetic (debug/display
      // only) so behavior is unaffected.
      operand_1_needs_broadcast = binary->parent()->AddInstruction(
          HloInstruction::CreateBinary(ShapeUtil::MakeShape(PRED, {}),
                                       HloOpcode::kAnd, is_one,
                                       operand_1_needs_broadcast),
          "lhs_needs_implicit_broadcast");
      operand_1 = rewrite_operand(operand_1_needs_broadcast, operand_1);
    }
  }
  if (changed) {
    TF_RETURN_IF_ERROR(binary->ReplaceOperandWith(0, operand_0));
    TF_RETURN_IF_ERROR(binary->ReplaceOperandWith(1, operand_1));
  }
  return changed;
}

// Rewrites a dynamic-update-slice whose update operand is dynamically smaller
// than the slice it writes, so padded lanes of the update do not clobber the
// base (signature continues on the next source line).
absl::StatusOr<bool> RewriteDynamicUpdateSlice(
    // (continuation of RewriteDynamicUpdateSlice's parameter list)
    HloInstruction* hlo,
    DynamicDimensionInference* dynamic_dimension_inference) {
  HloDynamicUpdateSliceInstruction* dus =
      Cast<HloDynamicUpdateSliceInstruction>(hlo);
  HloInstruction* update = dus->mutable_operand(1);
  HloInstruction* base = dus->mutable_operand(0);
  std::vector<HloInstruction*> dynamic_dims_in_partial_update(
      update->shape().rank(), nullptr);
  bool needs_rewrite = false;
  // Only partial updates (update statically smaller than base) with a dynamic
  // update dimension need rewriting.
  for (int64_t i = 0; i < update->shape().rank(); ++i) {
    if (update->shape().dimensions(i) < base->shape().dimensions(i)) {
      HloInstruction* dynamic_dim =
          dynamic_dimension_inference->GetDynamicSize(update, {}, i);
      if (dynamic_dim != nullptr) {
        dynamic_dims_in_partial_update[i] = dynamic_dim;
        needs_rewrite = true;
      }
    }
  }
  if (!needs_rewrite) {
    return false;
  }
  std::vector<HloInstruction*> indices;
  indices.reserve(dus->operand_count() - 2);
  for (int64_t i = 2; i < dus->operand_count(); ++i) {
    indices.push_back(dus->mutable_operand(i));
  }
  // Slice the region of the base that the update covers, then elementwise
  // select update (in-bounds) vs. base (padding) so out-of-bound lanes of the
  // update leave the base untouched.
  HloInstruction* base_slice =
      dus->AddInstruction(HloInstruction::CreateDynamicSlice(
          update->shape(), base, indices, update->shape().dimensions()));
  for (int64_t i = 0; i < dynamic_dims_in_partial_update.size(); ++i) {
    HloInstruction* dynamic_dim = dynamic_dims_in_partial_update[i];
    if (dynamic_dim != nullptr) {
      Shape mask_shape_int = ShapeUtil::ChangeElementType(update->shape(), S32);
      Shape mask_shape_pred =
          ShapeUtil::ChangeElementType(update->shape(), PRED);
      HloInstruction* iota =
          dus->AddInstruction(HloInstruction::CreateIota(mask_shape_int, i));
      HloInstruction* broadcast_dim = dus->AddInstruction(
          HloInstruction::CreateBroadcast(mask_shape_int, dynamic_dim, {}));
      HloInstruction* pred = dus->AddInstruction(HloInstruction::CreateCompare(
          mask_shape_pred, iota, broadcast_dim, ComparisonDirection::kLt));
      update = dus->AddInstruction(HloInstruction::CreateTernary(
          update->shape(), HloOpcode::kSelect, pred, update, base_slice));
    }
  }
  TF_RETURN_IF_ERROR(dus->ReplaceOperandWith(1, update));
  return true;
}

// Rewrites a (dynamic) reshape with dynamic dimensions.  Groups of common
// factors between input and output shapes are rewritten one at a time; a
// many-to-many group forces decomposition into a flatten/unflatten pair,
// handled recursively.
absl::StatusOr<bool> RewriteDynamicReshape(
    HloInstruction* reshape,
    DynamicDimensionInference* dynamic_dimension_inference) {
  bool changed = false;
  HloInstruction* operand = reshape->mutable_operand(0);
  std::vector<HloInstruction*> input_dynamic_dims;
  input_dynamic_dims.reserve(operand->shape().dimensions_size());
  for (int64_t dim = 0; dim < operand->shape().dimensions_size(); ++dim) {
    input_dynamic_dims.push_back(
        dynamic_dimension_inference->GetDynamicSize(operand, {}, dim));
  }
  std::vector<HloInstruction*> output_dynamic_dims;
  output_dynamic_dims.reserve(reshape->shape().dimensions_size());
  for (int64_t dim = 0; dim < reshape->shape().dimensions_size(); ++dim) {
    output_dynamic_dims.push_back(
        dynamic_dimension_inference->GetDynamicSize(reshape, {}, dim));
  }
  auto common_factors = CommonFactors(operand->shape().dimensions(),
                                      reshape->shape().dimensions());
  bool need_flatten_unflatten = false;
  auto is_dynamic_dimension = [&](int64_t dim) {
    HloInstruction* operand_dynamic_size =
        dynamic_dimension_inference->GetDynamicSize(reshape, {}, dim);
    return operand_dynamic_size != nullptr ||
           reshape->shape().is_dynamic_dimension(dim);
  };
  auto should_skip_common_factor_group = [&](DimensionVector input_dims,
                                             DimensionVector output_dims) {
    if (input_dims.empty() || output_dims.empty()) {
      return true;
    }
    if (absl::c_none_of(output_dims, is_dynamic_dimension)) {
      VLOG(2) << "All dimensions are static in this common factor group";
      return true;
    }
    if (input_dims.size() == 1 && output_dims.size() == 1) {
      // 1:1 group: the dimension passes through unchanged.
      return true;
    }
    return false;
  };
  // First pass: detect any many-to-many group, which forces the
  // flatten/unflatten decomposition below.
  for (int64_t i = 0; i < common_factors.size() - 1; ++i) {
    auto start = common_factors[i];
    auto end = common_factors[i + 1];
    DimensionVector input_dims;
    DimensionVector output_dims;
    for (int64_t dim = start.first; dim < end.first; ++dim) {
      input_dims.push_back(dim);
    }
    for (int64_t dim = start.second; dim < end.second; ++dim) {
      output_dims.push_back(dim);
    }
    if (should_skip_common_factor_group(input_dims, output_dims)) {
      continue;
    }
    if (input_dims.size() > 1 && output_dims.size() > 1) {
      need_flatten_unflatten =
          // --- Continuation of RewriteDynamicReshape. ---
          true;
      break;
    }
  }
  if (need_flatten_unflatten) {
    // Decompose into a reshape to 1-D (flatten) followed by a reshape to the
    // output shape (unflatten); each half then only contains one-to-many /
    // many-to-one groups and is rewritten recursively.
    VLOG(2) << "Rewrite dynamic reshape to flatten-unflatten pair. "
            << reshape->ToString();
    int64_t num_elements = ShapeUtil::ElementsIn(operand->shape());
    Shape flattened_shape =
        ShapeUtil::MakeShape(operand->shape().element_type(), {num_elements});
    HloInstruction* flatten = operand->parent()->AddInstruction(
        HloInstruction::CreateReshape(flattened_shape, operand),
        absl::StrCat(reshape->name(), ".flatten"));
    // Dynamic element count: start from the static element count and, for
    // every dynamic dimension, divide out the static size and multiply in the
    // dynamic size (incremental to stay in S32 scalars).
    HloInstruction* dynamic_size =
        operand->AddInstruction(HloInstruction::CreateConstant(
            LiteralUtil::CreateR0<int32_t>(num_elements)));
    for (int64_t i = 0; i < operand->shape().rank(); i++) {
      HloInstruction* dynamic_dim_size =
          dynamic_dimension_inference->GetDynamicSize(operand, {}, i);
      if (dynamic_dim_size != nullptr) {
        HloInstruction* static_dim_size = operand->AddInstruction(
            HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
                operand->shape().dimensions(i))));
        dynamic_size = operand->AddInstruction(HloInstruction::CreateBinary(
            dynamic_size->shape(), HloOpcode::kDivide, dynamic_size,
            static_dim_size));
        dynamic_size = operand->AddInstruction(HloInstruction::CreateBinary(
            dynamic_size->shape(), HloOpcode::kMultiply, dynamic_size,
            dynamic_dim_size));
      }
    }
    dynamic_dimension_inference->SetDynamicSize(flatten, {}, 0, dynamic_size);
    Shape unflattened_shape = ShapeUtil::MakeStaticShape(reshape->shape());
    HloInstruction* unflatten = reshape->parent()->AddInstruction(
        HloInstruction::CreateReshape(unflattened_shape, flatten),
        absl::StrCat(reshape->name(), ".unflatten"));
    TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
        reshape, unflatten, {}));
    // Recursively rewrite both halves of the decomposition.
    TF_ASSIGN_OR_RETURN(
        bool changed_unused,
        RewriteDynamicReshape(flatten, dynamic_dimension_inference));
    TF_ASSIGN_OR_RETURN(
        changed_unused,
        RewriteDynamicReshape(unflatten, dynamic_dimension_inference));
    TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
        reshape, unflatten, {}));
    TF_RETURN_IF_ERROR(reshape->ReplaceAllUsesWith(unflatten));
    return true;
  }
  // Second pass: rewrite each common-factor group individually.
  for (int64_t i = 0; i < common_factors.size() - 1; ++i) {
    auto start = common_factors[i];
    auto end = common_factors[i + 1];
    DimensionVector input_dims;
    DimensionVector output_dims;
    for (int64_t dim = start.first; dim < end.first; ++dim) {
      input_dims.push_back(dim);
    }
    for (int64_t dim = start.second; dim < end.second; ++dim) {
      output_dims.push_back(dim);
    }
    VLOG(2) << "input_dims: " << VectorString(input_dims);
    VLOG(2) << "output_dims: " << VectorString(output_dims);
    if (should_skip_common_factor_group(input_dims, output_dims)) {
      continue;
    }
    if (input_dims.size() > 1 && output_dims.size() > 1) {
      return Internal(
          "Should be handled by decomposing reshape into "
          "flatten-unflatten pair. %s",
          reshape->ToString());
    }
    TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicReshapeSingleGroup(
                                    reshape, input_dims, output_dims,
                                    absl::MakeSpan(input_dynamic_dims),
                                    absl::MakeSpan(output_dynamic_dims),
                                    dynamic_dimension_inference));
    changed |= c;
  }
  if (reshape->opcode() == HloOpcode::kDynamicReshape) {
    // Lower kDynamicReshape itself to a plain reshape now that each group has
    // been handled.
    auto* static_reshape =
        reshape->AddInstruction(HloInstruction::CreateReshape(
            reshape->shape(), reshape->mutable_operand(0)));
    TF_RETURN_IF_ERROR(reshape->ReplaceAllUsesWith(static_reshape));
    TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
        reshape, static_reshape, {}));
    changed = true;
  }
  return changed;
}

// Visitor that removes dynamic shapes from instructions whose consumers
// require static operands (declaration continues past this chunk).
class DynamicShapeRemovingVisitor : public DfsHloRewriteVisitor {
 public:
  explicit DynamicShapeRemovingVisitor(
      const OpSupportsDynamismHandler& op_supports_dynamism_handler,
      DynamicDimensionInference* dynamic_dimension_inference,
      const absl::flat_hash_set<absl::string_view>& execution_threads)
      : op_supports_dynamism_handler_(op_supports_dynamism_handler),
        dynamic_dimension_inference_(dynamic_dimension_inference),
        execution_threads_(execution_threads) {}
  absl::Status DefaultAction(HloInstruction* hlo) override;
  absl::Status HandleCustomCall(HloInstruction* hlo) override;
  absl::Status
HandleTuple(HloInstruction* hlo) override; absl::Status HandleGetTupleElement(HloInstruction* hlo) override; absl::Status HandleParameter(HloInstruction* hlo) override; absl::Status HandleInfeed(HloInstruction* hlo) override; absl::Status HandleAsyncStart(HloInstruction* hlo) override; absl::Status HandleAsyncUpdate(HloInstruction* hlo) override; absl::Status HandleAsyncDone(HloInstruction* hlo) override; absl::Status HandleWhile(HloInstruction* hlo) override; absl::Status HandleConditional(HloInstruction* hlo) override; absl::Status HandleGetDimensionSize(HloInstruction* hlo) override; absl::Status HandleSetDimensionSize(HloInstruction* hlo) override; static absl::StatusOr<bool> Run( HloComputation* computation, const OpSupportsDynamismHandler& op_supports_dynamism_handler, DynamicDimensionInference* dynamic_shape_inference, const absl::flat_hash_set<absl::string_view>& execution_threads, bool require_dynamic_output) { DynamicShapeRemovingVisitor visitor(op_supports_dynamism_handler, dynamic_shape_inference, execution_threads); TF_RETURN_IF_ERROR(computation->Accept(&visitor)); if (require_dynamic_output) { HloInstruction* root = computation->root_instruction(); if (dynamic_shape_inference->HasDynamicDimension(root)) { TF_ASSIGN_OR_RETURN(HloInstruction * new_root, visitor.ConvertToDynamic(root)); computation->set_root_instruction(new_root); } } return visitor.changed(); } private: absl::StatusOr<HloInstruction*> ConvertToDynamic(HloInstruction* inst); absl::Status ConvertOperandsToDynamic(HloInstruction* inst); const OpSupportsDynamismHandler& op_supports_dynamism_handler_; DynamicDimensionInference* dynamic_dimension_inference_; absl::flat_hash_set<absl::string_view> execution_threads_; }; absl::StatusOr<HloInstruction*> DynamicShapeRemovingVisitor::ConvertToDynamic( HloInstruction* inst) { if (!dynamic_dimension_inference_->HasDynamicDimension(inst)) { return absl::OkStatus(); } MarkAsChanged(); Shape shape = dynamic_dimension_inference_->GetDynamicShape(inst); 
// (Continues DynamicShapeRemovingVisitor::ConvertToDynamic, whose opening
// lines appear earlier in the file.)
// Disassemble a (possibly tuple-shaped) instruction into leaves, wrap each
// dynamic array leaf in a "SliceToDynamic" custom call, then reassemble.
auto gtes = TupleUtil::DisassembleTupleInstruction(inst);
gtes.ForEachMutableElement([&](const ShapeIndex& index,
                               HloInstruction** element) {
  const Shape& subshape = ShapeUtil::GetSubshape(shape, index);
  // Only array leaves can carry dynamic dimensions worth converting.
  if (!subshape.IsArray()) {
    return;
  }
  if (!dynamic_dimension_inference_->HasDynamicDimension(inst, index)) {
    return;
  }
  // Operand 0 of SliceToDynamic is the static array; it is followed by one
  // size operand per dimension — the inferred dynamic size, or an i32
  // constant equal to the static extent when the dimension is static.
  std::vector<HloInstruction*> slice_operand;
  slice_operand.push_back(*element);
  for (int64_t i = 0; i < subshape.dimensions_size(); ++i) {
    auto dimension_size =
        dynamic_dimension_inference_->GetDynamicSize(inst, index, i);
    if (dimension_size == nullptr) {
      dimension_size = inst->AddInstruction(HloInstruction::CreateConstant(
          LiteralUtil::CreateR0<int32_t>(subshape.dimensions(i))));
    }
    slice_operand.push_back(dimension_size);
  }
  *element = inst->AddInstruction(HloInstruction::CreateCustomCall(
      subshape, slice_operand, "SliceToDynamic"));
});
return TupleUtil::AssembleTupleInstruction(inst->parent(), std::move(gtes));
}

// Converts each operand of `inst` that carries dynamic dimensions into
// dynamic form (via ConvertToDynamic) and rewires the operand edge.
absl::Status DynamicShapeRemovingVisitor::ConvertOperandsToDynamic(
    HloInstruction* inst) {
  for (int64_t i = 0; i < inst->operand_count(); ++i) {
    auto operand = inst->mutable_operand(i);
    if (dynamic_dimension_inference_->HasDynamicDimension(operand)) {
      TF_ASSIGN_OR_RETURN(auto dynamic_operand,
                          ConvertToDynamic(inst->mutable_operand(i)));
      TF_RETURN_IF_ERROR(inst->ReplaceOperandWith(i, dynamic_operand));
      MarkAsChanged();
    }
  }
  return absl::OkStatus();
}

// Default policy: ops that require dynamism get dynamic operands; ops with
// no dynamism support must not see dynamic inputs at this point.
absl::Status DynamicShapeRemovingVisitor::DefaultAction(HloInstruction* hlo) {
  // Ask the backend-provided handler whether this op wants dynamic inputs.
  OpDynamismSupport op_support = OpDynamismSupport::kNoSupport;
  if (op_supports_dynamism_handler_) {
    op_support = op_supports_dynamism_handler_(hlo);
  }
  if (op_support == OpDynamismSupport::kRequired) {
    VLOG(1) << "op doesn't support static tensor: " << hlo->ToString();
    return ConvertOperandsToDynamic(hlo);
  }
  const bool input_is_dynamic = absl::c_any_of(
      hlo->operands(),
      [](const HloInstruction* hlo) { return hlo->shape().is_dynamic(); });
  // All operands are static: nothing to check.
  if (!input_is_dynamic) {
    return absl::OkStatus();
  }
  // A dynamic input reaching an op with no dynamism support indicates a bug
  // in the earlier padding/rewriting phases.
  TF_RET_CHECK(op_support != OpDynamismSupport::kNoSupport)
      << "Dynamic input unexpectedly found for unsupported instruction: "
      << hlo->ToString();
  return absl::OkStatus();
}

// The handlers below deliberately leave their instructions untouched:
// these ops either pass shapes through or are handled elsewhere.
absl::Status DynamicShapeRemovingVisitor::HandleGetTupleElement(
    HloInstruction* hlo) {
  return absl::OkStatus();
}

absl::Status DynamicShapeRemovingVisitor::HandleTuple(HloInstruction* hlo) {
  return absl::OkStatus();
}

absl::Status DynamicShapeRemovingVisitor::HandleInfeed(HloInstruction* hlo) {
  return absl::OkStatus();
}

absl::Status DynamicShapeRemovingVisitor::HandleParameter(HloInstruction* hlo) {
  return absl::OkStatus();
}

absl::Status DynamicShapeRemovingVisitor::HandleCustomCall(
    HloInstruction* hlo) {
  // The conversion custom calls themselves are the static<->dynamic
  // boundary; leave them alone.
  if (hlo->custom_call_target() == "SliceToDynamic" ||
      hlo->custom_call_target() == "PadToStatic") {
    return absl::OkStatus();
  }
  return DefaultAction(hlo);
}

absl::Status DynamicShapeRemovingVisitor::HandleAsyncStart(
    HloInstruction* hlo) {
  if (HloInstruction::IsThreadIncluded(hlo->async_execution_thread(),
                                       execution_threads_)) {
    return absl::OkStatus();
  }
  // Async ops running on excluded threads keep dynamic operands.
  return ConvertOperandsToDynamic(hlo);
}

absl::Status DynamicShapeRemovingVisitor::HandleAsyncUpdate(
    HloInstruction* hlo) {
  return absl::OkStatus();
}

absl::Status DynamicShapeRemovingVisitor::HandleAsyncDone(HloInstruction* hlo) {
  return absl::OkStatus();
}

absl::Status DynamicShapeRemovingVisitor::HandleWhile(HloInstruction* hlo) {
  return absl::OkStatus();
}

absl::Status DynamicShapeRemovingVisitor::HandleConditional(
    HloInstruction* hlo) {
  return absl::OkStatus();
}

absl::Status DynamicShapeRemovingVisitor::HandleGetDimensionSize(
    HloInstruction* hlo) {
  return absl::OkStatus();
}

absl::Status DynamicShapeRemovingVisitor::HandleSetDimensionSize(
    HloInstruction* hlo) {
  // Collapse set-dimension-size to its operand's shape, clearing the
  // dynamic bit on the targeted dimension.
  *hlo->mutable_shape() = hlo->operand(0)->shape();
  hlo->mutable_shape()->set_dynamic_dimension(hlo->dimension(), false);
  return absl::OkStatus();
}

}  // namespace (anonymous; opening brace is earlier in the file)

// Main pass driver: pads dynamic dimensions with identity values so the
// module can execute with static shapes, then removes/retargets the
// dynamic-shape bookkeeping ops.
absl::StatusOr<bool> DynamicPadder::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  VLOG(2) << "Pre DynamicPadder HLO:";
  XLA_VLOG_LINES(2, module->ToString());

  // Clean up dead code before running inference.
  HloDCE dce;
  TF_ASSIGN_OR_RETURN(bool changed, dce.Run(module, execution_threads));

  TF_ASSIGN_OR_RETURN(
      DynamicDimensionInference dynamic_dimension_inference,
      DynamicDimensionInference::Run(
          module, options_.op_supports_dynamism_handler,
          options_.custom_call_handler, options_.shape_check_mode,
          options_.assertion_generator, execution_threads));

  changed |= dynamic_dimension_inference.changed();

  std::vector<HloComputation*> computations =
      module->MakeComputationPostOrder(execution_threads);

  // Phase 1: per-instruction rewrites and identity-value padding.
  for (HloComputation* computation : computations) {
    for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {
      // Ops whose backend already supports dynamism need no rewriting.
      OpDynamismSupport has_dynamism_support = OpDynamismSupport::kNoSupport;
      if (options_.op_supports_dynamism_handler != nullptr) {
        has_dynamism_support = options_.op_supports_dynamism_handler(inst);
      }
      if (has_dynamism_support != OpDynamismSupport::kNoSupport) {
        continue;
      }
      // Op-specific rewrites; each `continue` skips the generic padding
      // loop at the bottom.
      if (inst->opcode() == HloOpcode::kConcatenate) {
        TF_ASSIGN_OR_RETURN(
            bool c, RewriteDynamicConcat(inst, &dynamic_dimension_inference));
        changed |= c;
        continue;
      }
      if (inst->opcode() == HloOpcode::kReverse) {
        TF_ASSIGN_OR_RETURN(bool c,
                            RewriteReverse(inst, &dynamic_dimension_inference));
        changed |= c;
        continue;
      }
      if (inst->opcode() == HloOpcode::kSort) {
        TF_ASSIGN_OR_RETURN(
            bool c, RewriteDynamicSort(inst, &dynamic_dimension_inference));
        changed |= c;
        continue;
      }
      if (inst->opcode() == HloOpcode::kReshape ||
          inst->opcode() == HloOpcode::kDynamicReshape) {
        TF_ASSIGN_OR_RETURN(
            bool c, RewriteDynamicReshape(inst, &dynamic_dimension_inference));
        changed |= c;
        continue;
      }
      if (inst->IsElementwiseBinary()) {
        TF_ASSIGN_OR_RETURN(
            bool c, RewriteDynamicBinaryOp(inst, &dynamic_dimension_inference));
        changed |= c;
        continue;
      }
      if (inst->opcode() == HloOpcode::kDynamicUpdateSlice) {
        TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicUpdateSlice(
                                        inst, &dynamic_dimension_inference));
        changed |= c;
        continue;
      }
      // Custom-call rewrites for "same padding" style dynamic ops.
      if (inst->IsCustomCall("DynamicConvolutionInputGrad")) {
        TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicConvolutionInputGrad(
                                        inst, &dynamic_dimension_inference));
        changed |= c;
        continue;
      }
      if (inst->IsCustomCall("DynamicConvolutionForward")) {
        TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicConvolutionForward(
                                        inst, &dynamic_dimension_inference));
        changed |= c;
        continue;
      }
      if (inst->IsCustomCall("DynamicConvolutionKernelGrad")) {
        TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicConvolutionKernelGrad(
                                        inst, &dynamic_dimension_inference));
        changed |= c;
        continue;
      }
      if (inst->IsCustomCall("DynamicReduceWindowSamePadding")) {
        TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicReduceWindowSamePadding(
                                        inst, &dynamic_dimension_inference));
        changed |= c;
        continue;
      }
      if (inst->IsCustomCall("DynamicSelectAndScatterSamePadding")) {
        TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicSelectAndScatterSamePadding(
                                        inst, &dynamic_dimension_inference));
        changed |= c;
        continue;
      }
      // Generic fallback: pad each dynamic dimension of each array operand
      // with the op's identity value so padded elements are no-ops.
      for (int64_t operand_num = 0; operand_num < inst->operand_count();
           ++operand_num) {
        HloInstruction* original_operand = inst->mutable_operand(operand_num);
        HloInstruction* operand = original_operand;
        if (!operand->shape().IsArray()) {
          continue;
        }
        for (int64_t input_dim = 0; input_dim < operand->shape().rank();
             ++input_dim) {
          HloInstruction* operand_dynamic_size =
              dynamic_dimension_inference.GetDynamicSize(original_operand, {},
                                                         input_dim);
          if (operand_dynamic_size == nullptr) {
            continue;
          }
          VLOG(2) << "Has dynamic dimension of operand" << operand_num << " @"
                  << input_dim;

          if (ShouldSkipPadOnOperand(inst, operand_num, input_dim,
                                     execution_threads)) {
            continue;
          }

          TF_ASSIGN_OR_RETURN(HloInstruction * identity_value,
                              ChooseIdentityValue(inst, operand_num));
          // No identity value means padding is unnecessary for this op.
          if (identity_value == nullptr) {
            continue;
          }

          HloInstruction* padded = PadWithScalar(
              operand, input_dim, operand_dynamic_size, identity_value);
          TF_RETURN_IF_ERROR(inst->ReplaceOperandWith(operand_num, padded));
          // Re-read the operand: it may have been replaced again by a
          // previous dimension's padding.
          operand = inst->mutable_operand(operand_num);
          changed = true;
        }
      }
    }
  }

  // Phase 2: strip remaining dynamic shapes, reverse post-order so callers
  // are processed before callees.
  auto call_graph = CallGraph::Build(module, execution_threads);
  computations = module->MakeComputationPostOrder(execution_threads);
  for (auto it = computations.rbegin(); it != computations.rend(); ++it) {
    HloComputation* computation = *it;
    // Unreachable computations are left untouched.
    if (!call_graph->CanReach(module->entry_computation(), computation)) {
      continue;
    }
    // Only the entry computation's output is sliced back to dynamic form,
    // and only when the pass options request it.
    bool require_dynamic_output = options_.slice_dynamic_output &&
                                  computation == module->entry_computation();
    changed |= require_dynamic_output;
    TF_ASSIGN_OR_RETURN(bool c,
                        DynamicShapeRemovingVisitor::Run(
                            computation, options_.op_supports_dynamism_handler,
                            &dynamic_dimension_inference, execution_threads,
                            require_dynamic_output));
    changed |= c;
  }

  if (changed) {
    dynamic_padding_gauge->GetCell()->Set(changed);
    module->set_is_dynamic(true);
  }

  // Phase 3: lower get-dimension-size to the inferred size values.
  for (auto* computation : module->computations(execution_threads)) {
    if (!call_graph->CanReach(module->entry_computation(), computation)) {
      continue;
    }
    for (auto instruction : computation->MakeInstructionPostOrder()) {
      TF_ASSIGN_OR_RETURN(
          bool c, ReplaceGetSize(instruction, &dynamic_dimension_inference));
      changed |= c;
    }
  }

  // Phase 4: remove set-dimension-size / set-bound bookkeeping ops.
  for (auto* computation : module->computations(execution_threads)) {
    if (!call_graph->CanReach(module->entry_computation(), computation)) {
      continue;
    }
    for (auto instruction : computation->MakeInstructionPostOrder()) {
      TF_ASSIGN_OR_RETURN(bool c, ReplaceSetSize(instruction));
      changed |= c;
      TF_ASSIGN_OR_RETURN(c, ReplaceSetBound(instruction));
      changed |= c;
    }
  }

  // Final cleanup of instructions orphaned by the rewrites above.
  if (changed) {
    HloDCE dce;
    TF_ASSIGN_OR_RETURN(bool c, dce.Run(module, execution_threads));
    changed |= c;
  }

  VLOG(2) << "Post DynamicPadder HLO:";
  XLA_VLOG_LINES(2, module->ToString());
  return changed;
}

}  // namespace (presumably xla; the opening brace is not visible here)
#include "xla/service/dynamic_padder.h" #include <cstdint> #include <memory> #include <string> #include <utility> #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_replace.h" #include "absl/types/span.h" #include "xla/error_spec.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/literal.h" #include "xla/literal_util.h" #include "xla/service/algebraic_simplifier.h" #include "xla/service/dynamic_dimension_inference.h" #include "xla/service/dynamic_dimension_simplifier.h" #include "xla/service/hlo_dce.h" #include "xla/service/hlo_parser.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/service/tuple_simplifier.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/test.h" #include "xla/test_helpers.h" #include "xla/tests/client_library_test_base.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/literal_test_util.h" #include "xla/tests/llvm_irgen_test_base.h" #include "xla/tests/test_macros.h" #include "xla/tsl/lib/core/status_test_util.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test_benchmark.h" #include "tsl/protobuf/error_codes.pb.h" namespace xla { namespace { namespace m = ::xla::match; namespace op = xla::testing::opcode_matchers; OpDynamismSupport OpHasDynamismSupport(HloInstruction* hlo) { if (hlo->opcode() != HloOpcode::kCustomCall) { return OpDynamismSupport::kNoSupport; } if (hlo->custom_call_target() == "OpWithDynamicLowering") { return OpDynamismSupport::kRequired; } return OpDynamismSupport::kNoSupport; } absl::Status 
CustomCallDynamicDimensionInference( HloInstruction* hlo, DynamicDimensionInference* inferencer) { if (hlo->custom_call_target() == "OpWithDynamicLowering") { if (hlo->shape().IsTuple()) { HloInstruction* dynamic_size = inferencer->GetDynamicSize(hlo->mutable_operand(0), {1}, 0); inferencer->SetDynamicSize(hlo, {1}, 0, dynamic_size); } else { HloInstruction* dynamic_size = inferencer->GetDynamicSize(hlo->mutable_operand(0), {}, 0); inferencer->SetDynamicSize(hlo, {}, 0, dynamic_size); } } return absl::OkStatus(); } class DynamicPadderTest : public HloTestBase { protected: DynamicPadderTest() : HloTestBase() { module_ = CreateNewVerifiedModule(); } std::unique_ptr<HloModule> GetHloModule(const std::string& hlo_text) { std::unique_ptr<HloModule> module = ParseAndReturnVerifiedModule(hlo_text).value(); return module; } absl::StatusOr<bool> RunPadder( bool slice_dynamic_output = false, OpSupportsDynamismHandler op_supports_dynamism_handler = OpHasDynamismSupport, DynamicDimensionInference::CustomCallInferenceHandler custom_call_handler = CustomCallDynamicDimensionInference) { DynamicPadderOptions options; options.slice_dynamic_output = slice_dynamic_output; options.op_supports_dynamism_handler = std::move(op_supports_dynamism_handler); options.custom_call_handler = std::move(custom_call_handler); DynamicPadder padder(std::move(options)); TF_ASSIGN_OR_RETURN(bool changed, RunHloPass(&padder, module_.get())); if (!changed) return false; TupleSimplifier tuple_simplifier; TF_RETURN_IF_ERROR(RunHloPass(&tuple_simplifier, module_.get()).status()); AlgebraicSimplifier alg_simplifier(AlgebraicSimplifierOptions{}); TF_RETURN_IF_ERROR(RunHloPass(&alg_simplifier, module_.get()).status()); return true; } void ExpectPadded(const HloInstruction* inst) { EXPECT_THAT(inst, op::Select(op::Lt(op::Iota(), op::Broadcast(op::Parameter())), ::testing::_, op::Broadcast())); } HloComputation* GetScalarAddComputation() { auto embedded_builder = HloComputation::Builder("add"); auto lhs = 
embedded_builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(F32, {}), "lhs")); auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter( 1, ShapeUtil::MakeShape(F32, {}), "rhs")); embedded_builder.AddInstruction( HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs)); return module_->AddEmbeddedComputation(embedded_builder.Build()); } std::unique_ptr<HloModule> module_; const Shape scalar_shape_ = ShapeUtil::MakeShape(S32, {}); }; class MemoryAlignmentTest : public HloTestBase {}; TEST_F(MemoryAlignmentTest, DISABLED_ON_CPU(TestDataTypeFP16)) { const std::string hlo_text = R"( HloModule TestDataTypeFP16 update_add (p0: f16[], p1: f16[]) -> f16[] { p0 = f16[] parameter(0) p1 = f16[] parameter(1) ROOT out = f16[] add(p0, p1) } ENTRY main () -> f16[<=1,1] { c1 = s32[1]{0} constant({1}) c2 = f16[1,1]{1,0} constant({ {0.099976} }) shape = s32[] reshape(s32[1]{0} c1) dim_size = f16[<=1,1]{1,0} set-dimension-size(f16[1,1]{1,0} c2, s32[] shape), dimensions={0} ROOT out = f16[<=1,1]{1,0} scatter(f16[<=1,1]{1,0} dim_size, s32[1]{0} c1, f16[1,1]{1,0} c2), update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=update_add } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); } TEST_F(DynamicPadderTest, ReduceTest) { auto builder = HloComputation::Builder(TestName()); auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2}); auto reduce_shape = ShapeUtil::MakeShape(F32, {2}); auto dynamic_shape = ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, false, true}); auto data_param = builder.AddInstruction( HloInstruction::CreateParameter(0, input_shape, "data_param")); auto* size_param = builder.AddInstruction( HloInstruction::CreateParameter(1, scalar_shape_, "size_param")); data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( dynamic_shape, data_param, size_param, 2)); auto negate = builder.AddInstruction(HloInstruction::CreateUnary( 
dynamic_shape, HloOpcode::kNegate, data_param)); auto init = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0))); auto reduce = builder.AddInstruction(HloInstruction::CreateReduce( reduce_shape, negate, init, {0, 2}, GetScalarAddComputation())); EXPECT_FALSE(module_->is_dynamic()); module_->AddEntryComputation(builder.Build()); TF_ASSERT_OK(RunPadder().status()); ExpectPadded(reduce->operand(0)); EXPECT_TRUE(module_->is_dynamic()); } TEST_F(DynamicPadderTest, DynamicLoweringTest) { const std::string hlo_text = R"( HloModule DynamicLowering ENTRY main { param = s32[5] parameter(0) const = s32[] constant(3) param_padded = s32[<=5] set-dimension-size(param, const), dimensions={0} custom-call.1 = s32[<=5] custom-call(param_padded), custom_call_target="OpWithDynamicLowering" custom-call.2 = s32[<=5] custom-call(custom-call.1), custom_call_target="OpWithDynamicLowering" ROOT negate = s32[<=5] negate(custom-call.2) } )"; module_ = GetHloModule(hlo_text); TF_ASSERT_OK(RunPadder(true).status()); auto custom_call_1 = module_->entry_computation()->GetInstructionWithName("custom-call.1"); auto custom_call_2 = module_->entry_computation()->GetInstructionWithName("custom-call.2"); HloInstruction* slice_to_dynamic = custom_call_1->mutable_operand(0); ASSERT_THAT(slice_to_dynamic->opcode(), HloOpcode::kCustomCall); ASSERT_THAT(slice_to_dynamic->custom_call_target(), "SliceToDynamic"); ASSERT_EQ(custom_call_2->user_count(), 1); HloInstruction* pad_to_static = custom_call_2->users()[0]; ASSERT_THAT(pad_to_static->opcode(), HloOpcode::kCustomCall); ASSERT_THAT(pad_to_static->custom_call_target(), "PadToStatic"); slice_to_dynamic = module_->entry_computation()->root_instruction(); ASSERT_THAT(slice_to_dynamic->opcode(), HloOpcode::kCustomCall); ASSERT_THAT(slice_to_dynamic->custom_call_target(), "SliceToDynamic"); } TEST_F(DynamicPadderTest, DynamicLoweringTestTupleInput) { const std::string hlo_text = R"( HloModule DynamicLowering ENTRY main { 
param = s32[5] parameter(0) const = s32[] constant(3) param_padded = s32[<=5] set-dimension-size(param, const), dimensions={0} tuple_arg = (s32[], s32[<=5]) tuple(const, param_padded) custom-call.1 = (s32[], s32[<=5]) custom-call(tuple_arg), custom_call_target="OpWithDynamicLowering" custom-call.2 = (s32[], s32[<=5]) custom-call(custom-call.1), custom_call_target="OpWithDynamicLowering" data = s32[<=5]{0} get-tuple-element(custom-call.2), index=1 ROOT negate = s32[<=5] negate(data) } )"; module_ = GetHloModule(hlo_text); TF_ASSERT_OK(RunPadder(true).status()); auto* root = module_->entry_computation()->root_instruction(); EXPECT_THAT(root, op::CustomCall( {"SliceToDynamic"}, op::Negate(), op::GetTupleElement(op::CustomCall({"PadToStatic"})))); HloInstruction* negate = root->mutable_operand(0); EXPECT_THAT( negate, op::Negate(op::GetTupleElement(op::CustomCall( {"PadToStatic"}, op::GetTupleElement(op::CustomCall( {"OpWithDynamicLowering"}, ::testing::_)))))); auto custom_call_1 = module_->entry_computation()->GetInstructionWithName("custom-call.1"); EXPECT_THAT(custom_call_1, op::CustomCall({"OpWithDynamicLowering"}, op::Tuple(op::Constant(), op::CustomCall({"SliceToDynamic"})))); } TEST_F(DynamicPadderTest, DynamicOutputNestedTuple) { const std::string hlo_text = R"( HloModule DynamicLowering ENTRY main { param = s32[5] parameter(0) const = s32[] constant(3) const2 = s32[] constant(4) param_padded = s32[<=5] set-dimension-size(param, const), dimensions={0} tuple0 = (s32[], s32[<=5]) tuple(const, param_padded) ROOT tuple1 = (s32[], (s32[], s32[<=5])) tuple(const2, tuple0) } )"; module_ = GetHloModule(hlo_text); TF_ASSERT_OK(RunPadder(true).status()); TF_ASSERT_OK(TupleSimplifier().Run(module_.get()).status()); XLA_LOG_LINES(INFO, module_->ToString()); auto* root = module_->entry_computation()->root_instruction(); EXPECT_THAT(root, op::Tuple(op::Constant(), op::Tuple())); HloInstruction* nested_tuple = root->mutable_operand(1); EXPECT_THAT(nested_tuple, 
op::Tuple(op::Constant(), op::CustomCall({"SliceToDynamic"}))); } TEST_F(DynamicPadderTest, ConvolutionTest) { auto builder = HloComputation::Builder(TestName()); constexpr int xdim = 3; constexpr int ydim = 2; constexpr int zdim = 1; auto xy_shape = ShapeUtil::MakeShape(F32, {xdim, ydim}); auto yz_shape = ShapeUtil::MakeShape(F32, {ydim, zdim}); auto zx_shape = ShapeUtil::MakeShape(F32, {zdim, xdim}); auto xy_shape_dynamic = ShapeUtil::MakeShape(F32, {xdim, ydim}, {false, true}); auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter( 0, xy_shape, "A")); auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter( 1, yz_shape, "B")); auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter( 2, scalar_shape_, "size_param")); auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers(0); dnums.set_kernel_input_feature_dimension(0); dnums.set_kernel_output_feature_dimension(1); dnums.set_input_batch_dimension(0); dnums.set_output_batch_dimension(1); dnums.set_output_feature_dimension(0); Window window; a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( xy_shape_dynamic, a_param, size_param, 1)); auto* conv = builder.AddInstruction(HloInstruction::CreateConvolve( zx_shape, a_param, b_param, 1, 1, window, dnums, HloTestBase::DefaultPrecisionConfig(2))); module_->AddEntryComputation(builder.Build()); TF_ASSERT_OK(RunPadder().status()); ExpectPadded(conv->operand(0)); } TEST_F(DynamicPadderTest, ConvolutionNoPad) { auto builder = HloComputation::Builder(TestName()); constexpr int xdim = 3; constexpr int ydim = 2; constexpr int zdim = 1; auto xy_shape = ShapeUtil::MakeShape(F32, {xdim, ydim}); auto yz_shape = ShapeUtil::MakeShape(F32, {ydim, zdim}); auto zx_shape = ShapeUtil::MakeShape(F32, {zdim, xdim}, {false, true}); auto dynamic_shape = ShapeUtil::MakeShape(F32, {xdim, ydim}, {true, false}); auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter( 0, xy_shape, "A")); auto* b_param = 
builder.AddInstruction(HloInstruction::CreateParameter( 1, yz_shape, "B")); auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter( 2, scalar_shape_, "size_param")); a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( dynamic_shape, a_param, size_param, 0)); auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers(0); dnums.set_kernel_input_feature_dimension(0); dnums.set_kernel_output_feature_dimension(1); dnums.set_input_batch_dimension(0); dnums.set_output_batch_dimension(1); dnums.set_output_feature_dimension(0); Window window; auto* conv = builder.AddInstruction(HloInstruction::CreateConvolve( zx_shape, a_param, b_param, 1, 1, window, dnums, HloTestBase::DefaultPrecisionConfig(2))); module_->AddEntryComputation(builder.Build()); TF_ASSERT_OK(RunPadder().status()); EXPECT_THAT(conv->operand(0), op::Parameter()); } TEST_F(DynamicPadderTest, ReduceWindowNoPadForTrivialWindow) { auto builder = HloComputation::Builder(TestName()); auto input_shape = ShapeUtil::MakeShape(F32, {4, 5}); auto reduce_shape = ShapeUtil::MakeShape(F32, {3, 5}, {false, true}); auto dynamic_shape = ShapeUtil::MakeShape(F32, {4, 5}, {false, true}); auto input = builder.AddInstruction( HloInstruction::CreateParameter(0, input_shape, "input")); auto* size_param = builder.AddInstruction( HloInstruction::CreateParameter(1, scalar_shape_, "size_param")); input = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( dynamic_shape, input, size_param, 1)); auto init = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0))); TF_ASSERT_OK_AND_ASSIGN(Window window, ParseWindow("size=2x1 pad=0_0x0_0")); auto output = builder.AddInstruction(HloInstruction::CreateReduceWindow( reduce_shape, input, init, window, GetScalarAddComputation())); module_->AddEntryComputation(builder.Build()); TF_ASSERT_OK(RunPadder().status()); EXPECT_THAT(output->operand(0), op::Parameter()); } TEST_F(DynamicPadderTest, 
VariadicReduceWindowNoPadForTrivialWindow) { const std::string hlo_text = R"( HloModule VariadicReduceWindowNoPadForTrivialWindow add_f32 (a: f32[], b: s32[], c: f32[], d: s32[]) -> (f32[], s32[]) { a = f32[] parameter(0) b = s32[] parameter(1) c = f32[] parameter(2) d = s32[] parameter(3) add.0 = f32[] add(a, c) add.1 = s32[] add(b, d) ROOT out = tuple(add.0, add.1) } ENTRY main { input.0 = f32[4, 5] parameter(0) input.1 = s32[4, 5] parameter(1) size_param.0 = s32[] parameter(2) size_param.1 = s32[] parameter(3) input_dynamic.0 = f32[4,<=5] set-dimension-size(input.0, size_param.0), dimensions={1} input_dynamic.1 = s32[4,<=5] set-dimension-size(input.1, size_param.0), dimensions={1} init.0 = f32[] constant(0.0) init.1 = s32[] constant(0) ROOT output = (f32[3, <=5], s32[3, <=5]) reduce-window(input_dynamic.0, input_dynamic.1, init.0, init.1), window={size=2x1 pad=0_0x0_0}, to_apply=add_f32 } )"; const int kNumParams = 2; module_ = ParseAndReturnVerifiedModule(hlo_text).value(); TF_ASSERT_OK(RunPadder().status()); for (int i = 0; i < kNumParams; ++i) { EXPECT_THAT(module_->entry_computation()->root_instruction()->operand(i), op::Parameter()); } } TEST_F(DynamicPadderTest, PadS8ToS32Dot) { const std::string hlo_text = R"( HloModule test ENTRY test { a = s8[<=16,32] parameter(0) b = s8[32,64] parameter(1) ROOT root = s32[<=16,64] dot(a, b), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; module_ = GetHloModule(hlo_text); TF_ASSERT_OK(RunPadder(true).status()); EXPECT_THAT(module_->entry_computation()->root_instruction(), GmockMatch(m::CustomCall({"SliceToDynamic"}, m::Dot(m::Op().WithShape(S8, {16, 32}), m::Op().WithShape(S8, {32, 64})) .WithShape(S32, {16, 64}), m::Op(), m::Op()))); } TEST_F(DynamicPadderTest, PadToStaticForCustomCall) { const std::string hlo_text = R"( HloModule test ENTRY test { a = f32[64] parameter(0) ROOT c = f32[<=128] custom-call(a), custom_call_target="UnknownOp" } )"; module_ = GetHloModule(hlo_text); 
TF_ASSERT_OK(RunPadder(true).status()); EXPECT_THAT(module_->entry_computation()->root_instruction(), GmockMatch(m::CustomCall({"UnknownOp"}))); } TEST_F(DynamicPadderTest, WhileLoopDynamicShapeChangeToStatic) { const std::string hlo_text = R"( HloModule WhileLoopDynamicShapeChangeToStatic %cond_wrapper.19447 { param = (s32[], s32[], f32[], f32[<=32,216]{1,0}) parameter(0) %get-tuple-element.184 = s32[] get-tuple-element(param), index=0 %get-tuple-element.185 = s32[] get-tuple-element(param), index=1 ROOT %compare.28 = pred[] compare(s32[] %get-tuple-element.184, s32[] %get-tuple-element.185), direction=LT } %while_body_78894_grad_83711__.18882 { param = (s32[], s32[], f32[], f32[<=32,216]{1,0}) parameter(0) %get-tuple-element.184 = s32[] get-tuple-element(param), index=0 %get-tuple-element.185 = s32[] get-tuple-element(param), index=1 %add.1 = s32[] add(get-tuple-element.184, get-tuple-element.184) %gte.2 = f32[] get-tuple-element(param), index=2 %broadcast.19389 = f32[32,216]{1,0} broadcast(f32[] %gte.2), dimensions={} %constant.32 = s32[] constant(32) %set-dimension-size = f32[<=32,216]{1,0} set-dimension-size(f32[32,216]{1,0} %broadcast.19389, s32[] %constant.32), dimensions={0} ROOT tuple = (s32[], s32[], f32[], f32[<=32,216]{1,0}) tuple(add.1, %get-tuple-element.185, %gte.2, %set-dimension-size) } ENTRY main { param = f32[] parameter(0) param.1 = f32[<=32,216]{1,0} parameter(1) const = s32[] constant(3) const2 = s32[] constant(4) %tuple.18877 = (s32[], s32[], f32[], f32[<=32,216]{1,0}) tuple(const, const2, param, param.1) %while.19451 = (s32[], s32[], f32[], f32[<=32,216]{1,0}) while((s32[], s32[], f32[], f32[<=32,216]{1,0}) %tuple.18877), condition=%cond_wrapper.19447, body=%while_body_78894_grad_83711__.18882 ROOT result = f32[<=32,216]{1,0} get-tuple-element(while.19451), index=3 } )"; module_ = GetHloModule(hlo_text); TF_ASSERT_OK(RunPadder(true).status()); XLA_LOG_LINES(INFO, module_->ToString()); auto* root = 
module_->entry_computation()->root_instruction(); EXPECT_EQ(root->shape(), ShapeUtil::MakeShape(F32, {32, 216}, {true, false})); HloInstruction* while_inst = nullptr; for (HloInstruction* inst : module_->entry_computation()->MakeInstructionPostOrder()) { if (inst->opcode() == HloOpcode::kWhile) { ASSERT_EQ(while_inst, nullptr) << "while_inst: " << while_inst->name() << ", inst: " << inst->name(); while_inst = inst; } } EXPECT_EQ(while_inst->shape(), ShapeUtil::MakeTupleShape({ShapeUtil::MakeScalarShape(S32), ShapeUtil::MakeScalarShape(S32), ShapeUtil::MakeScalarShape(F32), ShapeUtil::MakeShape(F32, {32, 216}), ShapeUtil::MakeScalarShape(S32)})); } TEST_F(DynamicPadderTest, WhileLoopCarriesRequiredDynamicShape) { const std::string hlo_text = R"( HloModule WhileLoopCarriesRequiredDynamicShape %cond { param = (f32[1024], f32[<=64], f32[32], f32[<=64], f32[32], s32[], s32[], token[]) parameter(0) current = s32[] get-tuple-element(param), index=5 last = s32[] get-tuple-element(param), index=6 ROOT result = pred[] compare(current, last), direction=LT } %body { param = (f32[1024], f32[<=64], f32[32], f32[<=64], f32[32], s32[], s32[], token[]) parameter(0) var = f32[1024] get-tuple-element(param), index=0 input0 = f32[<=64] get-tuple-element(param), index=1 grad0 = f32[32] get-tuple-element(param), index=2 input1 = f32[<=64] get-tuple-element(param), index=3 act1 = f32[32] get-tuple-element(param), index=4 grad1 = f32[32] custom-call(act1), custom_call_target="ComputeGradients" var1 = f32[1024] custom-call(var, input0, grad0), custom_call_target="ApplyGradients", output_to_operand_aliasing={{}: (0, {})} token2 = token[] get-tuple-element(param), index=7 infeed2 = (f32[<=64], token[]) infeed(token2) input2 = f32[<=64] get-tuple-element(infeed2), index=0 act2 = f32[32] custom-call(var1, input2), custom_call_target="ComputeActivations" current = s32[] get-tuple-element(param), index=5 constant1 = s32[] constant(1) add = s32[] add(current, constant1) last = s32[] 
get-tuple-element(param), index=6 token3 = token[] get-tuple-element(infeed2), index=1 ROOT result = (f32[1024], f32[<=64], f32[32], f32[<=64], f32[32], s32[], s32[], token[]) tuple(var1, input1, grad1, input2, act2, add, last, token3) } ENTRY main { last = s32[] parameter(0) var = f32[1024] parameter(1) token0 = token[] after-all() infeed0 = (f32[<=64], token[]) infeed(token0) input0 = f32[<=64] get-tuple-element(infeed0), index=0 act0 = f32[32] custom-call(var, input0), custom_call_target="ComputeActivations" grad0 = f32[32] custom-call(act0), custom_call_target="ComputeGradients" token1 = token[] get-tuple-element(infeed0), index=1 infeed1 = (f32[<=64], token[]) infeed(token1) input1 = f32[<=64] get-tuple-element(infeed1), index=0 act1 = f32[32] custom-call(var, input1), custom_call_target="ComputeActivations" token2 = token[] get-tuple-element(infeed1), index=1 zero = s32[] constant(0) tuple = (f32[1024], f32[<=64], f32[32]{0}, f32[<=64], f32[32]{0}, s32[], s32[], token[]) tuple(var, input0, grad0, input1, act1, zero, last, token2) while = (f32[1024], f32[<=64], f32[32]{0}, f32[<=64], f32[32]{0}, s32[], s32[], token[]) while(tuple), condition=%cond, body=%body ROOT result = f32[1024] get-tuple-element(while), index=0 } )"; module_ = GetHloModule(hlo_text); auto op_supports_dynamism = [](HloInstruction* hlo) { if (hlo->opcode() != HloOpcode::kCustomCall) { return OpDynamismSupport::kNoSupport; } if (hlo->custom_call_target() == "ComputeActivations" || hlo->custom_call_target() == "ApplyGradients") { return OpDynamismSupport::kRequired; } return OpDynamismSupport::kNoSupport; }; auto custom_call_handler = [](HloInstruction* hlo, DynamicDimensionInference* inference) { return absl::OkStatus(); }; TF_ASSERT_OK( RunPadder( true, std::move(op_supports_dynamism), std::move(custom_call_handler)) .status()); XLA_VLOG_LINES(1, module_->ToString()); for (HloComputation* computation : module_->computations()) { for (HloInstruction* instruction : 
computation->instructions()) { if (instruction->opcode() == HloOpcode::kCustomCall) { EXPECT_NE(instruction->custom_call_target(), "PadToStatic"); EXPECT_NE(instruction->custom_call_target(), "SliceToDynamic"); if (instruction->custom_call_target() == "ComputeActivations") { EXPECT_TRUE(instruction->operand(1)->shape().is_dynamic()); } else if (instruction->custom_call_target() == "ApplyGradients") { EXPECT_TRUE(instruction->operand(1)->shape().is_dynamic()); } } else if (instruction->opcode() == HloOpcode::kWhile) { const Shape& shape = instruction->shape(); EXPECT_TRUE(shape.tuple_shapes(1).is_dynamic()); EXPECT_TRUE(shape.tuple_shapes(3).is_dynamic()); } } } }

// Verifies that a dynamic dimension set on a parameter is tracked through a
// reshape -> dot -> transpose -> reshape chain: after the padder runs, the
// module is still dynamic and dimension 0 of the entry root stays dynamic.
TEST_F(DynamicPadderTest, HandleReshapeCheckPastReshape) { auto hlo_text = R"( HloModule ReshapeDynamicDimension ENTRY main { p0 = f32[4,511,432]{2,1,0} parameter(0) p1 = s32[] parameter(1) p2 = f32[432,337]{1,0:T(8,128)} parameter(2) p0_dynamic = f32[<=4,511,432] set-dimension-size(p0, p1), dimensions={0} reshape.4179 = f32[<=2044,432]{1,0} reshape(p0_dynamic) dot.4180 = f32[<=2044,337]{1,0} dot(reshape.4179, p2), lhs_contracting_dims={1}, rhs_contracting_dims={0} transpose.4181 = f32[<=2044,337]{1,0} transpose(dot.4180), dimensions={0,1} ROOT reshape.4183 = f32[<=4,511,337]{2,1,0} reshape(transpose.4181) })"; module_ = GetHloModule(hlo_text); TF_ASSERT_OK(RunPadder(true).status()); VLOG(3) << module_->ToString(); CHECK(module_->is_dynamic()); CHECK(module_->entry_computation() ->root_instruction() ->shape() .is_dynamic_dimension(0)); }

// Fixture for tests that execute padded modules end-to-end. PadAndExecute
// runs DynamicPadder followed by DCE on the module and then executes it with
// the given literals; slice_dynamic_output=false clears the dynamic result
// layout so the raw (padded) output buffer is returned to the caller.
class ExecutionTest : public HloTestBase { protected: std::unique_ptr<HloModule> GetHloModule(const std::string& hlo_text) { std::unique_ptr<HloModule> module = ParseAndReturnVerifiedModule(hlo_text).value(); return module; } absl::StatusOr<Literal> PadAndExecute(std::unique_ptr<HloModule> module, absl::Span<Literal* const> arguments, bool slice_dynamic_output = true) { if (!slice_dynamic_output) { auto new_config = module->config(); 
new_config.mutable_entry_computation_layout() ->mutable_result_layout() ->ClearDynamicShape(); module->set_config(new_config); } DynamicPadderOptions options; options.slice_dynamic_output = slice_dynamic_output; DynamicPadder padder(options); TF_CHECK_OK(padder.Run(module.get()).status()); HloDCE dce; TF_CHECK_OK(dce.Run(module.get()).status()); return Execute(std::move(module), {arguments}); } };

// Scatter with dynamically-sized indices/updates: a padded module (bound 4,
// real size 2) must produce the same result as an unpadded reference module
// (bound 2). The extra padded rows ({30,22,11}, {-1,20,-1}) must be ignored.
// NOTE(review): DynamicPadder is run here explicitly and then again inside
// PadAndExecute — presumably running the pass twice is idempotent; confirm.
XLA_TEST_F(ExecutionTest, ScatterUpdate) { const std::string hlo_text = R"( HloModule TensorFlowScatterV1 update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) ROOT rhs = s32[] parameter(1) } ENTRY main { operand = s32[3,3] parameter(0) indices = s32[INDICES_BOUND] parameter(1) updates = s32[INDICES_BOUND,3] parameter(2) dynamic_size = s32[] parameter(3) indices_dynamic = s32[<=INDICES_BOUND] set-dimension-size(indices, dynamic_size), dimensions={0} updates_dynamic = s32[<=INDICES_BOUND,3] set-dimension-size(updates, dynamic_size), dimensions={0} ROOT scatter = s32[3,3] scatter(operand, indices_dynamic, updates_dynamic), to_apply=update_s32, update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1 } )"; const std::string hlo_text_not_padded = absl::StrReplaceAll(hlo_text, {{"INDICES_BOUND", "2"}}); auto module_not_padded = GetHloModule(hlo_text_not_padded); Literal operand = LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}); Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 2}); Literal updates = LiteralUtil::CreateR2<int32_t>({{10, 20, 30}, {70, 80, 90}}); Literal dynamic_size = LiteralUtil::CreateR0<int32_t>(2); Literal not_padded = ExecuteAndTransfer(std::move(module_not_padded), {&operand, &scatter_indices, &updates, &dynamic_size}); const std::string hlo_text_padded = absl::StrReplaceAll(hlo_text, {{"INDICES_BOUND", "4"}}); auto module_padded = GetHloModule(hlo_text_padded); Literal scatter_indices_padded = LiteralUtil::CreateR1<int32_t>({0, 2, 0, 4}); Literal updates_padded = 
LiteralUtil::CreateR2<int32_t>( {{10, 20, 30}, {70, 80, 90}, {30, 22, 11}, {-1, 20, -1}}); DynamicPadder padder; TF_CHECK_OK(padder.Run(module_padded.get()).status()); TF_ASSERT_OK_AND_ASSIGN(Literal padded, PadAndExecute(std::move(module_padded), {&operand, &scatter_indices_padded, &updates_padded, &dynamic_size})); EXPECT_EQ(padded, not_padded); }

// Scatter whose *operand* and *updates* carry the dynamic dimension inside
// the update window (update_window_dims={0,1}); dynamic size is 1 so only the
// first update row lands, filling the s32[1,<=2,3] result.
XLA_TEST_F(ExecutionTest, ScatterUpdateWindowDim) { const std::string hlo_text = R"( HloModule ScatterUpdateWindowDim update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) ROOT rhs = s32[] parameter(1) } ENTRY main { operand = s32[1,2,3] parameter(0) indices = s32[1] parameter(1) updates = s32[2,3,1] parameter(2) dynamic_size = s32[] constant(1) operand_dynamic = s32[1, <=2, 3] set-dimension-size(operand, dynamic_size), dimensions={1} updates_dynamic = s32[<=2, 3, 1] set-dimension-size(updates, dynamic_size), dimensions={0} ROOT scatter = s32[1, <=2, 3] scatter(operand_dynamic, indices, updates_dynamic), to_apply=update_s32, update_window_dims={0, 1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1 } )"; auto hlo_module = GetHloModule(hlo_text); Literal operand = LiteralUtil::CreateR3<int32_t>({{{0, 0, 0}, {0, 0, 0}}}); Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0}); Literal updates = LiteralUtil::CreateR3<int32_t>({{{10}, {20}, {30}}, {{70}, {80}, {90}}}); TF_ASSERT_OK_AND_ASSIGN( Literal padded, PadAndExecute(std::move(hlo_module), {&operand, &scatter_indices, &updates}, false)); Literal expected = LiteralUtil::CreateR3<int32_t>({{{10, 20, 30}, {70, 80, 90}}}); EXPECT_EQ(padded, expected); }

// Float variant: with dynamic_size=1 only the first of the two update rows
// ({10,20,30}) is applied; rows 1 and 2 of the operand must be unchanged.
XLA_TEST_F(ExecutionTest, ScatterUpdateF32) { const std::string hlo_text = R"( HloModule TensorFlowScatterV1 update_f32 (lhs: f32[], rhs: f32[]) -> f32[] { lhs = f32[] parameter(0) ROOT rhs = f32[] parameter(1) } ENTRY main { operand = f32[3,3] parameter(0) indices = s32[2] parameter(1) updates = f32[2,3] parameter(2) dynamic_size = s32[] parameter(3) indices_dynamic 
= s32[<=2] set-dimension-size(indices, dynamic_size), dimensions={0} updates_dynamic = f32[<=2,3] set-dimension-size(updates, dynamic_size), dimensions={0} ROOT scatter = f32[3,3] scatter(operand, indices_dynamic, updates_dynamic), to_apply=update_f32, update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1 } )"; auto module_not_padded = GetHloModule(hlo_text); Literal operand = LiteralUtil::CreateR2<float>( {{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}, {7.0, 8.0, 9.0}}); Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 2}); Literal updates = LiteralUtil::CreateR2<float>({{10.0, 20.0, 30.0}, {70.0, 80.0, 90.0}}); Literal dynamic_size = LiteralUtil::CreateR0<int32_t>(1); auto module_padded = GetHloModule(hlo_text); DynamicPadder padder; TF_CHECK_OK(padder.Run(module_padded.get()).status()); TF_ASSERT_OK_AND_ASSIGN( Literal not_padded, PadAndExecute(std::move(module_padded), {&operand, &scatter_indices, &updates, &dynamic_size})); EXPECT_EQ(LiteralUtil::CreateR2<float>( {{10.0, 20.0, 30.0}, {4.0, 5.0, 6.0}, {7.0, 8.0, 9.0}}), not_padded); }

// Gather over a dynamic dimension followed by a reduce: with dynamic size 1
// on dim 1, only element {3} at index 1 contributes, so the reduction is 3.
XLA_TEST_F(ExecutionTest, WholeDimensionGather) { const std::string hlo_text = R"( HloModule TensorFlowScatterV1 update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY main { param = s32[3, 2, 1] parameter(0) size = s32[] constant(1) param_padded = s32[3, <=2, 1] set-dimension-size(param, size), dimensions={1} index = s32[] constant(1) gather = s32[<=2,1]{1,0} gather(param_padded, index), offset_dims={0,1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=0, slice_sizes={1,2,1} init = s32[] constant(0) ROOT reduce = s32[] reduce(gather, init), dimensions={0, 1}, to_apply=update_s32 } )"; Literal operand = LiteralUtil::CreateR3<int32_t>({{{1}, {2}}, {{3}, {4}}, {{5}, {6}}}); auto module = GetHloModule(hlo_text); DynamicPadder padder; TF_CHECK_OK(padder.Run(module.get()).status()); 
TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {&operand})); Literal expected = LiteralUtil::CreateR0<int32_t>(3); EXPECT_EQ(result, expected); }

// Reduce over two dynamic dimensions of the same tensor: the padded module
// (bound 4, real size 2) must match the unpadded reference (bound 2).
XLA_TEST_F(ExecutionTest, TwoDimensionReduce) { const std::string hlo_text = R"( HloModule TensorFlowScatterV1 update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY main { param = s32[INDICES_BOUND, INDICES_BOUND] parameter(0) dynamic_size = s32[] parameter(1) param_0 = s32[<=INDICES_BOUND,INDICES_BOUND] set-dimension-size(param, dynamic_size), dimensions={0} param_1 = s32[<=INDICES_BOUND,INDICES_BOUND] set-dimension-size(param_0, dynamic_size), dimensions={1} const = s32[] constant(0) ROOT reduce = s32[] reduce(param_1, const), dimensions={0, 1}, to_apply=update_s32 } )"; const std::string hlo_text_not_padded = absl::StrReplaceAll(hlo_text, {{"INDICES_BOUND", "2"}}); auto module_not_padded = GetHloModule(hlo_text_not_padded); Literal operand = LiteralUtil::CreateR2<int32_t>({{1, 2}, {4, 5}}); Literal dynamic_size = LiteralUtil::CreateR0<int32_t>(2); Literal not_padded = ExecuteAndTransfer(std::move(module_not_padded), {&operand, &dynamic_size}); const std::string hlo_text_padded = absl::StrReplaceAll(hlo_text, {{"INDICES_BOUND", "4"}}); auto module_padded = GetHloModule(hlo_text_padded); Literal operand_padded = LiteralUtil::CreateR2<int32_t>( {{1, 2, 3, 4}, {4, 5, 6, 7}, {1, 2, 3, 4}, {4, 5, 6, 7}}); DynamicPadder padder; TF_CHECK_OK(padder.Run(module_padded.get()).status()); TF_ASSERT_OK_AND_ASSIGN(Literal padded, PadAndExecute(std::move(module_padded), {&operand_padded, &dynamic_size})); EXPECT_EQ(padded, not_padded); }

// Clamp on a dynamic tensor then reduce: only the first 3 of 5 elements are
// live (1+2+3 = 6); the padded tail must not contribute.
XLA_TEST_F(ExecutionTest, DynamicDimensionClamp) { const std::string hlo_text = R"( HloModule TensorFlowTenaryV1 update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY main { param = s32[5] parameter(0) const = s32[] 
constant(3) param_padded = s32[<=5] set-dimension-size(param, const), dimensions={0} clamp = s32[<=5] clamp(param_padded, param_padded, param_padded) init = s32[] constant(0) ROOT reduce = s32[] reduce(clamp, init), dimensions={0}, to_apply=update_s32 } )"; Literal operand = LiteralUtil::CreateR1<int32_t>({1, 2, 3, 4, 5}); auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {&operand})); Literal expected = LiteralUtil::CreateR0<int32_t>(6); EXPECT_EQ(result, expected); }

// Concatenate dynamic + static + dynamic operands: the live prefixes
// ({1,2}, {3,4,5}, {6,7}) must be packed contiguously into the s32[<=9]
// result (checked with the raw output buffer, slice_dynamic_output=false).
XLA_TEST_F(ExecutionTest, DynamicConcat) { const std::string hlo_text = R"( HloModule DynamicConcat ENTRY main { param_0 = s32[3] parameter(0) param_1 = s32[3] parameter(1) param_2 = s32[3] parameter(2) size = s32[] constant(2) param_padded_0 = s32[<=3] set-dimension-size(param_0, size), dimensions={0} param_padded_2 = s32[<=3] set-dimension-size(param_2, size), dimensions={0} ROOT %concatenate = s32[<=9] concatenate(s32[<=3] param_padded_0, s32[<=3] param_1, s32[<=3] param_padded_2), dimensions={0} } )"; Literal operand_0 = LiteralUtil::CreateR1<int32_t>({1, 2, -1}); Literal operand_1 = LiteralUtil::CreateR1<int32_t>({3, 4, 5}); Literal operand_2 = LiteralUtil::CreateR1<int32_t>({6, 7, -1}); auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN( Literal result, PadAndExecute(std::move(module), {&operand_0, &operand_1, &operand_2}, false)); result.SetDynamicSize(0, 7); Literal expected = LiteralUtil::CreateR1<int32_t>({1, 2, 3, 4, 5, 6, 7}); EXPECT_EQ(result, expected); }

// Reverse of a single dynamic dimension: only the live prefix {1,2} is
// reversed (to {2,1}); the padded element must not leak into the result.
XLA_TEST_F(ExecutionTest, DynamicReverseSingleDim) { const std::string hlo_text = R"( HloModule DynamicConcat ENTRY main { param_0 = s32[3] parameter(0) size = s32[] constant(2) param_padded_0 = s32[<=3] set-dimension-size(param_0, size), dimensions={0} ROOT %reverse = s32[<=3] reverse(s32[<=3] param_padded_0), dimensions={0} } )"; Literal operand_0 = LiteralUtil::CreateR1<int32_t>({1, 2, -1}); auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN( Literal 
result, PadAndExecute(std::move(module), {&operand_0}, false)); result.SetDynamicSize(0, 2); Literal expected = LiteralUtil::CreateR1<int32_t>({2, 1}); EXPECT_EQ(result, expected); }

// Reverse over two dynamic dimensions at once: the live 2x2 corner
// {{1,2},{3,4}} reverses to {{4,3},{2,1}}.
XLA_TEST_F(ExecutionTest, DynamicReverseMultiDims) { const std::string hlo_text = R"( HloModule DynamicConcat ENTRY main { param_0 = s32[3, 3] parameter(0) size = s32[] constant(2) param_padded_0 = s32[<=3, 3] set-dimension-size(param_0, size), dimensions={0} param_padded_1 = s32[<=3, <=3] set-dimension-size(param_padded_0, size), dimensions={1} ROOT %reverse = s32[<=3, <=3] reverse(s32[<=3, <=3] param_padded_1), dimensions={0, 1} } )"; Literal operand_0 = LiteralUtil::CreateR2<int32_t>( {{1, 2, -1}, {3, 4, -1}, {-1, -1, -1}}); auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN( Literal result, PadAndExecute(std::move(module), {&operand_0}, false)); result.SetDynamicSize(0, 2); result.SetDynamicSize(1, 2); Literal expected = LiteralUtil::CreateR2<int32_t>({{4, 3}, {2, 1}}); EXPECT_EQ(result, expected); }

// Reduce over one dynamic dimension: with size 3 only 1+2+3 = 6 counts.
XLA_TEST_F(ExecutionTest, DynamicDimensionReduce) { const std::string hlo_text = R"( HloModule TensorFlowScatterV1 update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY main { param = s32[5] parameter(0) const = s32[] constant(3) param_padded = s32[<=5] set-dimension-size(param, const), dimensions={0} init = s32[] constant(0) ROOT reduce = s32[] reduce(param_padded, init), dimensions={0}, to_apply=update_s32 } )"; Literal operand = LiteralUtil::CreateR1<int32_t>({1, 2, 3, 4, 5}); auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {&operand})); Literal expected = LiteralUtil::CreateR0<int32_t>(6); EXPECT_EQ(result, expected); }

// Dynamic dimension in the input's minor-most varying position is flattened
// by a reshape to s32[<=10]; only the live 3+3 elements (1+2+3 + 2+4+6 = 18)
// contribute to the reduction.
XLA_TEST_F(ExecutionTest, InputMinorDimensionReshape) { const std::string hlo_text = R"( HloModule TensorFlowScatterV1 update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] 
parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY main { param = s32[1, 2, 5, 1] parameter(0) const = s32[] constant(3) param_padded = s32[1, 2, <=5, 1] set-dimension-size(param, const), dimensions={2} reshaped = s32[<=10] reshape(param_padded) init = s32[] constant(0) ROOT reduce = s32[] reduce(reshaped, init), dimensions={0}, to_apply=update_s32 } )"; Literal operand = LiteralUtil::CreateR4<int32_t>( {{{{1}, {2}, {3}, {4}, {5}}, {{2}, {4}, {6}, {7}, {8}}}}); auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {&operand})); Literal expected = LiteralUtil::CreateR0<int32_t>(18); EXPECT_EQ(result, expected); }

// Slicing a single element out of a dynamic tensor: slice [0:1] is entirely
// inside the live region, so the result is just {0}.
XLA_TEST_F(ExecutionTest, SliceSingleElement) { const std::string hlo_text = R"( HloModule Slicing ENTRY main { param = s32[5] parameter(0) const = s32[] constant(3) param_padded = s32[<=5] set-dimension-size(param, const), dimensions={0} ROOT slice = s32[1]{0} slice(param_padded), slice={[0:1]} } )"; Literal operand = LiteralUtil::CreateR1<int32_t>({0, 1, 2, 3, 4}); auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {&operand})); Literal expected = LiteralUtil::CreateR1<int32_t>({0}); EXPECT_EQ(result, expected); }

// Reshape that *introduces* a dynamic minor dimension in the output
// (inferred_dimension=1): s32[<=12] with 8 live elements becomes
// s32[2,<=3,2], then a reduce over the dynamic dim yields {{2,4},{10,12}}.
XLA_TEST_F(ExecutionTest, OutputMinorDimensionReshape) { const std::string hlo_text = R"( HloModule TensorFlowScatterV1 update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY main { param = s32[12] parameter(0) const = s32[] constant(8) param_padded = s32[<=12] set-dimension-size(param, const), dimensions={0} reshaped = s32[2, <=3, 2] reshape(param_padded), inferred_dimension=1 init = s32[] constant(0) ROOT reduce = s32[2, 2] reduce(reshaped, init), dimensions={1}, to_apply=update_s32 } )"; Literal operand = LiteralUtil::CreateR1<int32_t>({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}); auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN(Literal 
result, PadAndExecute(std::move(module), {&operand})); Literal expected = LiteralUtil::CreateR2<int32_t>({{2, 4}, {10, 12}}); EXPECT_EQ(result, expected); }

// Like OutputMinorDimensionReshape, but the unchanged (static) dimension is
// major-most in the output: s32[2,<=6] -> s32[2,2,<=3], reduce over dim 2.
XLA_TEST_F(ExecutionTest, OutputMinorDimensionReshapeWithUnchangedDimMajor) { const std::string hlo_text = R"( HloModule TensorFlowScatterV1 update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY main { param = s32[2, 6] parameter(0) const = s32[] constant(4) param_padded = s32[2, <=6] set-dimension-size(param, const), dimensions={1} reshaped = s32[2, 2, <=3] reshape(param_padded), inferred_dimension=2 init = s32[] constant(0) ROOT reduce = s32[2, 2] reduce(reshaped, init), dimensions={2}, to_apply=update_s32 } )"; Literal operand = LiteralUtil::CreateR2<int32_t>( {{0, 1, 2, 3, 4, 5}, {6, 7, 8, 9, 10, 11}}); auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {&operand})); Literal expected = LiteralUtil::CreateR2<int32_t>({{1, 5}, {13, 17}}); EXPECT_EQ(result, expected); }

// Variant with the unchanged (static) dimension minor-most in the output:
// s32[<=6,2] -> s32[2,<=3,2], reduce over the dynamic dim 1.
XLA_TEST_F(ExecutionTest, OutputMinorDimensionReshapeWithUnchangedDimMinor) { const std::string hlo_text = R"( HloModule TensorFlowScatterV1 update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY main { param = s32[6, 2] parameter(0) const = s32[] constant(4) param_padded = s32[<=6, 2] set-dimension-size(param, const), dimensions={0} reshaped = s32[2, <=3, 2] reshape(param_padded), inferred_dimension=1 init = s32[] constant(0) ROOT reduce = s32[2, 2] reduce(reshaped, init), dimensions={1}, to_apply=update_s32 } )"; Literal operand = LiteralUtil::CreateR2<int32_t>( {{0, 1}, {2, 3}, {4, 5}, {6, 7}, {8, 9}, {10, 11}}); auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {&operand})); Literal expected = LiteralUtil::CreateR2<int32_t>({{2, 4}, {10, 12}}); EXPECT_EQ(result, 
expected); }

// Convolution custom call ("DynamicConvolutionForward") with a dynamic input
// feature dimension: all 5 features are live, so the 1x5x1 all-ones kernel
// sums them to 15.
XLA_TEST_F(ExecutionTest, DynamicInputFeature) { const std::string hlo_text = R"( HloModule DynamicInputFeature ENTRY main { param = f32[1, 1, 5] parameter(0) const = s32[] constant(5) one = f32[] constant(1) kernel = f32[1,5,1]{2,1,0} broadcast(f32[] one), dimensions={} param_dynamic = f32[1,1,<=5] set-dimension-size(param, const), dimensions={2} ROOT conv = f32[1, 1, 1]{2,1,0} custom-call(f32[1, 1, <=5] param_dynamic, f32[1,<=5,1]{2,1,0} kernel), window={size=1 pad=0_0}, dim_labels=b0f_0io->b0f, padding_type=PADDING_VALID, custom_call_target="DynamicConvolutionForward" } )"; Literal operand = LiteralUtil::CreateR3<float>({{{1, 2, 3, 4, 5}}}); auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {&operand})); Literal expected = LiteralUtil::CreateR3<float>({{{15}}}); EXPECT_EQ(result, expected); }

// GPU-only IR-generation smoke test: an 8-D fully-dynamic reduce must still
// compile to valid IR (only checks that codegen reaches `ret void`).
XLA_TEST_F(LlvmIrGenTestBase, LargeDynamicInput) { #ifndef XLA_TEST_BACKEND_GPU GTEST_SKIP(); #endif const std::string hlo_text = R"( HloModule LargeDynamicInput add (lhs: f32[], rhs: f32[]) -> f32[] { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } ENTRY main { param = f32[<=20,<=20,<=20,<=20,<=20,<=20,<=20,<=20] parameter(0) zero = f32[] constant(0) ROOT out = reduce(param, zero), to_apply=add, dimensions={0,1,2,3,4,5,6,7} } )"; CompileAndVerifyIr(hlo_text, R"( CHECK: ret void )", true); }

// Reshape that keeps the dynamic dimension in place (s32[1,2,<=5,1] ->
// s32[2,<=5]); per-row reductions see only the 3 live elements each.
XLA_TEST_F(ExecutionTest, DynamicDimensionReshapeUnchanged) { const std::string hlo_text = R"( HloModule TensorFlowScatterV1 update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY main { param = s32[1, 2, 5, 1] parameter(0) const = s32[] constant(3) param_padded = s32[1, 2, <=5, 1] set-dimension-size(param, const), dimensions={2} reshaped = s32[2, <=5] reshape(param_padded) init = s32[] constant(0) ROOT reduce = s32[2] reduce(reshaped, init), dimensions={1}, to_apply=update_s32 } )"; Literal operand 
= LiteralUtil::CreateR4<int32_t>( {{{{1}, {2}, {3}, {4}, {5}}, {{2}, {4}, {6}, {7}, {8}}}}); auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {&operand})); Literal expected = LiteralUtil::CreateR1<int32_t>({6, 12}); EXPECT_EQ(result, expected); }

// Degenerate case: dynamic size 0 on dim 0 makes the whole tensor empty, so
// the reduction over the reshaped s32[<=10] yields the init value 0.
XLA_TEST_F(ExecutionTest, DegeneratedDimension) { const std::string hlo_text = R"( HloModule TensorFlowScatterV1 update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY main { param = s32[1, 2, 5, 1] parameter(0) size = s32[] constant(0) param_padded = s32[<=1, 2, 5, 1] set-dimension-size(param, size), dimensions={0} reshaped = s32[<=10] reshape(param_padded) init = s32[] constant(0) ROOT reduce = s32[] reduce(reshaped, init), dimensions={0}, to_apply=update_s32 } )"; Literal operand = LiteralUtil::CreateR4<int32_t>( {{{{1}, {2}, {3}, {4}, {5}}, {{2}, {4}, {6}, {7}, {8}}}}); auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {&operand})); Literal expected = LiteralUtil::CreateR0<int32_t>(0); EXPECT_EQ(result, expected); }

// Reshape that simultaneously splits one dynamic dimension and combines it
// with another (s32[<=4,2,<=2] -> s32[2,<=2,<=4], inferred_dimension=1); only
// the live values 0..3 contribute to the final reduction (sum 6).
XLA_TEST_F(ExecutionTest, ReshapeSplitCombineSameTime) { const std::string hlo_text = R"( HloModule TensorFlowScatterV1 update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY main { param = s32[4, 2, 2] parameter(0) two = s32[] constant(2) one = s32[] constant(1) param_padded_partial = s32[<=4, 2, 2] set-dimension-size(param, two), dimensions={0} param_padded_dynamic = s32[<=4, 2, <=2] set-dimension-size(param_padded_partial, one), dimensions={2} reshaped = s32[2, <=2, <=4] reshape(param_padded_dynamic), inferred_dimension=1 init = s32[] constant(0) ROOT reduce = s32[] reduce(reshaped, init), dimensions={0, 1, 2}, to_apply=update_s32 } )"; Literal operand = LiteralUtil::CreateR3<int32_t>({{{0, -1}, {1, -1}}, {{2, -1}, {3, -1}}, 
{{-1, -1}, {-1, -1}}, {{-1, -1}, {-1, -1}}}); auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {&operand})); Literal expected = LiteralUtil::CreateR0<int32_t>(6); EXPECT_EQ(result, expected); }

// Reshape combining a dynamic middle dimension into an inferred dynamic
// output dimension (s32[2,<=4,4] -> s32[<=16,2]); live values 1..16 sum
// to 136, padding (-1) must be excluded.
XLA_TEST_F(ExecutionTest, ReshapeComplicated) { const std::string hlo_text = R"( HloModule TensorFlowScatterV1 update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY main { param = s32[2, 4, 4] parameter(0) two = s32[] constant(2) param_padded_dynamic = s32[2, <=4, 4] set-dimension-size(param, two), dimensions={1} reshaped = s32[<=16, 2] reshape(param_padded_dynamic), inferred_dimension=0 init = s32[] constant(0) ROOT reduce = s32[] reduce(reshaped, init), dimensions={0, 1}, to_apply=update_s32 } )"; Literal operand = LiteralUtil::CreateR3<int32_t>( {{{1, 2, 3, 4}, {5, 6, 7, 8}, {-1, -1, -1, -1}, {-1, -1, -1, -1}}, {{9, 10, 11, 12}, {13, 14, 15, 16}, {-1, -1, -1, -1}, {-1, -1, -1, -1}}}); auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {&operand})); Literal expected = LiteralUtil::CreateR0<int32_t>(136); EXPECT_EQ(result, expected); }

// Stack-push pattern: a while loop grows a dynamic s32[<=4,2] buffer from
// size 0 to 3 via set-dimension-size + dynamic-update-slice, pushing the
// current size each iteration; the final reduce over dim 0 sees rows
// {0,0},{1,1},{2,2} summing to {3,3}.
XLA_TEST_F(ExecutionTest, WhileLoopStack) { const std::string hlo_text = R"( HloModule module update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } body { stack = (s32[<=4,2]) parameter(0) stack_buffer = s32[<=4, 2] get-tuple-element(stack), index=0 stack_size = s32[] get-dimension-size(stack_buffer), dimensions={0} zero = s32[] constant(0) one = s32[] constant(1) new_data = s32[1, 2] broadcast(s32[] stack_size), dimensions={} new_stack_size = s32[] add(stack_size, one) new_stack_buffer = s32[<=4, 2] set-dimension-size(stack_buffer, new_stack_size), dimensions={0} new_stack = s32[<=4, 2] dynamic-update-slice(new_stack_buffer, new_data, stack_size, zero) ROOT 
new_stack_tuple = (s32[<=4,2]) tuple(new_stack) } condition { stack = (s32[<=4,2]) parameter(0) stack_buffer = s32[<=4, 2] get-tuple-element(stack), index=0 stack_size = s32[] get-dimension-size(stack_buffer), dimensions={0} three = s32[] constant(3) ROOT less-than = pred[] compare(s32[] stack_size, s32[] three), direction=LT } ENTRY entry { zero = s32[] constant(0) pad = s32[] constant(-1) stack_buffer_input = s32[4, 2] broadcast(s32[] pad), dimensions={} stack_buffer_input_dynamic = s32[<=4, 2] set-dimension-size(stack_buffer_input, zero), dimensions={0} input_tuple = (s32[<=4 ,2]) tuple(stack_buffer_input_dynamic) while = (s32[<=4, 2]) while(input_tuple), body=body, condition=condition stack_buffer = s32[<=4, 2] get-tuple-element(while), index=0 ROOT reduce = s32[2] reduce(stack_buffer, zero), dimensions={0}, to_apply=update_s32 } )"; auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {})); Literal expected = LiteralUtil::CreateR1<int32_t>({{3, 3}}); EXPECT_EQ(result, expected); }

// Adding two dynamic tensors whose dynamic sizes differ (1 vs. 3): the
// expected result {9,9} matches the larger live region driving the output
// size, with the smaller operand's padded rows participating in the add.
XLA_TEST_F(ExecutionTest, DynamicAddWithImplicitBroadcast) { const std::string hlo_text = R"( HloModule module update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY entry { zero = s32[] constant(0) one = s32[] constant(1) two = s32[] constant(2) three = s32[] constant(3) input1 = s32[4, 2] iota(), iota_dimension=0 ones = s32[4, 2] broadcast(one), dimensions={} input1_added = s32[4, 2] add(input1, ones) input1_dynamic = s32[<=4, 2] set-dimension-size(input1_added, one), dimensions={0} input2 = s32[4, 2] broadcast(two), dimensions={} input2_dynamic = s32[<=4, 2] set-dimension-size(input2, three), dimensions={0} add = s32[<=4, 2] add(input1_dynamic, input2_dynamic) ROOT reduce = s32[2] reduce(add, zero), dimensions={0}, to_apply=update_s32 } )"; auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN(Literal result, 
PadAndExecute(std::move(module), {})); Literal expected = LiteralUtil::CreateR1<int32_t>({{9, 9}}); EXPECT_EQ(result, expected); }

// Companion to the broadcast test: here the *first* operand has the larger
// dynamic size (3 vs. 2) and the expected reduce result is {6,6}.
XLA_TEST_F(ExecutionTest, DynamicAddWithImplicitSlice) { const std::string hlo_text = R"( HloModule module update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY entry { zero = s32[] constant(0) one = s32[] constant(1) two = s32[] constant(2) three = s32[] constant(3) input1 = s32[4, 2] broadcast(one), dimensions={} input1_dynamic = s32[<=4, 2] set-dimension-size(input1, three), dimensions={0} input2 = s32[4, 2] broadcast(two), dimensions={} input2_dynamic = s32[<=4, 2] set-dimension-size(input2, two), dimensions={0} add = s32[<=4, 2] add(input1_dynamic, input2_dynamic) ROOT reduce = s32[2] reduce(add, zero), dimensions={0}, to_apply=update_s32 } )"; auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {})); Literal expected = LiteralUtil::CreateR1<int32_t>({{6, 6}}); EXPECT_EQ(result, expected); }

// Stack-pop pattern: the loop shrinks the dynamic size of an all-ones
// s32[<=4,2] buffer from 4 down to 2 via set-dimension-size; the final reduce
// sums the two remaining live rows to {2,2}.
XLA_TEST_F(ExecutionTest, DynamicStackPop) { const std::string hlo_text = R"( HloModule module update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } body { param_tuple = (s32[<=4,2]) parameter(0) param = s32[<=4, 2] get-tuple-element(param_tuple), index=0 one = s32[] constant(1) size = s32[] get-dimension-size(param), dimensions={0} new_size = s32[] subtract(size, one) output = s32[<=4, 2] set-dimension-size(param, new_size), dimensions={0} ROOT root = (s32[<=4, 2]) tuple(output) } condition { stack = (s32[<=4,2]) parameter(0) stack_buffer = s32[<=4,2] get-tuple-element(stack), index=0 stack_size = s32[] get-dimension-size(stack_buffer), dimensions={0} two = s32[] constant(2) ROOT greater-than = pred[] compare(s32[] stack_size, s32[] two), direction=GT } ENTRY entry { one = s32[] constant(1) zero = s32[] constant(0) four = s32[] 
constant(4) stack_buffer_input = s32[4, 2] broadcast(s32[] one), dimensions={} stack_buffer_dynamic = s32[<=4, 2] set-dimension-size(stack_buffer_input, four), dimensions={0} input_tuple = (s32[<=4, 2]) tuple(stack_buffer_dynamic) while = (s32[<=4, 2]) while(input_tuple), body=body, condition=condition stack_buffer = s32[<=4, 2] get-tuple-element(while), index=0 ROOT reduce = s32[2] reduce(stack_buffer, zero), dimensions={0}, to_apply=update_s32 } )"; auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {})); Literal expected = LiteralUtil::CreateR1<int32_t>({{2, 2}}); EXPECT_EQ(result, expected); }

// Static reshape of a tensor with two dynamic dimensions (dims 1 and 2, both
// size 2): flattening to s32[<=18] keeps only the live 2x2x2 block, whose
// elements (0+1+3+4 per batch) sum to 16.
XLA_TEST_F(ExecutionTest, DoubleDynamicDimension) { const std::string hlo_text = R"( HloModule TensorFlowScatterV1 update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY main { param = s32[2, 3, 3] parameter(0) size = s32[] constant(2) param_padded_partial = s32[2, <=3, 3] set-dimension-size(param, size), dimensions={1} param_padded = s32[2, 3, <=3] set-dimension-size(param_padded_partial, size), dimensions={2} reshaped = s32[<=18] reshape(param_padded) init = s32[] constant(0) ROOT reduce = s32[] reduce(reshaped, init), dimensions={0}, to_apply=update_s32 } )"; Literal operand = LiteralUtil::CreateR3<int32_t>( {{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}}, {{0, 1, 2}, {3, 4, 5}, {6, 7, 8}}}); auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {&operand})); Literal expected = LiteralUtil::CreateR0<int32_t>(16); EXPECT_EQ(result, expected); }

// dynamic-reshape with an explicit output size (8): the live 2x2x2 block of a
// doubly-dynamic input is packed contiguously into s32[<=18] as
// {0,1,3,4,0,1,3,4}.
XLA_TEST_F(ExecutionTest, DynamicReshapeDoubleDynamicDimensions) { const std::string hlo_text = R"( HloModule TensorFlowScatterV1 ENTRY main { param = s32[2, 3, 3] parameter(0) size = s32[] constant(2) param_padded_partial = s32[2, <=3, 3] set-dimension-size(param, size), dimensions={1} param_padded = s32[2, <=3, <=3] 
set-dimension-size(param_padded_partial, size), dimensions={2} result_size = s32[] constant(8) ROOT reshaped = s32[<=18] dynamic-reshape(param_padded, result_size) } )"; Literal operand = LiteralUtil::CreateR3<int32_t>( {{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}}, {{0, 1, 2}, {3, 4, 5}, {6, 7, 8}}}); auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {&operand}, false)); result.SetDynamicSize(0, 8); Literal expected = LiteralUtil::CreateR1<int32_t>({0, 1, 3, 4, 0, 1, 3, 4}); EXPECT_EQ(result, expected); }

// Inverse direction: dynamic-reshape a 1-D dynamic tensor (8 live of 18)
// into a result with two dynamic output dimensions, s32[2,<=3,<=3] with
// per-dim sizes (2,2,2), yielding {{{0,1},{3,4}},{{0,1},{3,4}}}.
XLA_TEST_F(ExecutionTest, DynamicReshapeOutputDoubleDynamicDimensions) { const std::string hlo_text = R"( HloModule TensorFlowScatterV1 ENTRY main { param = s32[18] parameter(0) eight = s32[] constant(8) param_dynamic = s32[<=18] set-dimension-size(param, eight), dimensions={0} two = s32[] constant(2) ROOT reshaped = s32[2, <=3, <=3] dynamic-reshape(param_dynamic, two, two, two) } )"; Literal operand = LiteralUtil::CreateR1<int32_t>( {0, 1, 3, 4, 0, 1, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}); auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {&operand}, false)); VLOG(1) << " result: " << result.ToString(); result.SetDynamicSize(1, 2); result.SetDynamicSize(2, 2); Literal expected = LiteralUtil::CreateR3<int32_t>({{{0, 1}, {3, 4}}, {{0, 1}, {3, 4}}}); EXPECT_EQ(result, expected); }

// dynamic-reshape from a triply-dynamic 3-D input (sizes 2,3,3 within bounds
// 3,4,4) into a doubly-dynamic 2-D output with explicit sizes (3,6): the 18
// live values appear row-major in the s32[<=6,<=8] result.
XLA_TEST_F(ExecutionTest, DynamicReshapeComplicated) { const std::string hlo_text = R"( HloModule TensorFlowScatterV1 ENTRY main { param = s32[3, 4, 4] parameter(0) two = s32[] constant(2) param_dynamic = s32[<=3, 4, 4] set-dimension-size(param, two), dimensions={0} three = s32[] constant(3) param_dynamic1 = s32[<=3, <=4, 4] set-dimension-size(param_dynamic, three), dimensions={1} param_dynamic2 = s32[<=3, <=4, <=4] set-dimension-size(param_dynamic1, three), dimensions={2} six = s32[] constant(6) ROOT reshaped = s32[<=6, <=8] dynamic-reshape(param_dynamic2, three, six) } 
)"; Literal operand = LiteralUtil::CreateR3<int32_t>( {{{0, 1, 2, -1}, {3, 4, 5, -1}, {6, 7, 8, -1}, {-1, -1, -1, -1}}, {{9, 8, 7, -1}, {6, 5, 4, -1}, {3, 2, 1, -1}, {-1, -1, -1, -1}}, {{-1, -1, -1, -1}, {-1, -1, -1, -1}, {-1, -1, -1, -1}, {-1, -1, -1, -1}}}); auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {&operand}, false)); result.SetDynamicSize(0, 3); result.SetDynamicSize(1, 6); Literal expected = LiteralUtil::CreateR2<int32_t>( {{0, 1, 2, 3, 4, 5}, {6, 7, 8, 9, 8, 7}, {6, 5, 4, 3, 2, 1}}); EXPECT_EQ(result, expected); } XLA_TEST_F(ExecutionTest, SetGetDimensionSize) { const std::string hlo_text = R"( HloModule TensorFlowScatterV1 ENTRY main { param = s32[3] parameter(0) size = s32[] constant(2) param_dynamic_size = s32[3] set-dimension-size(param, size), dimensions={0} ROOT gds = s32[] get-dimension-size(param_dynamic_size), dimensions={0} } )"; Literal operand = LiteralUtil::CreateR1<int32_t>({1, 2, 3}); auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {&operand})); Literal expected = LiteralUtil::CreateR0<int32_t>(2); EXPECT_EQ(result, expected); } XLA_TEST_F(ExecutionTest, DynamicSort) { const std::string hlo_text = R"( HloModule TEST update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } %compare-greater-than (lhs: s32[], rhs: s32[]) -> pred[] { %lhs = s32[] parameter(0) %rhs = s32[] parameter(1) ROOT %compare = pred[] compare(s32[] %lhs, s32[] %rhs), direction=GT } ENTRY main { param = s32[4] parameter(0) size = s32[] constant(3) param_dynamic_size = s32[<=4] set-dimension-size(param, size), dimensions={0} ROOT sort = s32[<=4]{0} sort(s32[4]{0} %param_dynamic_size), dimensions={0}, is_stable=false, to_apply=%compare-greater-than } )"; Literal operand = LiteralUtil::CreateR1<int32_t>({1, 4, 3, 2}); auto module = GetHloModule(hlo_text); 
TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {&operand}, false)); Literal expected = LiteralUtil::CreateR1<int32_t>({4, 3, 1, 2}); EXPECT_EQ(result, expected); } XLA_TEST_F(ExecutionTest, DynamicPad) { const std::string hlo_text = R"( HloModule TEST update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY main { param = s32[4] parameter(0) size = s32[] constant(3) padding = s32[] constant(2) param_dynamic = s32[<=4] set-dimension-size(param, size), dimensions={0} pad = s32[<=6] pad(param_dynamic, padding), padding=1_1 init = s32[] constant(0) ROOT reduce = s32[] reduce(pad, init), dimensions={0}, to_apply=update_s32 } )"; Literal operand = LiteralUtil::CreateR1<int32_t>({1, 4, 3, 5}); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text)); TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {&operand}, false)); Literal expected = LiteralUtil::CreateR0<int32_t>(12); EXPECT_EQ(result, expected); } XLA_TEST_F(ExecutionTest, DynamicPadInteriorPadding) { const std::string hlo_text = R"( HloModule TEST update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY main { param = s32[4] parameter(0) size = s32[] constant(3) padding = s32[] constant(2) param_dynamic = s32[<=4] set-dimension-size(param, size), dimensions={0} pad = s32[<=7] pad(param_dynamic, padding), padding=0_0_1 init = s32[] constant(0) ROOT reduce = s32[] reduce(pad, init), dimensions={0}, to_apply=update_s32 } )"; Literal operand = LiteralUtil::CreateR1<int32_t>({1, 4, 3, 5}); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text)); TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {&operand}, false)); Literal expected = LiteralUtil::CreateR0<int32_t>(12); EXPECT_EQ(result, expected); } XLA_TEST_F(ExecutionTest, DynamicConditionalDimension) 
{ const std::string hlo_text = R"( HloModule module update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } true_branch { true_param = (s32[<=3,2]) parameter(0) param = s32[<=3, 2] get-tuple-element(true_param), index=0 add = s32[<=3,2] add(param, param) ROOT true_tuple = (s32[<=3,2], s32[<=3,2]) tuple(add, add) } false_branch { false_param = (s32[<=3,2]) parameter(0) param = s32[<=3, 2] get-tuple-element(false_param), index=0 add = s32[<=3,2] add(param, param) ROOT false_tuple = (s32[<=3,2], s32[<=3,2]) tuple(add, add) } ENTRY entry { param0 = s32[3,2] parameter(0) size = s32[] constant(2) branch = pred[] constant(false) param_dynamic = s32[<=3, 2] set-dimension-size(param0, size), dimensions={0} param_tuple = (s32[<=3 ,2]) tuple(param_dynamic) conditional = (s32[<=3, 2], s32[<=3, 2]) conditional(branch, param_tuple, param_tuple), true_computation=true_branch, false_computation=false_branch gte0 = s32[<=3,2] get-tuple-element(conditional), index=1 init = s32[] constant(0) ROOT reduce = s32[2] reduce(gte0, init), dimensions={0}, to_apply=update_s32 } )"; Literal operand = LiteralUtil::CreateR2<int32_t>({{0, 1}, {2, 3}, {4, 5}}); auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {&operand}, false)); Literal expected = LiteralUtil::CreateR1<int32_t>({4, 8}); EXPECT_EQ(result, expected); } XLA_TEST_F(ExecutionTest, DynamicTupleSort) { const std::string hlo_text = R"( HloModule TEST %compare-greater-than (lhs: s32[], rhs: s32[], lhs_2: s32[], lhs_2: s32[]) -> pred[] { %lhs = s32[] parameter(0) %rhs = s32[] parameter(1) %lhs_2 = s32[] parameter(2) %rhs_2 = s32[] parameter(3) ROOT %compare = pred[] compare(s32[] %lhs, s32[] %rhs), direction=GT } update_s32 (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY main { param = s32[3] parameter(0) size = s32[] constant(2) 
param_dynamic_size = s32[<=3] set-dimension-size(param, size), dimensions={0} sort = (s32[<=3]{0}, s32[<=3]{0}) sort(s32[<=3]{0} %param_dynamic_size, s32[<=3]{0} %param_dynamic_size), dimensions={0}, is_stable=true, to_apply=%compare-greater-than ROOT get-tuple-element = s32[<=3]{0} get-tuple-element((s32[<=3]{0}, s32[<=3]{0}) %sort), index=0 } )"; Literal operand = LiteralUtil::CreateR1<int32_t>({0, 4, 2}); auto module = GetHloModule(hlo_text); TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {&operand}, false)); Literal expected = LiteralUtil::CreateR1<int32_t>({4, 0, 2}); EXPECT_EQ(result, expected); } namespace op = xla::testing::opcode_matchers; class HloDimensionSizeLegalizerTest : public HloTestBase { protected: HloDimensionSizeLegalizerTest() {} }; TEST_F(HloDimensionSizeLegalizerTest, Ok) { auto module = ParseAndReturnVerifiedModule(R"( HloModule _ ENTRY gds { p = s32[3,4] parameter(0) size0 = s32[] get-dimension-size(p), dimensions={0} size1 = s32[] get-dimension-size(p), dimensions={1} ROOT mul = s32[] multiply(size0, size1) })") .value(); DynamicPadder pass; EXPECT_TRUE(pass.Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Multiply(op::Constant(), op::Constant())); } TEST_F(HloDimensionSizeLegalizerTest, GetSetSetDimensionSizeRewriter) { auto module = ParseAndReturnVerifiedModule(R"( HloModule _ ENTRY gds { p = s32[3,4] parameter(0) size0 = s32[] get-dimension-size(p), dimensions={0} p_copy = s32[3,4] copy(p) p_copy_dynamic = s32[<=3, 4] set-dimension-size(p_copy, size0), dimensions={0} size1 = s32[] get-dimension-size(p_copy_dynamic), dimensions={0} ROOT mul = s32[] multiply(size0, size1) })") .value(); DynamicPadder pass; EXPECT_TRUE(pass.Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Multiply(op::Constant(), op::Constant())); } TEST_F(HloDimensionSizeLegalizerTest, IllegalType) { auto module = ParseAndReturnUnverifiedModule(R"( HloModule 
_ ENTRY gds { p = s32[3]{0} parameter(0) ROOT gds = s64[] get-dimension-size(p), dimensions={0} })") .value(); DynamicPadder pass; EXPECT_FALSE(pass.Run(module.get()).ok()); } TEST_F(HloDimensionSizeLegalizerTest, IllegalDimension) { auto module = ParseAndReturnUnverifiedModule(R"( HloModule _ ENTRY gds { p = f32[2,5] parameter(0) ROOT gds = s32[] get-dimension-size(p), dimensions={2} })") .value(); DynamicPadder pass; EXPECT_FALSE(pass.Run(module.get()).ok()); } class SizeCheckTest : public HloTestBase { protected: SizeCheckTest() {} }; TEST_F(SizeCheckTest, CompileTimeCheckBinaryOpFail) { auto module = ParseAndReturnUnverifiedModule(R"( HloModule _ ENTRY gds { size_0 = s32[] parameter(0) size_1 = s32[] parameter(1) arg = s32[4]{0} parameter(2) dynamic_arg_0 = s32[<=4] set-dimension-size(arg, size_0), dimensions={0} dynamic_arg_1 = s32[<=4] set-dimension-size(arg, size_1), dimensions={0} ROOT add = s32[<=4] add(dynamic_arg_0, dynamic_arg_1) })") .value(); auto options = DynamicPadderOptions(); options.shape_check_mode = DynamicDimensionInference::ShapeCheckMode::kCompileTime; DynamicPadder pass(options); auto status = pass.Run(module.get()).status(); EXPECT_THAT(status.code(), tsl::error::INVALID_ARGUMENT); } TEST_F(SizeCheckTest, CompileTimeCheckBinaryOpPass) { auto module = ParseAndReturnUnverifiedModule(R"( HloModule _ ENTRY gds { size_0 = s32[] parameter(0) size_0_reshape = s32[1] reshape(size_0) size_1 = s32[] reshape(size_0_reshape) arg = s32[4]{0} parameter(1) dynamic_arg_0 = s32[<=4] set-dimension-size(arg, size_0), dimensions={0} dynamic_arg_1 = s32[<=4] set-dimension-size(arg, size_1), dimensions={0} ROOT add = s32[<=4] add(dynamic_arg_0, dynamic_arg_1) })") .value(); auto options = DynamicPadderOptions(); options.shape_check_mode = DynamicDimensionInference::ShapeCheckMode::kCompileTime; DynamicDimensionSimplifier simplifier; EXPECT_TRUE(simplifier.Run(module.get()).ok()); DynamicPadder pass(options); auto status = pass.Run(module.get()).status(); 
EXPECT_TRUE(status.ok()); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_padder.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_padder_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
fb09fbe9-6296-4eab-b4c4-62ab42ce86f5
cpp
tensorflow/tensorflow
reduce_decomposer
third_party/xla/xla/service/reduce_decomposer.cc
third_party/xla/xla/service/reduce_decomposer_test.cc
#include "xla/service/reduce_decomposer.h"

#include <functional>
#include <utility>
#include <vector>

#include "absl/status/status.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/hlo_creation_utils.h"

namespace xla {
namespace {

// Rewrites variadic reductions so that every input operand shares the layout
// of the first input: any operand with a mismatched layout is routed through
// a layout-changing copy before the reduce.
class VariadicReductionLayoutEqualizer : public DfsHloRewriteVisitor {
 public:
  absl::Status HandleReduce(HloInstruction* hlo) override {
    auto* reduce = Cast<HloReduceInstruction>(hlo);
    // inputs()[0] provides the canonical layout; hoisted out of the loop
    // since it is loop-invariant.
    const Shape& reference_shape = reduce->inputs()[0]->shape();
    std::vector<HloInstruction*> new_inputs;
    new_inputs.reserve(reduce->inputs().size());
    bool changed = false;
    for (HloInstruction* input : reduce->inputs()) {
      const Shape& input_shape = input->shape();
      if (reference_shape.layout() != input_shape.layout()) {
        // Copy this operand into the canonical layout.
        Shape copy_shape = ShapeUtil::MakeShapeWithDenseLayout(
            input_shape.element_type(), input_shape.dimensions(),
            reference_shape.layout().minor_to_major());
        new_inputs.push_back(MakeCopyHlo(input, copy_shape));
        changed = true;
      } else {
        new_inputs.push_back(input);
      }
    }
    if (changed) {
      TF_ASSIGN_OR_RETURN(
          auto new_reduce,
          MakeReduceHlo(new_inputs, reduce->init_values(), reduce->dimensions(),
                        reduce->called_computations()[0]));
      TF_RETURN_IF_ERROR(ReplaceInstruction(reduce, new_reduce));
    }
    return absl::OkStatus();
  }
};

// Decomposes a reduce whose declared output layout differs from the layout
// obtained by deleting the reduced dimensions from the input layout: the
// reduce is re-emitted with the derived ("expected") layout and a
// layout-changing copy is appended per output.
class ReduceDecomposerVisitor : public DfsHloRewriteVisitor {
 public:
  explicit ReduceDecomposerVisitor(HloPredicate custom_layout_allowed)
      : custom_layout_allowed_(std::move(custom_layout_allowed)) {}

  absl::Status HandleReduce(HloInstruction* hlo) override {
    auto* reduce = Cast<HloReduceInstruction>(hlo);
    auto shape = reduce->shape();
    // A caller-supplied predicate may exempt this reduce from decomposition.
    if (custom_layout_allowed_ && custom_layout_allowed_(reduce)) {
      return absl::OkStatus();
    }

    std::vector<Shape> expected_shapes(reduce->input_count());
    for (int i = 0; i < reduce->input_count(); i++) {
      expected_shapes[i] = ExpectedOutputShape(reduce, i);
      // VariadicReductionLayoutEqualizer is expected to have run first, so
      // all inputs must already share a layout.
      TF_RET_CHECK(reduce->inputs()[i]->shape().layout() ==
                   reduce->inputs()[0]->shape().layout());
    }

    std::vector<Shape> output_shapes;
    if (shape.IsTuple()) {
      for (int i = 0; i < shape.tuple_shapes_size(); i++) {
        output_shapes.push_back(ShapeUtil::GetTupleElementShape(shape, i));
        TF_RET_CHECK(output_shapes[i].layout() == output_shapes[0].layout());
      }
    } else {
      output_shapes.push_back(shape);
    }
    TF_RET_CHECK(!output_shapes.empty());

    if (ShapeUtil::MakeMaybeTupleShape(expected_shapes) !=
        ShapeUtil::MakeMaybeTupleShape(output_shapes)) {
      // Re-emit the reduce with the expected layout, then copy each output
      // into the layout the original reduce declared.
      TF_ASSIGN_OR_RETURN(
          auto r_prime,
          MakeReduceHlo(reduce->inputs(), reduce->init_values(),
                        reduce->dimensions(),
                        reduce->called_computations()[0]));
      TF_RET_CHECK(r_prime->shape() ==
                   ShapeUtil::MakeMaybeTupleShape(expected_shapes));

      if (!shape.IsTuple()) {
        auto copy = MakeCopyHlo(r_prime, shape);
        TF_RETURN_IF_ERROR(ReplaceInstruction(reduce, copy));
        return absl::OkStatus();
      }

      std::vector<HloInstruction*> copies;
      copies.reserve(reduce->input_count());
      for (int i = 0; i < reduce->input_count(); i++) {
        TF_ASSIGN_OR_RETURN(auto from, GetOutput(r_prime, i));
        copies.push_back(MakeCopyHlo(from, output_shapes[i]));
      }
      TF_RETURN_IF_ERROR(ReplaceInstruction(reduce, MaybeMakeTuple(copies)));
    }
    return absl::OkStatus();
  }

 private:
  // Returns output `idx` of `instr`, inserting a get-tuple-element for
  // tuple-shaped instructions.
  absl::StatusOr<HloInstruction*> GetOutput(HloInstruction* instr, int idx) {
    if (instr->shape().IsTuple()) {
      return MakeGetTupleElementHlo(instr, idx);
    }
    TF_RET_CHECK(idx == 0);
    return instr;
  }

  // The output shape a reduce would naturally have for `input_idx`: the
  // operand shape with the reduced dimensions deleted. (The original version
  // also computed the declared per-output shape here but never used it; that
  // dead computation has been removed.)
  Shape ExpectedOutputShape(HloReduceInstruction* reduce, int input_idx) {
    const Shape& operand_shape = reduce->inputs()[input_idx]->shape();
    return ShapeUtil::DeleteDimensions(reduce->dimensions(), operand_shape);
  }

  HloPredicate custom_layout_allowed_;
};

}  // namespace

// Runs the layout equalizer first (so variadic inputs agree), then the
// decomposer. Returns true if either visitor changed the module.
absl::StatusOr<bool> ReduceDecomposer::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  TF_ASSIGN_OR_RETURN(bool changed1,
                      VariadicReductionLayoutEqualizer{}.RunOnModule(
                          module, execution_threads));
  TF_ASSIGN_OR_RETURN(
      bool changed2,
      ReduceDecomposerVisitor{custom_layout_allowed_}.RunOnModule(
          module, execution_threads));
  return changed1 || changed2;
}

}  // namespace xla
#include "xla/service/reduce_decomposer.h"

#include <functional>
#include <memory>
#include <optional>

#include "xla/service/hlo_parser.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"

namespace xla {
namespace {

class ReduceDecomposerTest : public HloTestBase {};

// A reduce whose output layout is not the natural projection of its input
// layout is rewritten; a permissive custom-layout predicate suppresses the
// rewrite entirely.
TEST_F(ReduceDecomposerTest, ReducePerformsTransposition) {
  const char* hlo = R"( HloModule module add { a = f32[] parameter(0) b = f32[] parameter(1) ROOT out = add(a, b) } ENTRY c { p = f32[5,3,4]{2,1,0} parameter(0) z = f32[] constant(0) ROOT r = f32[5,4]{0,1} reduce(p, z), dimensions={1}, to_apply=add } )";
  // Predicate allows any layout -> expect no change.
  RunAndFilecheckHloRewrite(
      hlo, ReduceDecomposer{[&](const HloInstruction*) { return true; }},
      std::nullopt);
  // Default pass -> expect the rewrite to fire.
  RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{}, R"( )");
}

// A reduce already in the natural layout is left untouched.
TEST_F(ReduceDecomposerTest, ReduceNaturalLayout) {
  const char* hlo = R"( HloModule module add { a = f32[] parameter(0) b = f32[] parameter(1) ROOT out = add(a, b) } ENTRY c { p = f32[5,3,4]{2,1,0} parameter(0) z = f32[] constant(0) ROOT r = reduce(p, z), dimensions={1}, to_apply=add } )";
  RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{}, std::nullopt);
}

// Variadic (argmax-style) reduction with non-descending output layouts is
// decomposed.
TEST_F(ReduceDecomposerTest, VariadicReductionWithTranspose) {
  const char* hlo = R"( HloModule ReduceWithLayoutChangeVariadicDifferent argmax { running_max = f32[] parameter(0) running_max_idx = u32[] parameter(1) current_value = f32[] parameter(2) current_value_idx = u32[] parameter(3) current = (f32[], u32[]) tuple(running_max, running_max_idx) potential = (f32[], u32[]) tuple(current_value, current_value_idx) cmp_code = pred[] compare(current_value, running_max), direction=GT new_max = f32[] select(cmp_code, current_value, running_max) new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx) ROOT out = (f32[], u32[]) tuple(new_max, new_idx) } ENTRY main { arg0 = f32[2,3,4,1024]{3,2,1,0} parameter(0) idxs = u32[2,3,4,1024]{3,2,1,0} parameter(1) constant0 = f32[] constant(0) constant1 = u32[] constant(0) ROOT reduce0 = ( f32[2,3,4]{0,1,2}, u32[2,3,4]{0,1,2} ) reduce(arg0, idxs, constant0,constant1), dimensions={3}, to_apply=argmax } )";
  RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{}, R"( )");
}

// Variadic reduction whose outputs are already in descending layout needs no
// rewrite.
TEST_F(ReduceDecomposerTest, VariadicReductionDescendingLayout) {
  const char* hlo = R"( HloModule ReduceWithLayoutChangeVariadicDifferent argmax { running_max = f32[] parameter(0) running_max_idx = u32[] parameter(1) current_value = f32[] parameter(2) current_value_idx = u32[] parameter(3) current = (f32[], u32[]) tuple(running_max, running_max_idx) potential = (f32[], u32[]) tuple(current_value, current_value_idx) cmp_code = pred[] compare(current_value, running_max), direction=GT new_max = f32[] select(cmp_code, current_value, running_max) new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx) ROOT out = (f32[], u32[]) tuple(new_max, new_idx) } ENTRY main { arg0 = f32[2,3,4,1024]{3,2,1,0} parameter(0) idxs = u32[2,3,4,1024]{3,2,1,0} parameter(1) constant0 = f32[] constant(0) constant1 = u32[] constant(0) ROOT reduce0 = ( f32[2,3,4]{2,1,0}, u32[2,3,4]{2,1,0} ) reduce(arg0, idxs, constant0,constant1), dimensions={3}, to_apply=argmax } )";
  RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{}, std::nullopt);
}

// Inputs with differing layouts trigger the layout equalizer, hence a change.
TEST_F(ReduceDecomposerTest, VariadicReductionInputsDifferentLayout) {
  const char* hlo = R"( HloModule ReduceWithLayoutChangeVariadicDifferent argmax { running_max = f32[] parameter(0) running_max_idx = u32[] parameter(1) current_value = f32[] parameter(2) current_value_idx = u32[] parameter(3) current = (f32[], u32[]) tuple(running_max, running_max_idx) potential = (f32[], u32[]) tuple(current_value, current_value_idx) cmp_code = pred[] compare(current_value, running_max), direction=GT new_max = f32[] select(cmp_code, current_value, running_max) new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx) ROOT out = (f32[], u32[]) tuple(new_max, new_idx) } ENTRY main { arg0 = f32[2,3,4,1024]{3,2,1,0} parameter(0) idxs = u32[2,3,4,1024]{2,1,3,0} parameter(1) constant0 = f32[] constant(0) constant1 = u32[] constant(0) ROOT reduce0 = ( f32[2,3,4]{2,1,0}, u32[2,3,4]{2,1,0} ) reduce(arg0, idxs, constant0,constant1), dimensions={3}, to_apply=argmax } )";
  RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{}, R"( )");
}

}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_decomposer.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_decomposer_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
f470b0a5-df2e-44f5-a4bb-da606c6ec291
cpp
tensorflow/tensorflow
dynamic_index_splitter
third_party/xla/xla/service/dynamic_index_splitter.cc
third_party/xla/xla/service/dynamic_index_splitter_test.cc
#include "xla/service/dynamic_index_splitter.h"

#include <map>

#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"

namespace xla {

// Splits a rank-1 index operand of dynamic-slice / dynamic-update-slice into
// one scalar index per dimension (slice + reshape-to-scalar), and drops the
// op entirely when the base operand is rank 0.
absl::StatusOr<bool> DynamicIndexSplitter::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool changed = false;

  std::vector<HloComputation*> computations =
      module->MakeNonfusionComputations(execution_threads);
  for (HloComputation* computation : computations) {
    for (HloInstruction* dynamic_op : computation->MakeInstructionPostOrder()) {
      // Only dynamic-slice and dynamic-update-slice are of interest.
      switch (dynamic_op->opcode()) {
        case HloOpcode::kDynamicSlice:
        case HloOpcode::kDynamicUpdateSlice:
          break;
        default:
          continue;
      }
      auto parent = dynamic_op->parent();
      bool is_update = dynamic_op->opcode() == HloOpcode::kDynamicUpdateSlice;
      int64_t num_indices = dynamic_op->operand(0)->shape().rank();

      if (num_indices == 0) {
        // Rank-0 base: the op is a no-op. A dynamic-update-slice collapses to
        // its update operand, a dynamic-slice to its base operand.
        int64_t replacement_operand = is_update ? 1 : 0;
        TF_CHECK_OK(parent->ReplaceInstruction(
            dynamic_op, dynamic_op->mutable_operand(replacement_operand)));
        changed = true;
        continue;
      }

      int64_t index_operand_number =
          Cast<HloDynamicIndexInstruction>(dynamic_op)
              ->first_index_operand_number();
      auto index_operand = dynamic_op->mutable_operand(index_operand_number);
      if (ShapeUtil::IsScalar(index_operand->shape())) {
        // Indices are already split into scalars; nothing to do.
        continue;
      }

      TF_RET_CHECK(index_operand->shape().rank() == 1);
      auto index_element_type = index_operand->shape().element_type();

      // Carve one scalar index out of the rank-1 index vector per dimension.
      std::vector<HloInstruction*> scalar_indices;
      scalar_indices.reserve(num_indices);
      for (int64_t d = 0; d < num_indices; ++d) {
        auto index_slice = parent->AddInstruction(HloInstruction::CreateSlice(
            ShapeUtil::MakeShape(index_element_type, {1}), index_operand, {d},
            {d + 1}, {1}));
        auto scalar_index =
            parent->AddInstruction(HloInstruction::CreateReshape(
                ShapeUtil::MakeShape(index_element_type, {}), index_slice));
        scalar_indices.push_back(scalar_index);
      }

      auto new_dynamic_op =
          is_update ? HloInstruction::CreateDynamicUpdateSlice(
                          dynamic_op->shape(), dynamic_op->mutable_operand(0),
                          dynamic_op->mutable_operand(1),
                          absl::MakeSpan(scalar_indices))
                    : HloInstruction::CreateDynamicSlice(
                          dynamic_op->shape(), dynamic_op->mutable_operand(0),
                          absl::MakeSpan(scalar_indices),
                          dynamic_op->dynamic_slice_sizes());
      TF_CHECK_OK(parent->ReplaceWithNewInstruction(dynamic_op,
                                                    std::move(new_dynamic_op)));
      changed = true;
    }
  }
  return changed;
}

}  // namespace xla
#include "xla/service/dynamic_index_splitter.h"

#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"

namespace xla {
namespace {

namespace op = xla::testing::opcode_matchers;

class DynamicIndexSplitterTest : public HloTestBase {};

// A rank-1 index vector on dynamic-slice is split into three scalar indices,
// each produced by slice(i, i+1) + reshape-to-scalar.
TEST_F(DynamicIndexSplitterTest, DynamicSlice) {
  const char* const kDynamicSlice = R"( HloModule DynamicSlice_module ENTRY entry (operand: s32[4,5,6], indices: s32[3]) -> s32[1,1,1] { operand = s32[4,5,6] parameter(0) indices = s32[3] parameter(1) ROOT dynamic-slice = s32[1,1,1] dynamic-slice(operand, indices), dynamic_slice_sizes={1,1,1} } )";

  HloModuleConfig config;
  DebugOptions debug_options = config.debug_options();
  debug_options.set_xla_allow_scalar_index_dynamic_ops(true);
  config.set_debug_options(debug_options);

  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kDynamicSlice, config));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          DynamicIndexSplitter().Run(module.get()));
  EXPECT_TRUE(changed);
  ASSERT_THAT(module->entry_computation()->root_instruction(),
              op::DynamicSlice(op::Parameter(0),
                               op::Reshape(op::Slice(op::Parameter(1))),
                               op::Reshape(op::Slice(op::Parameter(1))),
                               op::Reshape(op::Slice(op::Parameter(1)))));
  // Index i must come from indices[i:i+1].
  for (int i = 0; i < 3; ++i) {
    const HloInstruction* slice = module->entry_computation()
                                      ->root_instruction()
                                      ->operand(i + 1)
                                      ->operand(0);
    EXPECT_EQ(slice->slice_starts(0), i);
    EXPECT_EQ(slice->slice_limits(0), i + 1);
  }
}

// Same splitting applies to dynamic-update-slice; indices start at operand 2.
TEST_F(DynamicIndexSplitterTest, DynamicUpdateSlice) {
  const char* const kDynamicUpdateSlice = R"( HloModule DynamicUpdatedSlice_module ENTRY entry (operand: s32[4,5,6], indices: s32[3], update: s32[1,1,1]) -> s32[4,5,6] { operand = s32[4,5,6] parameter(0) indices = s32[3] parameter(1) update = s32[1,1,1] parameter(2) ROOT dynamic-update-slice = s32[4,5,6] dynamic-update-slice(operand, update, indices) } )";

  HloModuleConfig config;
  DebugOptions debug_options = config.debug_options();
  debug_options.set_xla_allow_scalar_index_dynamic_ops(true);
  config.set_debug_options(debug_options);

  TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(kDynamicUpdateSlice, config));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          DynamicIndexSplitter().Run(module.get()));
  EXPECT_TRUE(changed);
  ASSERT_THAT(module->entry_computation()->root_instruction(),
              op::DynamicUpdateSlice(op::Parameter(0), op::Parameter(2),
                                     op::Reshape(op::Slice(op::Parameter(1))),
                                     op::Reshape(op::Slice(op::Parameter(1))),
                                     op::Reshape(op::Slice(op::Parameter(1)))));
  for (int i = 0; i < 3; ++i) {
    const HloInstruction* slice = module->entry_computation()
                                      ->root_instruction()
                                      ->operand(i + 2)
                                      ->operand(0);
    EXPECT_EQ(slice->slice_starts(0), i);
    EXPECT_EQ(slice->slice_limits(0), i + 1);
  }
}

// Already-scalar indices must be left untouched and the pass reports no
// change.
TEST_F(DynamicIndexSplitterTest, AlreadyScalar) {
  const char* const kDynamicSlice = R"( HloModule DynamicSlice_module ENTRY entry (operand: s32[4,5,6], index.0: s32[], index.1: s32[], index.2: s32[]) -> s32[1,1,1] { operand = s32[4,5,6] parameter(0) index.0 = s32[] parameter(1) index.1 = s32[] parameter(2) index.2 = s32[] parameter(3) ROOT dynamic-slice = s32[1,1,1] dynamic-slice(operand, index.0, index.1, index.2), dynamic_slice_sizes={1,1,1} } )";

  HloModuleConfig config;
  DebugOptions debug_options = config.debug_options();
  debug_options.set_xla_allow_scalar_index_dynamic_ops(true);
  config.set_debug_options(debug_options);

  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kDynamicSlice, config));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          DynamicIndexSplitter().Run(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::DynamicSlice(op::Parameter(0), op::Parameter(1),
                               op::Parameter(2), op::Parameter(3)));
}

}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_index_splitter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_index_splitter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
d759e3dd-49f9-433a-a301-b7317ffa4f80
cpp
tensorflow/tensorflow
root_instruction_sinker
third_party/xla/xla/service/root_instruction_sinker.cc
third_party/xla/xla/service/root_instruction_sinker_test.cc
#include "xla/service/root_instruction_sinker.h"

#include "xla/service/tuple_util.h"

namespace xla {
namespace {

// Moves a tuple-shaped root to the end of the schedule by duplicating the
// root tuple; the duplicate's operands and the duplicate itself are appended
// to the sequence and the duplicate becomes the new root.
void SinkTupleRoot(HloComputation* computation) {
  HloInstruction* root = computation->root_instruction();
  CHECK(root->shape().IsTuple());
  HloInstruction* new_root = TupleUtil::Duplicate(root);
  HloInstructionSequence& seq =
      computation->parent()->schedule().GetOrCreateSequence(computation);
  for (HloInstruction* operand : new_root->operands()) {
    seq.push_back(operand);
  }
  seq.push_back(new_root);
  computation->set_root_instruction(new_root);
}

// Moves a non-tuple root to the end of the schedule by appending a bitcast of
// it and making the bitcast the new root.
void SinkNontupleRoot(HloComputation* computation) {
  HloInstruction* root = computation->root_instruction();
  CHECK(!root->shape().IsTuple());
  HloInstruction* new_root = computation->AddInstruction(
      HloInstruction::CreateBitcast(root->shape(), root));
  HloInstructionSequence& seq =
      computation->parent()->schedule().GetOrCreateSequence(computation);
  seq.push_back(new_root);
  computation->set_root_instruction(new_root);
}

}  // namespace

// Ensures every non-fusion computation's root is the last instruction in its
// schedule sequence. Requires the module to be scheduled.
absl::StatusOr<bool> RootInstructionSinker::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  TF_RET_CHECK(module->has_schedule());
  bool modified = false;
  for (HloComputation* computation :
       module->MakeNonfusionComputations(execution_threads)) {
    HloInstructionSequence& seq =
        module->schedule().GetOrCreateSequence(computation);
    // Root already scheduled last: nothing to sink.
    if (computation->root_instruction() ==
        seq.instructions().at(seq.size() - 1)) {
      continue;
    }
    if (computation->root_instruction()->shape().IsTuple()) {
      SinkTupleRoot(computation);
    } else {
      SinkNontupleRoot(computation);
    }
    modified = true;
  }
  return modified;
}

}  // namespace xla
#include "xla/service/root_instruction_sinker.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/tests/hlo_test_base.h" namespace xla { namespace { namespace op = xla::testing::opcode_matchers; using RootInstructionSinkerTest = HloTestBase; TEST_F(RootInstructionSinkerTest, TupleNoChange) { absl::string_view hlo_string = R"( HloModule While, is_scheduled=true While.body { loop_var.1 = (s32[], s32[3]{0}) parameter(0) get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0 constant.1 = s32[] constant(1) add = s32[] add(get-tuple-element.1, constant.1) get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1 multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2) ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply) } While.condition { loop_var.2 = (s32[], s32[3]{0}) parameter(0) get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0 constant.2 = s32[] constant(100) ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT } ENTRY While { constant.3 = s32[] constant(42) constant.4 = s32[3]{0} constant({0, 1, 2}) tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4) ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition= While.condition, body=While.body } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); auto while_body = module->entry_computation()->root_instruction()->while_body(); int num_body_instructions = while_body->instruction_count(); RootInstructionSinker sinker; EXPECT_FALSE(sinker.Run(module.get()).value()); EXPECT_EQ(module->entry_computation() ->root_instruction() ->while_body() ->instruction_count(), num_body_instructions); } TEST_F(RootInstructionSinkerTest, Tuple) { absl::string_view hlo_string = R"( HloModule While, is_scheduled=true While.body { loop_var.1 = (s32[], s32[3]{0}) parameter(0) get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0 constant.1 = s32[] constant(1) add = s32[] add(get-tuple-element.1, 
constant.1) get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1 multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2) ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply) after-all = token[] after-all() send = (s32[3]{0}, u32[], token[]) send(multiply, after-all), channel_id=1 send-done = token[] send-done(send), channel_id=1 } While.condition { loop_var.2 = (s32[], s32[3]{0}) parameter(0) get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0 constant.2 = s32[] constant(100) ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT } ENTRY While { constant.3 = s32[] constant(42) constant.4 = s32[3]{0} constant({0, 1, 2}) tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4) ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition= While.condition, body=While.body } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); RootInstructionSinker sinker; EXPECT_TRUE(sinker.Run(module.get()).value()); auto while_body = module->entry_computation()->root_instruction()->while_body(); const auto& sequence = module->schedule().sequence(while_body); EXPECT_EQ(sequence.instructions().at(sequence.size() - 1), while_body->root_instruction()); EXPECT_THAT(while_body->root_instruction(), op::Tuple(op::GetTupleElement(op::Tuple()), op::GetTupleElement(op::Tuple()))); } TEST_F(RootInstructionSinkerTest, NontupleNoChange) { absl::string_view hlo_string = R"( HloModule Call, is_scheduled=true Call { param = s32[3]{0} parameter(0) ROOT multiply = s32[3]{0} multiply(param, param) } ENTRY While { constant.4 = s32[3]{0} constant({0, 1, 2}) ROOT call = s32[3]{0} call(constant.4), to_apply=Call } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); auto called_computation = module->entry_computation()->root_instruction()->called_computations()[0]; int num_instructions = called_computation->instruction_count(); RootInstructionSinker sinker; 
EXPECT_FALSE(sinker.Run(module.get()).value()); EXPECT_EQ(module->entry_computation() ->root_instruction() ->called_computations()[0] ->instruction_count(), num_instructions); } TEST_F(RootInstructionSinkerTest, Nontuple) { absl::string_view hlo_string = R"( HloModule Call, is_scheduled=true Call { param = s32[3]{0} parameter(0) ROOT multiply = s32[3]{0} multiply(param, param) after-all = token[] after-all() send = (s32[3]{0}, u32[], token[]) send(multiply, after-all), channel_id=1 send-done = token[] send-done(send), channel_id=1 } ENTRY While { constant.4 = s32[3]{0} constant({0, 1, 2}) ROOT call = s32[3]{0} call(constant.4), to_apply=Call } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); RootInstructionSinker sinker; EXPECT_TRUE(sinker.Run(module.get()).value()); auto called_computation = module->entry_computation()->root_instruction()->called_computations()[0]; const auto& sequence = module->schedule().sequence(called_computation); EXPECT_EQ(sequence.instructions().at(sequence.size() - 1), called_computation->root_instruction()); EXPECT_THAT(called_computation->root_instruction(), op::Bitcast(op::Multiply())); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/root_instruction_sinker.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/root_instruction_sinker_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
5c54fd05-5e24-4353-98e8-4a51907c7f2c
cpp
tensorflow/tensorflow
dot_decomposer
third_party/xla/xla/service/dot_decomposer.cc
third_party/xla/xla/service/dot_decomposer_test.cc
#include "xla/service/dot_decomposer.h" #include <algorithm> #include <cstdint> #include <memory> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/status/status.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/shape_inference.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" namespace xla { namespace { absl::Status CanonicalizeDot(HloDotInstruction* original_dot) { auto computation = original_dot->parent(); const auto& original_dnums = original_dot->dot_dimension_numbers(); const int64_t num_batch_dims = original_dnums.lhs_batch_dimensions_size(); const int64_t num_contracting_dims = original_dnums.lhs_contracting_dimensions_size(); int lhs_sparse_dim = -1, rhs_sparse_dim = -1; for (const SparsityDescriptor& descriptor : original_dot->sparsity()) { (descriptor.index() == 0 ? 
lhs_sparse_dim : rhs_sparse_dim) = descriptor.dimension(); } auto move_dim_to_end = [&](std::vector<int64_t>& dims, int sparse_dim) { if (sparse_dim < 0) return; auto it = std::remove(dims.begin(), dims.end(), sparse_dim); *it = sparse_dim; }; const auto& lhs_shape = original_dot->operand(0)->shape(); const int64_t lhs_rank = lhs_shape.rank(); const int64_t num_lhs_non_contracting_dims = lhs_rank - num_batch_dims - num_contracting_dims; std::vector<int64_t> lhs_non_contracting_dims; lhs_non_contracting_dims.reserve(num_lhs_non_contracting_dims); int64_t lhs_contracting_size = 1; bool lhs_contracting_dynamic = false; int64_t lhs_non_contracting_size = 1; bool lhs_non_contracting_dynamic = false; std::vector<int64_t> batch_dim_sizes; batch_dim_sizes.reserve(num_batch_dims); std::vector<bool> batch_dynamic_dims; batch_dynamic_dims.reserve(num_batch_dims); for (int64_t i = 0; i < lhs_rank; ++i) { if (absl::c_linear_search(original_dnums.lhs_contracting_dimensions(), i)) { lhs_contracting_size *= lhs_shape.dimensions(i); lhs_contracting_dynamic |= lhs_shape.is_dynamic_dimension(i); } else if (absl::c_linear_search(original_dnums.lhs_batch_dimensions(), i)) { batch_dim_sizes.push_back(lhs_shape.dimensions(i)); batch_dynamic_dims.push_back(lhs_shape.is_dynamic_dimension(i)); } else { lhs_non_contracting_dims.push_back(i); lhs_non_contracting_size *= lhs_shape.dimensions(i); lhs_non_contracting_dynamic |= lhs_shape.is_dynamic_dimension(i); } } std::vector<int64_t> lhs_transpose; lhs_transpose.reserve(lhs_rank); lhs_transpose.insert(lhs_transpose.end(), original_dnums.lhs_batch_dimensions().begin(), original_dnums.lhs_batch_dimensions().end()); lhs_transpose.insert(lhs_transpose.end(), lhs_non_contracting_dims.begin(), lhs_non_contracting_dims.end()); lhs_transpose.insert(lhs_transpose.end(), original_dnums.lhs_contracting_dimensions().begin(), original_dnums.lhs_contracting_dimensions().end()); move_dim_to_end(lhs_transpose, lhs_sparse_dim); HloInstruction* lhs_operand = 
original_dot->mutable_operand(0); HloInstruction* transposed_lhs = computation->AddInstruction( HloInstruction::CreateTranspose( ShapeUtil::PermuteDimensions(lhs_transpose, lhs_shape), lhs_operand, lhs_transpose), &lhs_operand->metadata()); std::vector<int64_t> lhs_reshape_dims = batch_dim_sizes; std::vector<bool> lhs_reshape_dynamic_dims = batch_dynamic_dims; if (lhs_non_contracting_size > 1) { lhs_reshape_dims.push_back(lhs_non_contracting_size); lhs_reshape_dynamic_dims.push_back(lhs_non_contracting_dynamic); } lhs_reshape_dims.push_back(lhs_contracting_size); lhs_reshape_dynamic_dims.push_back(lhs_contracting_dynamic); HloInstruction* reshaped_lhs = computation->AddInstruction( HloInstruction::CreateReshape( ShapeUtil::MakeShape(lhs_shape.element_type(), lhs_reshape_dims, lhs_reshape_dynamic_dims), transposed_lhs), &transposed_lhs->metadata()); const auto& rhs_shape = original_dot->operand(1)->shape(); const int64_t rhs_rank = rhs_shape.rank(); const int64_t num_rhs_non_contracting_dims = rhs_rank - num_batch_dims - num_contracting_dims; std::vector<int64_t> rhs_non_contracting_dims; rhs_non_contracting_dims.reserve(num_rhs_non_contracting_dims); int64_t rhs_non_contracting_size = 1; bool rhs_non_contracting_dynamic = false; int64_t rhs_contracting_size = 1; bool rhs_contracting_dynamic = false; for (int64_t i = 0; i < rhs_rank; ++i) { if (absl::c_linear_search(original_dnums.rhs_contracting_dimensions(), i)) { rhs_contracting_size *= rhs_shape.dimensions(i); rhs_contracting_dynamic |= rhs_shape.is_dynamic_dimension(i); } else if (!absl::c_linear_search(original_dnums.rhs_batch_dimensions(), i)) { rhs_non_contracting_dims.push_back(i); rhs_non_contracting_size *= rhs_shape.dimensions(i); rhs_non_contracting_dynamic |= rhs_shape.is_dynamic_dimension(i); } } std::vector<int64_t> rhs_transpose; rhs_transpose.reserve(rhs_rank); rhs_transpose.insert(rhs_transpose.end(), original_dnums.rhs_batch_dimensions().begin(), original_dnums.rhs_batch_dimensions().end()); 
rhs_transpose.insert(rhs_transpose.end(), original_dnums.rhs_contracting_dimensions().begin(), original_dnums.rhs_contracting_dimensions().end()); move_dim_to_end(rhs_transpose, rhs_sparse_dim); rhs_transpose.insert(rhs_transpose.end(), rhs_non_contracting_dims.begin(), rhs_non_contracting_dims.end()); HloInstruction* rhs_operand = original_dot->mutable_operand(1); HloInstruction* transposed_rhs = computation->AddInstruction( HloInstruction::CreateTranspose( ShapeUtil::PermuteDimensions(rhs_transpose, rhs_shape), rhs_operand, rhs_transpose), &rhs_operand->metadata()); std::vector<int64_t> rhs_reshape_dims = batch_dim_sizes; rhs_reshape_dims.push_back(rhs_contracting_size); std::vector<bool> rhs_reshape_dynamic_dims = batch_dynamic_dims; rhs_reshape_dynamic_dims.push_back(rhs_contracting_dynamic); if (rhs_non_contracting_size > 1) { rhs_reshape_dims.push_back(rhs_non_contracting_size); rhs_reshape_dynamic_dims.push_back(rhs_non_contracting_dynamic); } HloInstruction* reshaped_rhs = computation->AddInstruction( HloInstruction::CreateReshape( ShapeUtil::MakeShape(rhs_shape.element_type(), rhs_reshape_dims, rhs_reshape_dynamic_dims), transposed_rhs), &transposed_rhs->metadata()); std::vector<int64_t> dot_dims = batch_dim_sizes; std::vector<bool> dot_dynamic_dims = batch_dynamic_dims; if (lhs_non_contracting_size > 1) { dot_dims.push_back(lhs_non_contracting_size); dot_dynamic_dims.push_back(lhs_non_contracting_dynamic); } if (rhs_non_contracting_size > 1) { dot_dims.push_back(rhs_non_contracting_size); dot_dynamic_dims.push_back(rhs_non_contracting_dynamic); } DotDimensionNumbers dot_dnums; for (int64_t i = 0; i < num_batch_dims; ++i) { dot_dnums.add_lhs_batch_dimensions(i); dot_dnums.add_rhs_batch_dimensions(i); } dot_dnums.add_lhs_contracting_dimensions( num_batch_dims + (lhs_non_contracting_size > 1 ? 
1 : 0)); dot_dnums.add_rhs_contracting_dimensions(num_batch_dims); std::vector<SparsityDescriptor> sparsity; std::vector<HloInstruction*> sparse_meta; sparsity.reserve(original_dot->sparse_operands()); sparse_meta.reserve(original_dot->sparse_operands()); auto transpose_meta = [&](HloInstruction* original_meta, absl::Span<const int64_t> transpose) { return computation->AddInstruction( HloInstruction::CreateTranspose( ShapeUtil::PermuteDimensions(transpose, original_meta->shape()), original_meta, transpose), &original_meta->metadata()); }; for (int i = 0; i < original_dot->sparse_operands(); ++i) { SparsityDescriptor descriptor = original_dot->sparsity()[i]; descriptor.set_dimension(num_batch_dims + (descriptor.index() == 0 && lhs_non_contracting_size > 1)); sparsity.push_back(descriptor); HloInstruction* meta = original_dot->mutable_operand(HloDotInstruction::kOperands + i); HloInstruction* meta_operand; if (descriptor.index() == 0) { meta = transpose_meta(meta, lhs_transpose); meta_operand = reshaped_lhs; } else { meta = transpose_meta(meta, rhs_transpose); meta_operand = reshaped_rhs; } TF_ASSIGN_OR_RETURN(Shape result_shape, ShapeInference::InferSparseDotMetadataShape( meta_operand->shape(), dot_dnums, descriptor)); meta = computation->AddInstruction( HloInstruction::CreateReshape(result_shape, meta), &meta->metadata()); sparse_meta.push_back(meta); } HloInstruction* dot = computation->AddInstruction(HloInstruction::CreateDot( ShapeUtil::MakeShape(original_dot->shape().element_type(), dot_dims, dot_dynamic_dims), reshaped_lhs, reshaped_rhs, dot_dnums, original_dot->precision_config(), sparsity, sparse_meta)); original_dot->SetupDerivedInstruction(dot); std::unique_ptr<HloInstruction> replacement = HloInstruction::CreateReshape(original_dot->shape(), dot); VLOG(3) << "Canonicalizing dot:\n" << "\t old: " << original_dot->ToString() << "\n" << "\t new: " << dot->ToString() << "\n" << "\t -> " << replacement->ToString(); return 
computation->ReplaceWithNewInstruction(original_dot, std::move(replacement)); } } absl::StatusOr<bool> DotDecomposer::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { std::vector<HloInstruction*> non_canonical_dots; for (auto* computation : module->MakeNonfusionComputations(execution_threads)) { for (auto* instruction : computation->instructions()) { if (instruction->opcode() != HloOpcode::kDot) { continue; } const DotDimensionNumbers& dnums = instruction->dot_dimension_numbers(); if (dnums.lhs_contracting_dimensions_size() != 1) { non_canonical_dots.push_back(instruction); continue; } if (dnums.lhs_batch_dimensions_size() + 2 < instruction->operand(0)->shape().rank() || dnums.rhs_batch_dimensions_size() + 2 < instruction->operand(1)->shape().rank()) { non_canonical_dots.push_back(instruction); continue; } if (dnums.lhs_batch_dimensions().empty() && dnums.lhs_contracting_dimensions().empty()) { non_canonical_dots.push_back(instruction); continue; } std::vector<int64_t> canonical_batch_dims( dnums.lhs_batch_dimensions_size()); absl::c_iota(canonical_batch_dims, 0); if (!absl::c_equal(dnums.lhs_batch_dimensions(), canonical_batch_dims) || !absl::c_equal(dnums.rhs_batch_dimensions(), canonical_batch_dims)) { non_canonical_dots.push_back(instruction); } } } bool changed = false; for (auto* dot : non_canonical_dots) { TF_RETURN_IF_ERROR(CanonicalizeDot(Cast<HloDotInstruction>(dot))); changed = true; } return changed; } }
#include "xla/service/dot_decomposer.h" #include <memory> #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" namespace xla { namespace { namespace m = ::xla::match; namespace op = ::xla::testing::opcode_matchers; using DotDecomposerTest = HloTestBase; TEST_F(DotDecomposerTest, CanonicalizeMultipleNonContractingDims) { absl::string_view module_string = R"( HloModule module ENTRY main { p0 = f32[64,63,512]{2,1,0} parameter(0) p1 = f32[512,512]{1,0} parameter(1) ROOT dot = f32[64,63,512]{2,1,0} dot(p0, p1), lhs_contracting_dims={2}, rhs_contracting_dims={0} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); TF_ASSERT_OK_AND_ASSIGN(bool canonicalized, DotDecomposer().Run(module.get())); EXPECT_TRUE(canonicalized); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Reshape(AllOf(op::Dot(op::Reshape(), op::Reshape(), 1, 0), op::Shape("f32[4032,512]")))); } TEST_F(DotDecomposerTest, DontCanonicalizeIfNoNoncontractingDims) { absl::string_view module_string = R"( HloModule module ENTRY main { p0 = f32[64,4]{1,0} parameter(0) p1 = f32[64,4]{1,0} parameter(1) ROOT dot = f32[64]{0} dot(p0, p1), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); TF_ASSERT_OK_AND_ASSIGN(bool canonicalized, DotDecomposer().Run(module.get())); EXPECT_FALSE(canonicalized); } TEST_F(DotDecomposerTest, DontAddLhsNonContractingDimIfOne) { absl::string_view module_string = R"( 
HloModule module ENTRY main { p0 = f32[64,4]{1,0} parameter(0) p1 = f32[64,4,2,1]{3,2,1,0} parameter(1) ROOT dot = f32[64,2,1]{2,1,0} dot(p0, p1), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); TF_ASSERT_OK_AND_ASSIGN(bool canonicalized, DotDecomposer().Run(module.get())); EXPECT_TRUE(canonicalized); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Reshape(AllOf(op::Dot(op::Reshape(), op::Reshape(), 1, 1), op::Shape("f32[64,2]")))); } TEST_F(DotDecomposerTest, DontAddRhsNonContractingDimIfOne) { absl::string_view module_string = R"( HloModule module ENTRY main { p0 = f32[64,4,2,1]{3,2,1,0} parameter(0) p1 = f32[64,4]{1,0} parameter(1) ROOT dot = f32[64,2,1]{2,1,0} dot(p0, p1), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); TF_ASSERT_OK_AND_ASSIGN(bool canonicalized, DotDecomposer().Run(module.get())); EXPECT_TRUE(canonicalized); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Reshape(AllOf(op::Dot(op::Reshape(), op::Reshape(), 2, 1), op::Shape("f32[64,2]")))); } template <typename Arg0, typename Arg1, typename Arg2> auto SparseDotMatcher(Arg0&& arg0, Arg1&& arg1, Arg2&& arg2) { return match::Op() .WithOpcode(HloOpcode::kDot) .WithOperand(0, std::forward<Arg0>(arg0)) .WithOperand(1, std::forward<Arg1>(arg1)) .WithOperand(2, std::forward<Arg2>(arg2)); } TEST_F(DotDecomposerTest, CanonicalizeSparseLhs) { absl::string_view kHlo = R"( HloModule module ENTRY main { lhs = f32[16,4,3,7] parameter(0) rhs = f32[32,4,5,7] parameter(1) meta = u16[2,4,3,7] parameter(2) ROOT dot = f32[7,3,5] dot(lhs, rhs, meta), sparsity=L.0@2:4, lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}, lhs_batch_dims={3}, rhs_batch_dims={3} })"; 
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHlo)); TF_ASSERT_OK_AND_ASSIGN(bool canonicalized, DotDecomposer().Run(module.get())); EXPECT_TRUE(canonicalized); HloInstruction* root = module->entry_computation()->root_instruction(); EXPECT_THAT(root, GmockMatch(m::Reshape(SparseDotMatcher( m::Reshape(m::Transpose(m::Parameter(0))), m::Reshape(m::Transpose(m::Parameter(1))), m::Reshape(m::Transpose(m::Parameter(2))))))); auto dot = Cast<HloDotInstruction>(root->operand(0)); auto descriptor = dot->sparsity().front(); EXPECT_EQ(descriptor.index(), 0); EXPECT_EQ(descriptor.dimension(), 2); } TEST_F(DotDecomposerTest, CanonicalizeSparseRhs) { absl::string_view kHlo = R"( HloModule module ENTRY main { lhs = f32[32,4,3,7] parameter(0) rhs = f32[16,4,5,7] parameter(1) meta = u16[2,4,5,7] parameter(2) ROOT dot = f32[7,3,5] dot(lhs, rhs, meta), sparsity=R.0@2:4, lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}, lhs_batch_dims={3}, rhs_batch_dims={3} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHlo)); TF_ASSERT_OK_AND_ASSIGN(bool canonicalized, DotDecomposer().Run(module.get())); EXPECT_TRUE(canonicalized); HloInstruction* root = module->entry_computation()->root_instruction(); EXPECT_THAT(root, GmockMatch(m::Reshape(SparseDotMatcher( m::Reshape(m::Transpose(m::Parameter(0))), m::Reshape(m::Transpose(m::Parameter(1))), m::Reshape(m::Transpose(m::Parameter(2))))))); auto dot = Cast<HloDotInstruction>(root->operand(0)); auto descriptor = dot->sparsity().front(); EXPECT_EQ(descriptor.index(), 1); EXPECT_EQ(descriptor.dimension(), 1); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dot_decomposer.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dot_decomposer_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
8a31a24b-fd06-41cc-bf29-c12ce667dce2
cpp
tensorflow/tensorflow
hlo_constant_folding
third_party/xla/xla/service/hlo_constant_folding.cc
third_party/xla/xla/service/hlo_constant_folding_test.cc
#include "xla/service/hlo_constant_folding.h" #include <algorithm> #include <atomic> #include <cstdint> #include <memory> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" #include "absl/time/time.h" #include "xla/hlo/evaluator/hlo_evaluator.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/literal.h" #include "xla/service/slow_operation_alarm.h" #include "xla/shape_util.h" #include "tsl/platform/errors.h" namespace xla { static bool IsOrContainsIllegalInstr(const HloInstruction* instr) { if (instr->opcode() == HloOpcode::kAfterAll || instr->opcode() == HloOpcode::kRng) { return true; } for (const HloComputation* c : instr->called_computations()) { if (absl::c_any_of(c->instructions(), IsOrContainsIllegalInstr)) { return true; } } return false; } std::atomic<int64_t> HloConstantFolding::slow_op_counter_{0}; absl::StatusOr<bool> HloConstantFolding::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { auto evaluator = std::make_unique<HloEvaluator>(0); evaluator->set_use_fast_path(true); std::vector<HloInstruction*> dead_instructions; for (auto* computation : module->MakeNonfusionComputations(execution_threads)) { for (auto* instruction : computation->MakeInstructionPostOrder()) { if (instruction->IsDead()) { continue; } if (!absl::c_any_of(instruction->operands(), HloPredicateIsOp<HloOpcode::kConstant>) || !absl::c_all_of( instruction->operands(), [](const HloInstruction* operand) { return operand->opcode() == HloOpcode::kConstant || (operand->opcode() == HloOpcode::kBroadcast && operand->operand(0)->opcode() == HloOpcode::kConstant); })) { continue; } if (instruction->opcode() == HloOpcode::kParameter || instruction->opcode() == HloOpcode::kConstant || 
instruction->opcode() == HloOpcode::kTuple) { continue; } if (instruction->opcode() == HloOpcode::kBroadcast || instruction->opcode() == HloOpcode::kIota) { continue; } if (instruction->IsAsynchronous() && instruction->async_execution_thread() != instruction->parent()->execution_thread()) { continue; } if (instruction->opcode() == HloOpcode::kFft) { continue; } if (IsOrContainsIllegalInstr(instruction)) { continue; } if (instruction->HasSideEffect()) { continue; } if (instruction->opcode() == HloOpcode::kPad && instruction->operand(0)->opcode() == HloOpcode::kBroadcast && instruction->operand(1)->opcode() == HloOpcode::kConstant) { continue; } if (instruction->shape().IsArray()) { int64_t elements_in_operands = 0; for (HloInstruction* operand : instruction->operands()) { if (operand->shape().IsArray()) { elements_in_operands += ShapeUtil::ElementsIn(operand->shape()); } } int64_t elements_in_constant = ShapeUtil::ElementsIn(instruction->shape()); static const int64_t kMaximumConstantSizeElements = 45 * 1000 * 1000; if (std::max(elements_in_constant, elements_in_operands) > kMaximumConstantSizeElements) { VLOG(2) << "Ignore constant folding: result shape size is " << elements_in_constant << " total size of arguments is " << elements_in_operands; continue; } } VLOG(5) << "Constant folding: " << instruction->ToString(); absl::Duration slow_timeout = absl::Seconds(uint64_t{1} << slow_op_counter_.load()); SlowOperationAlarm slow_alarm(slow_timeout, [instruction, slow_timeout] { const bool ndebug = #if NDEBUG true; #else false; #endif absl::string_view explanation_msg = ndebug ? "This isn't necessarily a bug; constant-folding is " "inherently a trade-off between compilation time and speed " "at runtime. 
XLA has some guards that attempt to keep " "constant folding from taking too long, but fundamentally " "you'll always be able to come up with an input program that " "takes a long time.\n\n" "If you'd like to file a bug, run with envvar " "XLA_FLAGS=--xla_dump_to=/tmp/foo and attach the results." : "XLA was built without compiler optimizations, which can be " "slow. Try rebuilding with -c opt."; return absl::StrFormat( "Constant folding an instruction is taking > %s:\n\n" " %s\n\n" "%s", absl::FormatDuration(slow_timeout), instruction->ToString(), explanation_msg); }); Literal result; if (!evaluator->TryEvaluate( instruction, &result, true)) { VLOG(2) << "Constant folding failed for instruction: " << instruction->ToString(); continue; } slow_alarm.cancel(); if (slow_alarm.fired()) { slow_op_counter_++; } VLOG(4) << "Constant folded: " << instruction->ToString(); dead_instructions.push_back(instruction); HloInstruction* new_constant = instruction->AddInstruction( HloInstruction::CreateConstant(std::move(result))); if (new_constant->shape().has_layout()) { new_constant->mutable_shape() ->mutable_layout() ->set_element_size_in_bits( instruction->shape().layout().element_size_in_bits()); } TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(new_constant)); } } const bool changed = !dead_instructions.empty(); for (HloInstruction* dead_instruction : dead_instructions) { CHECK(dead_instruction->IsDead()); HloComputation* computation = dead_instruction->parent(); TF_RETURN_IF_ERROR(computation->RemoveInstruction(dead_instruction)); } return changed; } }
// Unit tests for the HloConstantFolding HLO pass: verifies which
// instructions are folded into constants and which are deliberately left
// alone (large results, side effects, RNG-containing subcomputations, ...).
#include "xla/service/hlo_constant_folding.h"

#include <cstdint>
#include <memory>
#include <utility>
#include <vector>

#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/permutation_util.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace {

namespace op = xla::testing::opcode_matchers;
namespace m = xla::match;

using HloConstantFoldingTest = HloTestBase;

// convert(f32 scalar constant) folds to an s64 scalar constant.
TEST_F(HloConstantFoldingTest, ConvertF32ToS64) {
  HloComputation::Builder builder(TestName());
  HloInstruction* input = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
  builder.AddInstruction(
      HloInstruction::CreateConvert(ShapeUtil::MakeShape(S64, {}), input));

  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());

  // Sanity check: before the pass the root is still the convert.
  EXPECT_THAT(computation->root_instruction(),
              GmockMatch(m::Convert().WithOperand(0, m::Op().Is(input))));

  HloConstantFolding const_folder;
  TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
  EXPECT_TRUE(result);

  EXPECT_THAT(computation->root_instruction(), GmockMatch(m::Constant()));
  EXPECT_EQ(
      computation->root_instruction()->literal().GetFirstElement<int64_t>(),
      42);
}

// convert(s64 scalar constant) folds to an f32 scalar constant.
TEST_F(HloConstantFoldingTest, ConvertS64ToF32) {
  HloComputation::Builder builder(TestName());
  HloInstruction* input = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int64_t>(42)));
  builder.AddInstruction(
      HloInstruction::CreateConvert(ShapeUtil::MakeShape(F32, {}), input));

  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());

  EXPECT_THAT(computation->root_instruction(),
              GmockMatch(m::Convert().WithOperand(0, m::Op().Is(input))));

  HloConstantFolding const_folder;
  TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
  EXPECT_TRUE(result);

  EXPECT_THAT(computation->root_instruction(), GmockMatch(m::Constant()));
  EXPECT_EQ(computation->root_instruction()->literal().GetFirstElement<float>(),
            42.0f);
}

// Element-wise convert of a rank-1 constant folds element by element.
TEST_F(HloConstantFoldingTest, ConvertF32ArrayToS64Array) {
  HloComputation::Builder builder(TestName());
  HloInstruction* input = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR1<float>({42.0f, 19.0f})));
  builder.AddInstruction(
      HloInstruction::CreateConvert(ShapeUtil::MakeShape(S64, {2}), input));

  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());

  EXPECT_THAT(computation->root_instruction(),
              GmockMatch(m::Convert().WithOperand(0, m::Op().Is(input))));

  HloConstantFolding const_folder;
  TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
  EXPECT_TRUE(result);

  EXPECT_THAT(computation->root_instruction(), GmockMatch(m::Constant()));
  EXPECT_EQ(computation->root_instruction()->literal().Get<int64_t>({0}), 42);
  EXPECT_EQ(computation->root_instruction()->literal().Get<int64_t>({1}), 19);
}

// Concatenation of constants folds to a single constant with the
// expected concatenated shape, for several dimension configurations.
TEST_F(HloConstantFoldingTest, Concatenate) {
  const struct TestConfig {
    int concat_dimension;
    std::vector<int64_t> dimensions;
    std::vector<int64_t> concat_sizes;
  } test_configs[] = {
      {1, {11, 0, 7, 5, 9}, {2, 5, 7, 11}},
      {3, {1, 4, 17, 0, 8}, {1, 3, 9, 12}},
  };

  for (auto& test_config : test_configs) {
    HloComputation::Builder builder(TestName());
    std::vector<int64_t> dimensions(test_config.dimensions.begin(),
                                    test_config.dimensions.end());
    int64_t concat_size = 0;
    std::vector<HloInstruction*> operands;
    // Build one constant operand per requested size along the concat dim.
    for (auto csize : test_config.concat_sizes) {
      dimensions[test_config.concat_dimension] = csize;
      concat_size += csize;
      auto literal = LiteralUtil::CreateFromDimensions(F32, dimensions);
      HloInstruction* insn = builder.AddInstruction(
          HloInstruction::CreateConstant(std::move(literal)));
      operands.push_back(insn);
    }
    dimensions[test_config.concat_dimension] = concat_size;
    Shape shape = ShapeUtil::MakeShape(F32, dimensions);
    builder.AddInstruction(HloInstruction::CreateConcatenate(
        shape, operands, test_config.concat_dimension));
    auto module = CreateNewVerifiedModule();
    auto computation = module->AddEntryComputation(builder.Build());

    HloConstantFolding const_folder;
    TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
    EXPECT_TRUE(result);

    HloInstruction* root = computation->root_instruction();
    EXPECT_THAT(root, GmockMatch(m::Constant()));
    EXPECT_TRUE(ShapeUtil::Equal(root->shape(), shape));
  }
}

// Slicing a constant folds to a constant of the sliced shape.
TEST_F(HloConstantFoldingTest, Slice) {
  HloComputation::Builder builder(TestName());
  const int64_t dimensions[] = {11, 8, 7, 5, 9};
  const int64_t slice_start[] = {4, 2, 3, 1, 5};
  const int64_t slice_limits[] = {10, 8, 6, 5, 9};
  const int64_t slice_strides[] = {1, 1, 1, 1, 1};
  TF_ASSERT_OK_AND_ASSIGN(auto literal,
                          LiteralUtil::CreateRandomLiteral<F32>(
                              ShapeUtil::MakeShape(F32, dimensions), 0.0, 1.0));
  HloInstruction* literal_instruction = builder.AddInstruction(
      HloInstruction::CreateConstant(std::move(literal)));
  Shape shape = ShapeUtil::MakeShape(F32, {6, 6, 3, 4, 4});
  builder.AddInstruction(HloInstruction::CreateSlice(
      shape, literal_instruction, slice_start, slice_limits, slice_strides));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());

  HloConstantFolding const_folder;
  TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
  EXPECT_TRUE(result);

  HloInstruction* root = computation->root_instruction();
  EXPECT_THAT(root, GmockMatch(m::Constant()));
  EXPECT_TRUE(ShapeUtil::Equal(root->shape(), shape));
}

// Transpose of a constant folds, and every element lands at the
// permuted index (checked via PermuteInverse against a clone).
TEST_F(HloConstantFoldingTest, TransposeConstantFold) {
  HloComputation::Builder builder(TestName());
  const int64_t dimensions[] = {11, 8, 7, 5, 9};
  TF_ASSERT_OK_AND_ASSIGN(auto literal,
                          LiteralUtil::CreateRandomLiteral<F32>(
                              ShapeUtil::MakeShape(F32, dimensions), 0.0, 1.0));
  auto literal_clone = literal.Clone();
  HloInstruction* literal_instruction = builder.AddInstruction(
      HloInstruction::CreateConstant(std::move(literal)));
  Shape shape = ShapeUtil::MakeShape(F32, {8, 7, 11, 9, 5});
  const int64_t permutation[] = {1, 2, 0, 4, 3};
  builder.AddInstruction(
      HloInstruction::CreateTranspose(shape, literal_instruction, permutation));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());

  HloConstantFolding const_folder;
  TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
  EXPECT_TRUE(result);

  HloInstruction* root = computation->root_instruction();
  EXPECT_THAT(root, GmockMatch(m::Constant()));
  EXPECT_TRUE(ShapeUtil::Compatible(root->shape(), shape));

  using NativeT = typename primitive_util::PrimitiveTypeToNative<F32>::type;
  bool matched = true;
  root->literal().EachCell<NativeT>(
      [&](absl::Span<const int64_t> indices, NativeT value) {
        std::vector<int64_t> rindexes = PermuteInverse(indices, permutation);
        matched = matched && (value == literal_clone.Get<NativeT>(rindexes));
      });
  EXPECT_TRUE(matched);
}

// Shared HLO text: a reduce over a constant input, used by several tests.
const char* const kConstantFoldReduce = R"(
  HloModule ConstantFoldReduce

  add {
    a = s32[] parameter(0)
    b = s32[] parameter(1)
    ROOT add = s32[] add(a, b)
  }

  ENTRY r {
    x = s32[3] constant({1, 2, 3})
    init = s32[] constant(0)
    ROOT reduce = s32[] reduce(x, init), dimensions={0}, to_apply=add
  })";

// reduce(constant) folds; 1 + 2 + 3 == 6.
TEST_F(HloConstantFoldingTest, ConstantFoldReduce) {
  TF_ASSERT_OK_AND_ASSIGN(auto m,
                          ParseAndReturnVerifiedModule(kConstantFoldReduce));
  HloConstantFolding const_folder;
  TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(m.get()));
  EXPECT_TRUE(result);

  EXPECT_EQ(6, m->entry_computation()
                   ->root_instruction()
                   ->literal()
                   .GetFirstElement<int32_t>());
}

constexpr absl::string_view kConstantFoldReduceWithMetadata = R"(
  HloModule ConstantFoldReduce

  add {
    a = s32[] parameter(0)
    b = s32[] parameter(1)
    ROOT add = s32[] add(a, b)
  }

  ENTRY r {
    x = s32[3] constant({1, 2, 3}), metadata={op_name="constant"}
    init = s32[] constant(0), metadata={op_name="zero_constant"}
    ROOT reduce = s32[] reduce(x, init), metadata={op_name="reduce"}, dimensions={0}, to_apply=add
  })";

// The folded constant inherits the metadata of the instruction it replaces.
TEST_F(HloConstantFoldingTest, ConstantFoldReduceCheckMetadata) {
  TF_ASSERT_OK_AND_ASSIGN(
      auto m, ParseAndReturnVerifiedModule(kConstantFoldReduceWithMetadata));
  HloConstantFolding const_folder;
  TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(m.get()));
  EXPECT_TRUE(result);
  OpMetadata reduce_metadata;
  reduce_metadata.set_op_name("reduce");
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              AllOf(op::Constant(), op::Metadata(reduce_metadata)));
}

// A reduce whose shape has no layout is not folded.
TEST_F(HloConstantFoldingTest, ConstantFoldReduceNoLayout) {
  TF_ASSERT_OK_AND_ASSIGN(auto m,
                          ParseAndReturnVerifiedModule(kConstantFoldReduce));
  HloInstruction* add = (*m->computations().begin())->root_instruction();
  LayoutUtil::ClearLayout(add->mutable_shape());

  HloConstantFolding const_folder;
  TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(m.get()));
  EXPECT_FALSE(result);

  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::Reduce()));
}

const char* const kConstantFoldLargePad = R"(
  HloModule ConstantFoldLargePad

  ENTRY r {
    a = f32[1,1,1] constant({{{7}}})
    b = f32[] constant(42)
    ROOT pad = f32[2048,2048,128] pad(a, b), padding=1024_1023x1024_1023x64_63
  })";

// Folding is skipped when the padded result would be very large.
TEST_F(HloConstantFoldingTest, DoesNotFoldLargePad) {
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kConstantFoldLargePad));
  HloConstantFolding const_folder;
  TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
  EXPECT_FALSE(result);

  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Pad(m::Constant(), m::Constant())));
}

// pad(broadcast(constant), constant) is deliberately left unfolded.
TEST_F(HloConstantFoldingTest, DoesNotFoldPadBroadcast) {
  const char* const kConstantFoldPadBroadcast = R"(
  HloModule ConstantFoldLargePad

  ENTRY r {
    a = f32[] constant(239)
    broadcast_a = f32[4] broadcast(a), dimensions={}
    b = f32[] constant(42)
    ROOT pad = f32[8] pad(f32[4] broadcast_a, f32[] b), padding=4_0
  })";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(kConstantFoldPadBroadcast));
  HloConstantFolding const_folder;
  TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
  EXPECT_FALSE(result);

  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Pad(m::Broadcast(), m::Constant())));
}

// Slices of a huge broadcast operand are not folded (operand-size guard).
TEST_F(HloConstantFoldingTest, DoesNotFoldSlicesWithLargeOperand) {
  const char* const kModuleStr = R"(
  HloModule test

  ENTRY r {
    a = f32[] constant(42)
    broadcast = f32[1000000000]{0} broadcast(a), dimensions={}
    slice1 = f32[10000]{0} slice(broadcast), slice={[0:10000]}
    slice2 = f32[10000]{0} slice(broadcast), slice={[10000:20000]}
    ROOT add = f32[10000]{0} add(slice1, slice2)
  })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  HloConstantFolding const_folder;
  TF_ASSERT_OK_AND_ASSIGN(bool result, const_folder.Run(module.get()));
  EXPECT_FALSE(result);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Add(m::Slice(), m::Slice())));
}

// A call whose callee contains after-all (a token-producing op) is not folded.
TEST_F(HloConstantFoldingTest, DontFoldSubcomputationContainingAfterAll) {
  const char* const kModuleStr = R"(
  HloModule test

  Fn {
    tok = token[] after-all()
    ROOT root = f32[10] iota(), iota_dimension=0
  }

  ENTRY entry {
    ROOT call = f32[10] call(), to_apply=Fn
  })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  HloConstantFolding constant_folding;
  TF_ASSERT_OK_AND_ASSIGN(bool result,
                          RunHloPass(&constant_folding, module.get()));
  EXPECT_FALSE(result);
}

// A call that transitively (through a fusion) contains rng is not folded.
TEST_F(HloConstantFoldingTest,
       DontFoldSubcomputationTransitivelyContainingRng) {
  const char* const kModuleStr = R"(
  HloModule test

  InnerFn {
    c0 = f32[] constant(0)
    c1 = f32[] constant(1)
    ROOT rng = f32[10] rng(c0, c1), distribution=rng_uniform
  }

  Fn {
    ROOT fusion = f32[10] fusion(), kind=kLoop, calls=InnerFn
  }

  ENTRY entry {
    ROOT call = f32[10] call(), to_apply=Fn
  })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  HloConstantFolding constant_folding;
  TF_ASSERT_OK_AND_ASSIGN(bool result,
                          RunHloPass(&constant_folding, module.get()));
  EXPECT_FALSE(result);
}

// Ops with exactly one broadcast operand fold; broadcast-only and
// broadcast+broadcast operands do not.
TEST_F(HloConstantFoldingTest, FoldOpsWhereOneOperandIsBroadcast) {
  const char* const kModuleStr = R"(
  HloModule test

  ENTRY entry {
    not_folded1 = f32[4] broadcast(f32[] constant(1))
    not_folded2 = add(f32[4] broadcast(f32[] constant(2)),
                      f32[4] broadcast(f32[] constant(3)))
    folded1 = add(f32[4] broadcast(f32[] constant(5)),
                  f32[4] constant({0,1,2,3}))
    folded2 = add(f32[4] constant({0,1,2,3}),
                  f32[4] broadcast(f32[] constant(5)))
    ROOT root = tuple(not_folded1, not_folded2, folded1, folded2)
  })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  HloConstantFolding constant_folding;
  TF_ASSERT_OK_AND_ASSIGN(bool result,
                          RunHloPass(&constant_folding, module.get()));
  EXPECT_TRUE(result);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Tuple(m::Broadcast(m::Constant()),
                                  m::Add(m::Broadcast(m::Constant()),
                                         m::Broadcast(m::Constant())),
                                  m::Constant(),
                                  m::Constant()
                                  )));
}

// s4 (4-bit) constants fold, and the result keeps the 4-bit
// element_size_in_bits layout annotation.
TEST_F(HloConstantFoldingTest, FoldInt4Ops) {
  const char* const kModuleStr = R"(
  HloModule test

  ENTRY entry {
    c0 = s4[2]{0:E(4)} constant({1, 2})
    c1 = s4[2]{0:E(4)} constant({3, 4})
    add1 = s4[2]{0:E(4)} add(c0, c1)
    c2 = s4[]{:E(4)} constant(5)
    add2 = s4[2]{0:E(4)} add(c0, s4[2]{0:E(4)} broadcast(c2))
    ROOT root = tuple(add1, add2)
  })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  HloConstantFolding constant_folding;
  TF_ASSERT_OK_AND_ASSIGN(bool result,
                          RunHloPass(&constant_folding, module.get()));
  EXPECT_TRUE(result);
  auto is_4_bit = [](const HloInstruction* instr) {
    return instr->shape().layout().element_size_in_bits() == 4;
  };
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Tuple(m::Constant().WithPredicate(is_4_bit),
                                  m::Constant().WithPredicate(is_4_bit))));
}

// A reduce-window over a broadcast of a constant still folds.
TEST_F(HloConstantFoldingTest, BigReduceWindow) {
  constexpr absl::string_view kModuleStr = R"(
    HloModule test

    add_bf16 {
      lhs = bf16[] parameter(0)
      rhs = bf16[] parameter(1)
      ROOT add = bf16[] add(lhs, rhs)
    }

    ENTRY accumulated_all_reduce {
      x = bf16[160,10,10,512]{3,2,1,0} broadcast(bf16[] constant(1.0))
      init = bf16[] constant(0)
      ROOT reduce-window = reduce-window(x, init), window={size=1x2x2x1 stride=1x2x2x1}, to_apply=add_bf16
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  HloConstantFolding constant_folding;
  TF_ASSERT_OK_AND_ASSIGN(bool result,
                          RunHloPass(&constant_folding, module.get()));
  EXPECT_TRUE(result);
}

// A reduce over a huge broadcast is rejected by the size guard
// (would be extremely slow to evaluate).
TEST_F(HloConstantFoldingTest, TimingConsumingTest) {
  constexpr absl::string_view mod_str = R"(
    HloModule jit_f, entry_computation_layout={()->f32[]}

    region_0.4 {
      Arg_0.5 = f32[] parameter(0)
      Arg_1.6 = f32[] parameter(1)
      ROOT add.7 = f32[] add(Arg_0.5, Arg_1.6)
    }

    ENTRY main.9 {
      constant.1 = f32[] constant(1)
      broadcast.2 = f32[32,999,40,512]{3,2,1,0} broadcast(constant.1), dimensions={}
      constant.3 = f32[] constant(0)
      ROOT reduce.8 = f32[] reduce(broadcast.2, constant.3), dimensions={0,1,2,3}, to_apply=region_0.4
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(mod_str));
  HloConstantFolding const_fold;
  TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&const_fold, module.get()));
  EXPECT_FALSE(result);
}

}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_constant_folding.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_constant_folding_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
39ece867-becf-40a5-be85-a8bfb9996b0b
cpp
tensorflow/tensorflow
all_reduce_reassociate
third_party/xla/xla/service/all_reduce_reassociate.cc
third_party/xla/xla/service/all_reduce_reassociate_test.cc
// AllReduceReassociate: rewrites op(all-reduce(x), all-reduce(y)) into
// all-reduce(op(x, y)) when the two all-reduces are compatible, halving
// the number of collectives.  Also matches a reduce-scatter-like pattern
// (dynamic-slice of an all-reduce) and optional converts around the
// all-reduce outputs.
#include "xla/service/all_reduce_reassociate.h"

#include <cstdint>
#include <optional>
#include <string>

#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/service/all_reduce_key.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"

namespace xla {
namespace {

namespace m = match;

// Compares two AllReduceKeys.  When `ignore_element_type` is true, tuple
// field 1 is skipped (presumably the element type — TODO confirm against
// AllReduceKey's definition) so that all-reduces differing only in that
// field still compare equal; otherwise full key equality is required.
bool AreAllreduceKeysEqual(AllReduceKey& key0, AllReduceKey& key1,
                           bool ignore_element_type) {
  if (ignore_element_type) {
    return std::get<0>(key0) == std::get<0>(key1) &&
           std::get<2>(key0) == std::get<2>(key1) &&
           std::get<3>(key0) == std::get<3>(key1) &&
           std::get<4>(key0) == std::get<4>(key1) &&
           std::get<5>(key0) == std::get<5>(key1);
  } else {
    return key0 == key1;
  }
}

// True iff ar0 and ar1 have matching all-reduce keys and ar0's reduction
// computation matches the reduction kind `op_kind` being reassociated.
bool AreCompatible(const HloAllReduceInstruction* ar0,
                   const HloAllReduceInstruction* ar1, ReductionKind op_kind,
                   bool ignore_element_type) {
  std::optional<AllReduceKey> key0 = GetAllReduceKey(ar0);
  std::optional<AllReduceKey> key1 = GetAllReduceKey(ar1);
  auto kind0 = MatchReductionComputation(ar0->to_apply());
  return key0 && key1 && kind0 &&
         AreAllreduceKeysEqual(*key0, *key1, ignore_element_type) &&
         kind0 == op_kind;
}

// Walks from `instr` toward an all-reduce, looking through single-user
// reshape/pad/slice/convert chains.  Returns:
//   - the dynamic-slice itself, if `instr` is a single-user dynamic-slice
//     whose operand is a single-user all-reduce (reduce-scatter pattern);
//   - the all-reduce found at the end of the chain (must itself have a
//     single user); or
//   - nullptr if the chain is not of the accepted form.
// A pad is only looked through when its padding value is a constant equal
// to `reduction_identity`, so padding cannot change the reduced result.
HloInstruction* LookThroughForAllReduce(HloInstruction* instr,
                                        const Literal& reduction_identity) {
  if (instr->opcode() == HloOpcode::kDynamicSlice) {
    if (instr->operand(0)->opcode() != HloOpcode::kAllReduce ||
        instr->operand(0)->user_count() != 1 || instr->user_count() != 1) {
      return nullptr;
    }
    return instr;
  }
  while (instr->opcode() != HloOpcode::kAllReduce) {
    if (instr->user_count() != 1) {
      return nullptr;
    }
    if (instr->opcode() != HloOpcode::kReshape &&
        instr->opcode() != HloOpcode::kPad &&
        instr->opcode() != HloOpcode::kSlice &&
        instr->opcode() != HloOpcode::kConvert) {
      return nullptr;
    }
    if (instr->opcode() == HloOpcode::kPad) {
      if (!instr->operand(1)->IsConstant()) {
        return nullptr;
      }
      if (instr->operand(1)->literal() != reduction_identity) {
        return nullptr;
      }
    }
    instr = instr->mutable_operand(0);
  }
  if (instr->user_count() != 1) {
    return nullptr;
  }
  return instr;
}

// Reassociation is profitable when the combined element count of the two
// original all-reduces is at least that of the single reassociated one
// (i.e. we never all-reduce more data than before).
bool ReassociateAllReduceIsProfitable(HloInstruction* ar0, HloInstruction* ar1,
                                      HloInstruction* reassociated_inst) {
  int64_t pre_reassociated_size = ShapeUtil::ElementsIn(ar0->shape());
  if (ar0 != ar1) {
    pre_reassociated_size += ShapeUtil::ElementsIn(ar1->shape());
  }
  return pre_reassociated_size >=
         ShapeUtil::ElementsIn(reassociated_inst->shape());
}

// Checks that each present convert is value-preserving and, when both are
// present, that their operand element types agree (their result types are
// CHECKed equal).  Either pointer may be null (no convert on that side).
bool AreCompatibleConverts(const HloInstruction* convert0,
                           const HloInstruction* convert1) {
  bool is_compatible = true;
  if (convert0) {
    is_compatible &= primitive_util::CastPreservesValues(
        convert0->operand(0)->shape().element_type(),
        convert0->shape().element_type());
  }
  if (convert1) {
    is_compatible &= primitive_util::CastPreservesValues(
        convert1->operand(0)->shape().element_type(),
        convert1->shape().element_type());
  }
  if (convert0 && convert1) {
    CHECK(convert0->shape().element_type() == convert1->shape().element_type());
    is_compatible &= convert0->operand(0)->shape().element_type() ==
                     convert1->operand(0)->shape().element_type();
  }
  return is_compatible;
}

// Matches either convert(pattern) with one user — capturing the convert in
// *optional_convert — or the bare pattern.
template <typename Pattern>
auto OptionalConvertWithOneUser(HloInstruction** optional_convert,
                                Pattern pattern) {
  return m::AnyOf<HloInstruction>(
      m::Convert(optional_convert, pattern).WithOneUser(), std::move(pattern));
}

// Matches an array-shaped binary op whose operands are all-reduces, each
// optionally wrapped in a single-user convert (captured in *convert0 /
// *convert1).
bool MatchOperandsToAllReduceWithOptionalConvert(HloInstruction* inst,
                                                 HloInstruction** convert0,
                                                 HloInstruction** convert1) {
  auto ar_op_optional_convert_pattern =
      m::Op()
          .WithOperand(0, OptionalConvertWithOneUser(convert0, m::AllReduce()))
          .WithOperand(1, OptionalConvertWithOneUser(convert1, m::AllReduce()))
          .WithPredicate([](const HloInstruction* inst) {
            return inst->shape().IsArray();
          });
  return Match(inst, ar_op_optional_convert_pattern);
}

}  // namespace

absl::StatusOr<bool> AllReduceReassociate::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  // Layout-constrained all-reduces cannot be freely rewritten; bail out.
  if (hlo_query::ContainsLayoutConstrainedAllReduce(*module)) {
    VLOG(1)
        << "Skip AllReduceReassociate because the module contains all-reduce "
           "with constrained layouts";
    return false;
  }

  int64_t next_channel_id = hlo_query::NextChannelId(*module);
  bool changed = false;
  for (auto computation : module->computations(execution_threads)) {
    for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {
      // Only consider element-wise reduction-like ops (add, min, max, ...)
      // that have a known identity element for their reduction kind.
      std::optional<ReductionKind> kind = MatchReductionInstruction(inst);
      if (!kind) {
        continue;
      }
      std::optional<Literal> reduction_identity =
          GetReductionIdentity(*kind, inst->shape().element_type());
      if (!reduction_identity) {
        continue;
      }
      // Find the all-reduce (or dynamic-slice-of-all-reduce) feeding each
      // operand, looking through benign single-user ops.
      HloInstruction* lhs = LookThroughForAllReduce(inst->mutable_operand(0),
                                                    *reduction_identity);
      if (lhs == nullptr) {
        continue;
      }
      HloInstruction* rhs = LookThroughForAllReduce(inst->mutable_operand(1),
                                                    *reduction_identity);
      if (rhs == nullptr) {
        continue;
      }
      if (!inst->shape().IsArray()) {
        continue;
      }
      // Both sides must be the same pattern; for dynamic-slices the sliced
      // all-reduce inputs must additionally have compatible shapes.
      if (lhs->opcode() != rhs->opcode() ||
          (lhs->opcode() == HloOpcode::kDynamicSlice &&
           !ShapeUtil::Compatible(lhs->operand(0)->shape(),
                                  rhs->operand(0)->shape()))) {
        continue;
      }

      HloAllReduceInstruction* ar0 = nullptr;
      HloAllReduceInstruction* ar1 = nullptr;
      bool reduce_scatter_pattern_match = false;
      if (lhs->opcode() == HloOpcode::kDynamicSlice) {
        // Check the two dynamic-slices are identical (same indices etc.) by
        // temporarily pointing rhs at lhs's operand and comparing; the
        // original operand is always restored before proceeding.
        HloInstruction* original_rhs_operand = rhs->mutable_operand(0);
        TF_RETURN_IF_ERROR(rhs->ReplaceOperandWith(0, lhs->mutable_operand(0)));
        if (!lhs->Identical(*rhs)) {
          TF_RETURN_IF_ERROR(rhs->ReplaceOperandWith(0, original_rhs_operand));
          continue;
        }
        TF_RETURN_IF_ERROR(rhs->ReplaceOperandWith(0, original_rhs_operand));
        ar0 = Cast<HloAllReduceInstruction>(lhs->mutable_operand(0));
        ar1 = Cast<HloAllReduceInstruction>(rhs->mutable_operand(0));
        reduce_scatter_pattern_match = true;
      } else {
        ar0 = Cast<HloAllReduceInstruction>(lhs);
        ar1 = Cast<HloAllReduceInstruction>(rhs);
      }
      if (!ReassociateAllReduceIsProfitable(lhs, rhs, inst)) {
        continue;
      }

      // Detect optional converts between the all-reduces and `inst`; their
      // presence means the all-reduce must be "promoted" to the wider type.
      HloInstruction* convert0 = nullptr;
      HloInstruction* convert1 = nullptr;
      if (!MatchOperandsToAllReduceWithOptionalConvert(inst, &convert0,
                                                       &convert1)) {
        VLOG(2) << "One or both inputs are type-converted.";
      }

      bool should_promote_ar = convert0 || convert1;
      if (should_promote_ar) {
        if (!reassociate_converted_ar_) {
          VLOG(2) << "Promotions of all_reduces for reassociation will be "
                     "disabled.";
          continue;
        }
        if (!AreCompatibleConverts(convert0, convert1)) {
          VLOG(2) << "Inputs' Converts are not preserving "
                     "value, skipping";
          continue;
        }
      }

      HloInstruction* op_operand0 = inst->mutable_operand(0);
      HloInstruction* op_operand1 = inst->mutable_operand(1);
      if (convert0) {
        op_operand0 = convert0->mutable_operand(0);
      }
      if (convert1) {
        op_operand1 = convert1->mutable_operand(0);
      }
      if (!AreCompatible(ar0, ar1, *kind, should_promote_ar)) {
        VLOG(2) << "All-Reduce operations are not compatible, skipping";
        continue;
      }
      VLOG(2) << "Reassociated:";
      VLOG(2) << "\tAR0: " << ar0->ToString();
      VLOG(2) << "\tAR1: " << ar1->ToString();

      auto op_users = inst->users();
      // The new op consumes the all-reduce *inputs* (with converts rewired
      // onto them when present), so the single new all-reduce runs last.
      HloInstruction* new_op_operand0 = ar0->mutable_operand(0);
      HloInstruction* new_op_operand1 = ar1->mutable_operand(0);
      if (convert0) {
        HloInstruction* ar0_operand = ar0->mutable_operand(0);
        TF_RETURN_IF_ERROR(convert0->ReplaceOperandWith(0, ar0_operand));
        new_op_operand0 = convert0;
      }
      if (convert1) {
        HloInstruction* ar1_operand = ar1->mutable_operand(0);
        TF_RETURN_IF_ERROR(convert1->ReplaceOperandWith(0, ar1_operand));
        new_op_operand1 = convert1;
      }

      HloInstruction* new_op = inst;
      if (should_promote_ar) {
        new_op = computation->AddInstruction(inst->CloneWithNewOperands(
            inst->shape(), {new_op_operand0, new_op_operand1}));
      } else if (reduce_scatter_pattern_match) {
        // The op now operates on the full (pre-slice) all-reduce shape.
        new_op = computation->AddInstruction(inst->CloneWithNewOperands(
            ar0->shape(), {new_op_operand0, new_op_operand1}));
      }
      Shape new_ar_out_shape = inst->shape();
      CHECK(!should_promote_ar || !reduce_scatter_pattern_match);
      if (should_promote_ar) {
        new_ar_out_shape.set_element_type(
            new_op_operand0->shape().element_type());
      } else if (reduce_scatter_pattern_match) {
        new_ar_out_shape = ar0->shape();
      } else {
        // Plain case: reuse `inst` itself as the combined op by bypassing
        // the old all-reduces.
        TF_RETURN_IF_ERROR(ar0->ReplaceAllUsesWith(ar0->mutable_operand(0)));
        TF_RETURN_IF_ERROR(ar1->ReplaceAllUsesWith(ar1->mutable_operand(0)));
      }

      HloInstruction* new_ar = computation->AddInstruction(
          ar0->CloneWithNewOperands(new_ar_out_shape, {new_op}));

      // Cloned collectives must not share a channel id with the originals.
      if (new_ar->channel_id()) {
        new_ar->set_channel_id(next_channel_id++);
      }

      if (should_promote_ar) {
        // Rebuild the reduction computation at the promoted element type.
        HloComputation* to_apply = new_ar->to_apply();
        PrimitiveType type = new_ar->shape().element_type();
        std::string name = absl::StrCat(to_apply->name(), "_reassoc_promoted");
        HloComputation::Builder promoted(name);
        auto x = promoted.AddInstruction(HloInstruction::CreateParameter(
            0, ShapeUtil::MakeShape(type, {}), "x"));
        auto y = promoted.AddInstruction(HloInstruction::CreateParameter(
            1, ShapeUtil::MakeShape(type, {}), "y"));
        promoted.AddInstruction(HloInstruction::CreateBinary(
            ShapeUtil::MakeShape(type, {}),
            to_apply->root_instruction()->opcode(), x, y));
        HloComputation* to_apply_promoted =
            inst->GetModule()->AddEmbeddedComputation(promoted.Build());
        new_ar->set_to_apply(to_apply_promoted);
        TF_RETURN_IF_ERROR(inst->ReplaceAllUsesWith(new_ar));
      } else if (reduce_scatter_pattern_match) {
        // Re-create the dynamic-slice on top of the new all-reduce.
        auto dyn_slice_operands = lhs->mutable_operands();
        dyn_slice_operands[0] = new_ar;
        HloInstruction* new_dyn_slice = inst->parent()->AddInstruction(
            lhs->CloneWithNewOperands(inst->shape(), dyn_slice_operands));
        TF_RETURN_IF_ERROR(inst->ReplaceUsesWith(op_users, new_dyn_slice));
      } else {
        TF_RETURN_IF_ERROR(inst->ReplaceUsesWith(op_users, new_ar));
      }

      // Remove the now-dead originals; guard against ar0 == ar1 / lhs == rhs
      // (both operands fed by the same instruction).
      if (should_promote_ar || reduce_scatter_pattern_match) {
        TF_RETURN_IF_ERROR(computation->RemoveInstruction(inst));
      }
      if (reduce_scatter_pattern_match) {
        TF_RETURN_IF_ERROR(computation->RemoveInstruction(lhs));
        if (lhs != rhs) {
          TF_RETURN_IF_ERROR(computation->RemoveInstruction(rhs));
        }
      }
      TF_RETURN_IF_ERROR(computation->RemoveInstruction(ar0));
      if (ar0 != ar1) {
        TF_RETURN_IF_ERROR(computation->RemoveInstruction(ar1));
      }
      changed = true;
    }
  }
  return changed;
}

}  // namespace xla
#include "xla/service/all_reduce_reassociate.h" #include <cstddef> #include <memory> #include <utility> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/algorithm/container.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/tests/hlo_test_base.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/statusor.h" namespace xla { namespace { namespace m = xla::testing::opcode_matchers; using ::testing::_; class AllReduceSimplifierTest : public HloTestBase { public: absl::StatusOr<std::unique_ptr<HloModule>> RunPass( absl::string_view hlo_module, bool expect_change, bool reassociate_converted_ar = false) { TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_module)); auto changed = AllReduceReassociate(reassociate_converted_ar).Run(module.get()); if (!changed.ok()) { return changed.status(); } EXPECT_EQ(changed.value(), expect_change); return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module)); } size_t AllReduceCount(std::unique_ptr<HloModule>& module) { return absl::c_count_if(module->entry_computation()->instructions(), HloPredicateIsOp<HloOpcode::kAllReduce>); } }; TEST_F(AllReduceSimplifierTest, Simple) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum ROOT add = f32[8] add(ar0, ar1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, true)); EXPECT_THAT(module->entry_computation()->root_instruction(), m::AllReduce(m::Add(m::Parameter(0), m::Parameter(1)))); 
EXPECT_EQ(AllReduceCount(module), 1); } TEST_F(AllReduceSimplifierTest, SimpleWithChannelId) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) ar0 = f32[8] all-reduce(p0), channel_id=1, replica_groups={}, to_apply=sum ar1 = f32[8] all-reduce(p1), channel_id=1, replica_groups={}, to_apply=sum ROOT add = f32[8] add(ar0, ar1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, true)); EXPECT_THAT(module->entry_computation()->root_instruction(), m::AllReduce(m::Add(m::Parameter(0), m::Parameter(1)))); EXPECT_EQ(AllReduceCount(module), 1); } TEST_F(AllReduceSimplifierTest, SimpleChain) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) p2 = f32[8] parameter(2) p3 = f32[8] parameter(3) ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum ar2 = f32[8] all-reduce(p2), replica_groups={}, to_apply=sum ar3 = f32[8] all-reduce(p3), replica_groups={}, to_apply=sum add0 = f32[8] add(ar0, ar1) add1 = f32[8] add(add0, ar2) ROOT add2 = f32[8] add(add1, ar3) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, true)); EXPECT_THAT( module->entry_computation()->root_instruction(), m::AllReduce(m::Add( m::Add(m::Add(m::Parameter(0), m::Parameter(1)), m::Parameter(2)), m::Parameter(3)))); EXPECT_EQ(AllReduceCount(module), 1); } TEST_F(AllReduceSimplifierTest, SimpleTree) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) p2 = f32[8] parameter(2) p3 = f32[8] parameter(3) ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum ar1 = f32[8] all-reduce(p1), 
replica_groups={}, to_apply=sum ar2 = f32[8] all-reduce(p2), replica_groups={}, to_apply=sum ar3 = f32[8] all-reduce(p3), replica_groups={}, to_apply=sum add0 = f32[8] add(ar0, ar1) add1 = f32[8] add(ar2, ar3) ROOT add2 = f32[8] add(add0, add1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, true)); EXPECT_THAT(module->entry_computation()->root_instruction(), m::AllReduce(m::Add(m::Add(m::Parameter(0), m::Parameter(1)), m::Add(m::Parameter(2), m::Parameter(3))))); EXPECT_EQ(AllReduceCount(module), 1); } TEST_F(AllReduceSimplifierTest, MismatchOp0) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } max { a = f32[] parameter(0) b = f32[] parameter(1) ROOT r = f32[] maximum(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=max ROOT add = f32[8] add(ar0, ar1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, false)); } TEST_F(AllReduceSimplifierTest, MismatchOp1) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } max { a = f32[] parameter(0) b = f32[] parameter(1) ROOT r = f32[] maximum(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=max ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=max ROOT add = f32[8] add(ar0, ar1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, false)); } TEST_F(AllReduceSimplifierTest, MismatchReplicaGroups) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) ar0 = f32[8] all-reduce(p0), replica_groups={{0}}, to_apply=sum ar1 = f32[8] all-reduce(p1), replica_groups={}, 
to_apply=sum ROOT add = f32[8] add(ar0, ar1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, false)); } TEST_F(AllReduceSimplifierTest, MismatchHasChannelId) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) ar0 = f32[8] all-reduce(p0), replica_groups={}, channel_id=3, to_apply=sum ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum ROOT add = f32[8] add(ar0, ar1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, false)); } TEST_F(AllReduceSimplifierTest, MismatchUseGlobalDeviceId) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) ar0 = f32[8] all-reduce(p0), replica_groups={{0, 1}}, channel_id=3, use_global_device_ids=true, to_apply=sum ar1 = f32[8] all-reduce(p1), replica_groups={{0, 1}}, channel_id=4, to_apply=sum ROOT add = f32[8] add(ar0, ar1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, false)); } TEST_F(AllReduceSimplifierTest, NotSingleUser) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum add = f32[8] add(ar0, ar1) ROOT t = (f32[8], f32[8]) tuple(ar0, add) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, false)); } TEST_F(AllReduceSimplifierTest, DoubleUse) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum add = f32[8] add(ar0, ar0) ROOT c 
= f32[8] copy(add) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, true)); } TEST_F(AllReduceSimplifierTest, PaddedUse) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum %constant.1 = f32[] constant(0) pad = f32[12]{0} pad(ar0, constant.1), padding=0_4 pad.1 = f32[12]{0} pad(ar1, constant.1), padding=0_4 ROOT add = f32[12] add(pad, pad.1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, true)); EXPECT_THAT(module->entry_computation()->root_instruction(), m::AllReduce(m::Add(m::Pad(m::Parameter(0), _), m::Pad(m::Parameter(1), _)))); EXPECT_EQ(AllReduceCount(module), 1); } TEST_F(AllReduceSimplifierTest, PaddedUseInvalidReduceValue) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum %constant.1 = f32[] constant(-1.0) pad = f32[12]{0} pad(ar0, constant.1), padding=0_4 pad.1 = f32[12]{0} pad(ar1, constant.1), padding=0_4 ROOT add = f32[12] add(pad, pad.1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, false)); EXPECT_EQ(AllReduceCount(module), 2); } TEST_F(AllReduceSimplifierTest, PaddedUseNotProfitable) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum %constant.1 = f32[] constant(0) pad = f32[17]{0} pad(ar0, constant.1), padding=0_9 pad.1 
= f32[17]{0} pad(ar1, constant.1), padding=0_9 ROOT add = f32[17] add(pad, pad.1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, false)); EXPECT_EQ(AllReduceCount(module), 2); } TEST_F(AllReduceSimplifierTest, PaddedUseDoubleUseNotProfitable) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum %constant.1 = f32[] constant(0) pad = f32[9]{0} pad(ar0, constant.1), padding=0_1 ROOT add = f32[9] add(pad, pad) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, false)); EXPECT_EQ(AllReduceCount(module), 1); } TEST_F(AllReduceSimplifierTest, ReshapeUse) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[1,8] parameter(0) p1 = f32[1,8] parameter(1) ar0 = f32[1,8] all-reduce(p0), replica_groups={}, to_apply=sum ar1 = f32[1,8] all-reduce(p1), replica_groups={}, to_apply=sum rshp0 = f32[8]{0} reshape(ar0) rshp1 = f32[8]{0} reshape(ar1) ROOT add = f32[8] add(rshp0, rshp1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, true)); EXPECT_THAT(module->entry_computation()->root_instruction(), m::AllReduce(m::Add(m::Reshape(m::Parameter(0)), m::Reshape(m::Parameter(1))))); EXPECT_EQ(AllReduceCount(module), 1); } TEST_F(AllReduceSimplifierTest, SliceUse) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum rshp0 = f32[4]{0} slice(ar0), slice={[0:4]} rshp1 = f32[4]{0} slice(ar1), slice={[0:4]} ROOT add = f32[4] add(rshp0, rshp1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, 
RunPass(hlo_string, true)); EXPECT_THAT(module->entry_computation()->root_instruction(), m::AllReduce(m::Add(m::Slice(m::Parameter(0)), m::Slice(m::Parameter(1))))); EXPECT_EQ(AllReduceCount(module), 1); } TEST_F(AllReduceSimplifierTest, ChainWithConvert) { absl::string_view hlo_string = R"( HloModule m add.1 { x.47 = bf16[] parameter(0) y.47 = bf16[] parameter(1) ROOT add.2532 = bf16[] add(x.47, y.47) } ENTRY main { p0 = bf16[8] parameter(0) p1 = bf16[8] parameter(1) p2 = bf16[8] parameter(2) p3 = bf16[8] parameter(3) ar0 = bf16[8] all-reduce(p0), replica_groups={}, to_apply=add.1 ar1 = bf16[8] all-reduce(p1), replica_groups={}, to_apply=add.1 ar2 = bf16[8] all-reduce(p2), replica_groups={}, to_apply=add.1 ar3 = bf16[8] all-reduce(p3), replica_groups={}, to_apply=add.1 convert0 = f32[8] convert(ar0) convert1 = f32[8] convert(ar1) add0 = f32[8] add(convert0, convert1) convert2 = f32[8] convert(ar2) add1 = f32[8] add(add0, convert2) convert3 = f32[8] convert(ar3) add2 = f32[8] add(add1, convert3) ROOT convert4 = bf16[8] convert(add2) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, true, true)); SCOPED_TRACE(module->ToString()); EXPECT_THAT( module->entry_computation()->root_instruction(), m::Convert(m::AllReduce(m::Add(m::Add(m::Add(m::Convert(m::Parameter(0)), m::Convert(m::Parameter(1))), m::Convert(m::Parameter(2))), m::Convert(m::Parameter(3)))))); EXPECT_EQ(AllReduceCount(module), 1); EXPECT_THAT( module->entry_computation()->root_instruction()->operand(0)->shape(), GmockMatch(::xla::match::Shape().WithElementType(F32))); } TEST_F(AllReduceSimplifierTest, AllreduceWithConvertIncompatibleType) { absl::string_view hlo_string = R"( HloModule m add.1 { x.47 = bf16[] parameter(0) y.47 = bf16[] parameter(1) ROOT add.2532 = bf16[] add(x.47, y.47) } max.1 { x.48 = bf16[] parameter(0) y.48 = bf16[] parameter(1) ROOT max.2533 = bf16[] maximum(x.48, y.48) } min.1 { x.49 = bf16[] parameter(0) y.49 = bf16[] parameter(1) ROOT min.2534 = bf16[] minimum(x.49, 
y.49) } mul.1 { x.50 = bf16[] parameter(0) y.50 = bf16[] parameter(1) ROOT mul.2535 = bf16[] multiply(x.50, y.50) } ENTRY main { p0 = bf16[8] parameter(0) p1 = bf16[8] parameter(1) p2 = bf16[8] parameter(2) p3 = bf16[8] parameter(3) ar0 = bf16[8] all-reduce(p0), replica_groups={}, to_apply=add.1 ar1 = bf16[8] all-reduce(p1), replica_groups={}, to_apply=max.1 ar2 = bf16[8] all-reduce(p2), replica_groups={}, to_apply=min.1 ar3 = bf16[8] all-reduce(p3), replica_groups={}, to_apply=mul.1 convert0 = f32[8] convert(ar0) convert1 = f32[8] convert(ar1) add0 = f32[8] add(convert0, convert1) convert2 = f32[8] convert(ar2) add1 = f32[8] add(add0, convert2) convert3 = f32[8] convert(ar3) add2 = f32[8] add(add1, convert3) ROOT convert4 = bf16[8] convert(add2) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, false)); SCOPED_TRACE(module->ToString()); } TEST_F(AllReduceSimplifierTest, AllreduceWithLossyConvert) { absl::string_view hlo_string = R"( HloModule m add.1 { x.47 = bf16[] parameter(0) y.47 = bf16[] parameter(1) ROOT add.2532 = bf16[] add(x.47, y.47) } ENTRY main { p0 = bf16[8] parameter(0) p1 = bf16[8] parameter(1) p2 = bf16[8] parameter(2) p3 = bf16[8] parameter(3) ar0 = bf16[8] all-reduce(p0), replica_groups={}, to_apply=add.1 ar1 = bf16[8] all-reduce(p1), replica_groups={}, to_apply=add.1 ar2 = bf16[8] all-reduce(p2), replica_groups={}, to_apply=add.1 ar3 = bf16[8] all-reduce(p3), replica_groups={}, to_apply=add.1 convert0 = u32[8] convert(ar0) convert1 = u32[8] convert(ar1) add0 = u32[8] add(convert0, convert1) convert2 = u32[8] convert(ar2) add1 = u32[8] add(add0, convert2) convert3 = u32[8] convert(ar3) add2 = u32[8] add(add1, convert3) ROOT convert4 = bf16[8] convert(add2) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, false)); SCOPED_TRACE(module->ToString()); } TEST_F(AllReduceSimplifierTest, AllReduceDynamicSlicePattern) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 
= f32[] add(a, b) } ENTRY main { p0 = f32[1,8] parameter(0) p1 = f32[1,8] parameter(1) p2 = f32[1,8] parameter(2) p3 = s32[] parameter(3) cst = s32[] constant(0) ar0 = f32[1,8] all-reduce(p0), replica_groups={}, to_apply=sum ar1 = f32[1,8] all-reduce(p1), replica_groups={}, to_apply=sum ar2 = f32[1,8] all-reduce(p2), replica_groups={}, to_apply=sum dyn0 = f32[1,4] dynamic-slice(ar0, cst, p3), dynamic_slice_sizes={1,4} dyn1 = f32[1,4] dynamic-slice(ar1, cst, p3), dynamic_slice_sizes={1,4} dyn2 = f32[1,4] dynamic-slice(ar2, cst, p3), dynamic_slice_sizes={1,4} add = f32[1,4] add(dyn0, dyn1) ROOT add1 = f32[1,4] add(add, dyn2) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, true)); EXPECT_THAT(module->entry_computation()->root_instruction(), m::DynamicSlice( m::AllReduce(m::Add(m::Add(m::Parameter(0), m::Parameter(1)), m::Parameter(2))), m::Constant(), m::Parameter(3))); XLA_VLOG_LINES(1, module->ToString()); EXPECT_EQ(AllReduceCount(module), 1); } TEST_F(AllReduceSimplifierTest, AllReduceDynamicSlicePatternSameOperand) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[1,8] parameter(0) p1 = f32[1,8] parameter(1) p2 = s32[] parameter(2) cst = s32[] constant(0) ar0 = f32[1,8] all-reduce(p0), replica_groups={}, to_apply=sum ar2 = f32[1,8] all-reduce(p1), replica_groups={}, to_apply=sum dyn0 = f32[1,4] dynamic-slice(ar0, cst, p2), dynamic_slice_sizes={1,4} dyn2 = f32[1,4] dynamic-slice(ar2, cst, p2), dynamic_slice_sizes={1,4} add = f32[1,4] add(dyn0, dyn0) ROOT add1 = f32[1,4] add(add, dyn2) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, true)); EXPECT_THAT(module->entry_computation()->root_instruction(), m::DynamicSlice( m::AllReduce(m::Add(m::Add(m::Parameter(0), m::Parameter(0)), m::Parameter(1))), m::Constant(), m::Parameter(2))); XLA_VLOG_LINES(1, module->ToString()); EXPECT_EQ(AllReduceCount(module), 1); } 
TEST_F(AllReduceSimplifierTest, AllReduceDynamicSliceDifferentSlices) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[1,8] parameter(0) p1 = f32[1,8] parameter(1) p2 = f32[1,16] parameter(2) p3 = s32[] parameter(3) cst = s32[] constant(0) ar0 = f32[1,8] all-reduce(p0), replica_groups={}, to_apply=sum ar1 = f32[1,8] all-reduce(p1), replica_groups={}, to_apply=sum ar2 = f32[1,16] all-reduce(p2), replica_groups={}, to_apply=sum dyn0 = f32[1,4] dynamic-slice(ar0, cst, p3), dynamic_slice_sizes={1,4} dyn1 = f32[1,4] dynamic-slice(ar1, cst, p3), dynamic_slice_sizes={1,4} dyn2 = f32[1,4] dynamic-slice(ar2, cst, p3), dynamic_slice_sizes={1,4} add = f32[1,4] add(dyn0, dyn1) ROOT add1 = f32[1,4] add(add, dyn2) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, true)); EXPECT_THAT( module->entry_computation()->root_instruction(), m::Add(m::DynamicSlice(), m::DynamicSlice(m::AllReduce(), m::Constant(), m::Parameter(3)))); XLA_VLOG_LINES(1, module->ToString()); EXPECT_EQ(AllReduceCount(module), 2); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_reassociate.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_reassociate_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
1b0d1084-8d5e-4768-bdf2-88e35ad4abd6
cpp
tensorflow/tensorflow
batch_dot_simplification
third_party/xla/xla/service/batch_dot_simplification.cc
third_party/xla/xla/service/batch_dot_simplification_test.cc
#include "xla/service/batch_dot_simplification.h" #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/log/log.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/hlo_creation_utils.h" #include "xla/shape.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { absl::StatusOr<bool> BatchDotSimplification::ElideDegenerateBatchDimensionFromBatchDot( HloInstruction* batch_dot) { if (Cast<HloDotInstruction>(batch_dot)->sparse_operands()) { return false; } const auto& is_iota = [](absl::Span<const int64_t> dims) { for (int64_t i = 0; i < dims.size(); ++i) { if (dims[i] != i) { return false; } } return true; }; if (!absl::c_equal( batch_dot->dot_dimension_numbers().lhs_batch_dimensions(), batch_dot->dot_dimension_numbers().rhs_batch_dimensions()) || !is_iota(batch_dot->dot_dimension_numbers().lhs_batch_dimensions())) { return false; } const DotDimensionNumbers& dim_numbers = batch_dot->dot_dimension_numbers(); HloInstruction *lhs = batch_dot->mutable_operand(0), *rhs = batch_dot->mutable_operand(1); const Shape& lhs_shape = lhs->shape(); if (dim_numbers.lhs_contracting_dimensions_size() != 1) { return false; } std::vector<int64_t> degenerate_dims; for (int64_t batch_dim : dim_numbers.lhs_batch_dimensions()) { if (lhs_shape.dimensions(batch_dim) == 1) { degenerate_dims.push_back(batch_dim); } } if (degenerate_dims.empty()) { return false; } TF_ASSIGN_OR_RETURN(HloInstruction * new_lhs, ElideDegenerateDims(lhs, degenerate_dims)); TF_ASSIGN_OR_RETURN(HloInstruction * new_rhs, ElideDegenerateDims(rhs, degenerate_dims)); DotDimensionNumbers new_dim_numbers = dim_numbers; 
new_dim_numbers.clear_lhs_batch_dimensions(); new_dim_numbers.clear_rhs_batch_dimensions(); for (int64_t i = 0, e = dim_numbers.lhs_batch_dimensions_size() - degenerate_dims.size(); i < e; i++) { new_dim_numbers.add_lhs_batch_dimensions(i); new_dim_numbers.add_rhs_batch_dimensions(i); } new_dim_numbers.set_lhs_contracting_dimensions( 0, new_dim_numbers.lhs_contracting_dimensions(0) - degenerate_dims.size()); new_dim_numbers.set_rhs_contracting_dimensions( 0, new_dim_numbers.rhs_contracting_dimensions(0) - degenerate_dims.size()); TF_ASSIGN_OR_RETURN( HloInstruction * new_dot, MakeDotHlo(new_lhs, new_rhs, new_dim_numbers, batch_dot->precision_config(), batch_dot->shape().element_type())); TF_ASSIGN_OR_RETURN(HloInstruction * new_dot_reshaped, MakeReshapeHlo(batch_dot->shape(), new_dot)); VLOG(2) << "Replaced " << batch_dot->ToString() << " with " << new_dot->ToString(); TF_RETURN_IF_ERROR( batch_dot->parent()->ReplaceInstruction(batch_dot, new_dot_reshaped)); return true; } absl::StatusOr<bool> BatchDotSimplification::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; std::vector<HloInstruction*> dot_instrs; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { absl::c_copy_if(computation->instructions(), std::back_inserter(dot_instrs), [](HloInstruction* instr) { return instr->opcode() == HloOpcode::kDot; }); } for (HloInstruction* dot_instr : dot_instrs) { TF_ASSIGN_OR_RETURN(bool elided_batch_dim_from_one, ElideDegenerateBatchDimensionFromBatchDot(dot_instr)); changed |= elided_batch_dim_from_one; } return changed; } }
#include "xla/service/batch_dot_simplification.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/test.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" namespace xla { namespace { namespace op = xla::testing::opcode_matchers; class BatchDotSimplificationTest : public HloTestBase {}; TEST_F(BatchDotSimplificationTest, ElideSingleDegenerateBatchDotDim_VectorVector) { const std::string hlo_text = R"( HloModule BatchDot main { a = f32[1,3] parameter(0) b = f32[1,3] parameter(1) ROOT dot = f32[1] dot(a, b), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_contracting_dims={1} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m, ParseAndReturnVerifiedModule(hlo_text)); BatchDotSimplification pass; ASSERT_TRUE(pass.Run(m.get()).value()); HloInstruction* root = m->entry_computation()->root_instruction(); EXPECT_THAT(root, op::Reshape(op::Dot( op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)), 0, 0))); } TEST_F(BatchDotSimplificationTest, ElideSingleDegenerateBatchDotDim_MatrixVector) { const std::string hlo_text = R"( HloModule BatchDot main { a = f32[1,9,3] parameter(0) b = f32[1,3] parameter(1) ROOT dot = f32[1,9] dot(a, b), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m, ParseAndReturnVerifiedModule(hlo_text)); BatchDotSimplification pass; ASSERT_TRUE(pass.Run(m.get()).value()); HloInstruction* root = m->entry_computation()->root_instruction(); EXPECT_THAT(root, op::Reshape(op::Dot( op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)), 1, 0))); } TEST_F(BatchDotSimplificationTest, ElideSingleDegenerateBatchDotDim_MatrixMatrix) { const std::string hlo_text = R"( HloModule BatchDot main { a = f32[1,9,3] parameter(0) b = f32[1,3,7] parameter(1) ROOT dot = f32[1,9,7] dot(a, b), lhs_batch_dims={0}, rhs_batch_dims={0}, 
lhs_contracting_dims={2}, rhs_contracting_dims={1} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m, ParseAndReturnVerifiedModule(hlo_text)); BatchDotSimplification pass; ASSERT_TRUE(pass.Run(m.get()).value()); HloInstruction* root = m->entry_computation()->root_instruction(); EXPECT_THAT(root, op::Reshape(op::Dot( op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)), 1, 0))); } TEST_F(BatchDotSimplificationTest, ElideMultipleDegenerateBatchDotDims_VectorVector) { const std::string hlo_text = R"( HloModule BatchDot main { a = f32[9,1,7,1,3] parameter(0) b = f32[9,1,7,1,3] parameter(1) ROOT dot = f32[9,1,7,1] dot(a, b), lhs_batch_dims={0,1,2,3}, rhs_batch_dims={0,1,2,3}, lhs_contracting_dims={4}, rhs_contracting_dims={4} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m, ParseAndReturnVerifiedModule(hlo_text)); BatchDotSimplification pass; ASSERT_TRUE(pass.Run(m.get()).value()); HloInstruction* root = m->entry_computation()->root_instruction(); EXPECT_THAT(root, op::Reshape(op::Dot( op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)), 2, 2))); } TEST_F(BatchDotSimplificationTest, ElideMultipleDegenerateBatchDotDims_VectorMatrix) { const std::string hlo_text = R"( HloModule BatchDot main { a = f32[9,1,7,1,3] parameter(0) b = f32[9,1,7,1,20,3] parameter(1) ROOT dot = f32[9,1,7,1,20] dot(a, b), lhs_batch_dims={0,1,2,3}, rhs_batch_dims={0,1,2,3}, lhs_contracting_dims={4}, rhs_contracting_dims={5} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m, ParseAndReturnVerifiedModule(hlo_text)); BatchDotSimplification pass; ASSERT_TRUE(pass.Run(m.get()).value()); HloInstruction* root = m->entry_computation()->root_instruction(); EXPECT_THAT(root, op::Reshape(op::Dot( op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)), 2, 3))); } TEST_F(BatchDotSimplificationTest, ElideMultipleDegenerateBatchDotDims_MatrixMatrix) { const std::string hlo_text = R"( HloModule BatchDot main { a = f32[9,1,7,1,19,3] parameter(0) b = 
f32[9,1,7,1,3,20] parameter(1) ROOT dot = f32[9,1,7,1,19,20] dot(a, b), lhs_batch_dims={0,1,2,3}, rhs_batch_dims={0,1,2,3}, lhs_contracting_dims={5}, rhs_contracting_dims={4} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m, ParseAndReturnVerifiedModule(hlo_text)); BatchDotSimplification pass; ASSERT_TRUE(pass.Run(m.get()).value()); HloInstruction* root = m->entry_computation()->root_instruction(); EXPECT_THAT(root, op::Reshape(op::Dot( op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)), 3, 2))); } TEST_F(BatchDotSimplificationTest, ElideMultipleDegenerateBatchDotDimsNonContracting) { const char* hlo_text = R"( HloModule BatchDot main { a = f32[1,101] parameter(0) b = f32[1,101] parameter(1) ROOT dot = f32[1,101,101] dot(a,b), lhs_batch_dims={0}, lhs_contracting_dims={}, rhs_batch_dims={0}, rhs_contracting_dims={} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m, ParseAndReturnVerifiedModule(hlo_text)); BatchDotSimplification pass; ASSERT_FALSE(pass.Run(m.get()).value()); } TEST_F(BatchDotSimplificationTest, ElideMultipleDegenerateBatchDotDimsMultipleContracting) { const char* hlo_text = R"( HloModule BatchDot main { lhs = f32[1,5,17,10,13] parameter(0) rhs = f32[1,9,10,13,6,5] parameter(1) ROOT dot = f32[10,1,17,9,6] dot(lhs,rhs), lhs_batch_dims={3,0}, rhs_batch_dims={2,0}, lhs_contracting_dims={1,4}, rhs_contracting_dims={5,3} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m, ParseAndReturnVerifiedModule(hlo_text)); BatchDotSimplification pass; ASSERT_FALSE(pass.Run(m.get()).value()); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/batch_dot_simplification.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/batch_dot_simplification_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
51a7fed9-c52a-4155-b34f-5a1bf3110f66
cpp
tensorflow/tensorflow
hlo_liveness_analysis
third_party/xla/xla/service/hlo_liveness_analysis.cc
third_party/xla/xla/service/hlo_liveness_analysis_test.cc
#include "xla/service/hlo_liveness_analysis.h"

#include <cstddef>
#include <cstdint>
#include <deque>
#include <functional>
#include <memory>

#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/call_graph.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"

namespace xla {
namespace {

// Worklist-driven liveness propagation. The worklist holds instructions whose
// liveness information changed and must be (re)visited; the workset mirrors
// the worklist's contents so an instruction is enqueued at most once per
// pending visit.
using Worklist = std::deque<const HloInstruction*>;
using Workset = absl::flat_hash_set<const HloInstruction*>;

// Enqueues `instruction` for (re)visiting, unless it is already pending.
void AddToWorklist(const HloInstruction* instruction, Worklist* worklist,
                   Workset* workset) {
  // insert().second is true only on first insertion, which dedupes enqueues.
  if (workset->insert(instruction).second) {
    worklist->push_back(instruction);
    VLOG(3) << "ADD instruction: " << instruction->name();
  }
}

using VisitorFunction = absl::FunctionRef<void(const ShapeIndex& )>;

// Invokes `func` on every ShapeIndex of `index_tree` currently marked live.
void ForEachLiveIndex(const ShapeTree<bool>& index_tree, VisitorFunction func) {
  index_tree.ForEachElement([&](const ShapeIndex& shape_index, bool live) {
    if (live) {
      func(shape_index);
    }
  });
}

// Marks `instruction` live at `shape_index`. The per-instruction ShapeTree is
// created lazily (all-false) on first touch. If this call flips the bit from
// dead to live, the instruction is enqueued so the new liveness propagates.
void MarkLiveAtIndex(const HloInstruction* instruction,
                     const ShapeIndex& shape_index,
                     HloLivenessAnalysis::HloIndexMap* live_index_map,
                     Worklist* worklist, Workset* workset) {
  std::unique_ptr<ShapeTree<bool>>& liveness = (*live_index_map)[instruction];
  if (liveness == nullptr) {
    liveness = std::make_unique<ShapeTree<bool>>(instruction->shape(), false);
  }
  bool& alive = *liveness->mutable_element(shape_index);
  if (!alive) {
    AddToWorklist(instruction, worklist, workset);
    alive = true;
    VLOG(3) << "MARK instruction: " << instruction->name()
            << " shape_index: " << shape_index;
  }
}

// Marks `instruction` live at every ShapeIndex of its shape. Enqueues the
// instruction only if at least one index actually transitioned to live.
void MarkLiveAtAllIndices(const HloInstruction* instruction,
                          HloLivenessAnalysis::HloIndexMap* live_index_map,
                          Worklist* worklist, Workset* workset) {
  bool add_to_worklist = false;
  std::unique_ptr<ShapeTree<bool>>& liveness = (*live_index_map)[instruction];
  if (liveness == nullptr) {
    // First touch: build the tree with every element already live.
    liveness = std::make_unique<ShapeTree<bool>>(instruction->shape(), true);
    add_to_worklist = true;
  } else {
    for (auto& entry : *liveness) {
      if (!entry.second) {
        add_to_worklist = true;
        entry.second = true;
        VLOG(3) << "MARK instruction: " << instruction->name()
                << " shape_index: " << entry.first;
      }
    }
  }
  if (add_to_worklist) {
    AddToWorklist(instruction, worklist, workset);
  }
}

// Propagates liveness from a kTuple instruction to its operands: a live index
// {i, rest...} on the tuple makes operand i live at {rest...} (and at its
// root {}).
void PropagateLivenessThroughTuple(
    const HloInstruction* instruction,
    HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,
    Workset* workset) {
  CHECK_EQ(instruction->opcode(), HloOpcode::kTuple);
  const ShapeTree<bool>& index_tree = *live_index_map->at(instruction);
  ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {
    const size_t size = shape_index.size();
    if (size == 0) {
      // The tuple's own root index has no corresponding operand element.
      return;
    }
    const int64_t operand_index = shape_index[0];
    if (operand_index >= instruction->operand_count()) {
      return;
    }
    // The operand's root buffer is live whenever any sub-index of it is.
    MarkLiveAtIndex(instruction->operand(operand_index), {}, live_index_map,
                    worklist, workset);
    // Strip the leading tuple-element index to address into the operand.
    ShapeIndex operand_shape_index(size - 1);
    for (int i = 1; i < size; ++i) {
      operand_shape_index[i - 1] = shape_index[i];
    }
    MarkLiveAtIndex(instruction->operand(operand_index), operand_shape_index,
                    live_index_map, worklist, workset);
  });
}

// Propagates liveness from a kGetTupleElement to its tuple operand: a live
// index {rest...} on the GTE makes the operand live at {tuple_index, rest...}.
void PropagateLivenessThroughGTE(
    const HloInstruction* instruction,
    HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,
    Workset* workset) {
  CHECK_EQ(instruction->opcode(), HloOpcode::kGetTupleElement);
  // The operand's root buffer is live if the GTE itself is live at all.
  MarkLiveAtIndex(instruction->operand(0), {}, live_index_map, worklist,
                  workset);
  const ShapeTree<bool>& index_tree = *live_index_map->at(instruction);
  ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {
    ShapeIndex operand_shape_index(shape_index);
    operand_shape_index.push_front(instruction->tuple_index());
    MarkLiveAtIndex(instruction->operand(0), operand_shape_index,
                    live_index_map, worklist, workset);
  });
}

// Propagates liveness from a kWhile: each live index on the while is live on
// the body root and on the init operand; the condition's root is always live.
void PropagateLivenessThroughWhile(
    const HloInstruction* instruction,
    HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,
    Workset* workset) {
  CHECK_EQ(instruction->opcode(), HloOpcode::kWhile);
  const ShapeTree<bool>& index_tree = *live_index_map->at(instruction);
  ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {
    MarkLiveAtIndex(instruction->while_body()->root_instruction(), shape_index,
                    live_index_map, worklist, workset);
    MarkLiveAtIndex(instruction->operand(0), shape_index, live_index_map,
                    worklist, workset);
  });
  // The loop condition's result decides termination, so its root is live.
  MarkLiveAtIndex(instruction->while_condition()->root_instruction(), {},
                  live_index_map, worklist, workset);
}

// Propagates liveness from a kParameter back to its while callers: each live
// index on the parameter is live on the while instruction itself, on the
// while body's root, and on the while's init operand. Only applies when the
// parameter's computation is called in a control-flow context.
void PropagateLivenessToParameterCallers(
    const HloInstruction* instruction,
    HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,
    Workset* workset, CallGraph* call_graph) {
  CHECK_EQ(instruction->opcode(), HloOpcode::kParameter);
  const CallGraphNode& call_graph_node =
      call_graph->GetNode(instruction->parent());
  if (call_graph_node.context() == CallContext::kControlFlow) {
    for (const CallSite& callsite : call_graph_node.caller_callsites()) {
      if (callsite.instruction()->opcode() == HloOpcode::kWhile) {
        auto* xla_while = callsite.instruction();
        const ShapeTree<bool>& index_tree = *live_index_map->at(instruction);
        ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {
          MarkLiveAtIndex(xla_while, shape_index, live_index_map, worklist,
                          workset);
          MarkLiveAtIndex(xla_while->while_body()->root_instruction(),
                          shape_index, live_index_map, worklist, workset);
          MarkLiveAtIndex(xla_while->operand(0), shape_index, live_index_map,
                          worklist, workset);
        });
      }
    }
  }
}

// For an instruction living inside a control-flow called computation, marks
// the caller-side values that keep it reachable: a while caller's condition
// root, or — for a conditional caller — the predicate (operand 0), the
// conditional itself, and the branch operand feeding this computation
// (operand index = branch index + 1, since operand 0 is the predicate).
void PropagateLivenessThroughControlFlow(
    const HloInstruction* instruction,
    HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,
    Workset* workset, CallGraph* call_graph) {
  const CallGraphNode& call_graph_node =
      call_graph->GetNode(instruction->parent());
  if (call_graph_node.context() == CallContext::kControlFlow) {
    for (const CallSite& callsite : call_graph_node.caller_callsites()) {
      HloInstruction* caller = callsite.instruction();
      if (caller->opcode() == HloOpcode::kWhile) {
        MarkLiveAtIndex(caller->while_condition()->root_instruction(), {},
                        live_index_map, worklist, workset);
      } else if (caller->opcode() == HloOpcode::kConditional) {
        // The predicate and the conditional's own root are live.
        MarkLiveAtIndex(caller->operand(0), {}, live_index_map, worklist,
                        workset);
        MarkLiveAtIndex(caller, {}, live_index_map, worklist, workset);
        // Find which branch computation this instruction belongs to; its
        // matching branch operand starts at operand 1.
        const HloComputation* callee_comp = instruction->parent();
        int64_t operand_index = 1;
        for (auto* caller_comp : caller->called_computations()) {
          if (callee_comp == caller_comp) {
            MarkLiveAtIndex(caller->operand(operand_index), {}, live_index_map,
                            worklist, workset);
            if (instruction->opcode() == HloOpcode::kParameter) {
              // Mirror the parameter's live indices onto the branch operand.
              const ShapeTree<bool>& index_tree =
                  *live_index_map->at(instruction);
              ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {
                MarkLiveAtIndex(caller->operand(operand_index), shape_index,
                                live_index_map, worklist, workset);
              });
            }
            break;
          }
          ++operand_index;
        }
      }
    }
  }
}

}  // namespace

HloLivenessAnalysis::HloLivenessAnalysis(const HloModule& module)
    : module_(module), call_graph_(CallGraph::Build(&module)) {}

// Runs the fixpoint: seeds liveness at the entry root and at all
// side-effecting instructions, then drains the worklist, propagating liveness
// backwards through tuples, GTEs, whiles, parameters, and control flow until
// nothing new becomes live.
void HloLivenessAnalysis::RunAnalysis() {
  Worklist worklist;
  Workset workset;
  // The entry computation's result is always live at every index.
  MarkLiveAtAllIndices(module_.entry_computation()->root_instruction(),
                       &live_index_map_, &worklist, &workset);
  // Side-effecting instructions (e.g. outfeed/send) must be kept alive
  // regardless of whether their values are consumed.
  for (auto* computation : module_.computations()) {
    for (auto* instruction : computation->instructions()) {
      if (instruction->HasSideEffectNoRecurse()) {
        MarkLiveAtAllIndices(instruction, &live_index_map_, &worklist,
                             &workset);
      }
    }
  }
  while (!worklist.empty()) {
    const HloInstruction* instruction = worklist.front();
    worklist.pop_front();
    workset.erase(workset.find(instruction));
    VLOG(1) << "VISIT instruction: " << instruction->name();
    if (instruction->opcode() == HloOpcode::kTuple) {
      PropagateLivenessThroughTuple(instruction, &live_index_map_, &worklist,
                                    &workset);
    } else if (instruction->opcode() == HloOpcode::kGetTupleElement) {
      PropagateLivenessThroughGTE(instruction, &live_index_map_, &worklist,
                                  &workset);
    } else if (instruction->opcode() == HloOpcode::kWhile) {
      PropagateLivenessThroughWhile(instruction, &live_index_map_, &worklist,
                                    &workset);
    } else if (instruction->opcode() == HloOpcode::kParameter) {
      PropagateLivenessToParameterCallers(instruction, &live_index_map_,
                                          &worklist, &workset,
                                          call_graph_.get());
    } else {
      // Default (non-tuple-aware) case: conservatively mark every operand and
      // every called computation's root fully live.
      for (auto* called_computation : instruction->called_computations()) {
        MarkLiveAtAllIndices(called_computation->root_instruction(),
                             &live_index_map_, &worklist, &workset);
      }
      for (HloInstruction* operand : instruction->operands()) {
        MarkLiveAtAllIndices(operand, &live_index_map_, &worklist, &workset);
      }
    }
    // Independently of the opcode-specific handling above, keep the caller's
    // control-flow machinery (predicates, conditions) alive.
    PropagateLivenessThroughControlFlow(instruction, &live_index_map_,
                                        &worklist, &workset,
                                        call_graph_.get());
  }
}

// Returns true iff `instruction` was marked live at `shape_index`.
// Instructions never touched by the analysis have no entry and are dead.
bool HloLivenessAnalysis::IsLive(const HloInstruction* instruction,
                                 const ShapeIndex& shape_index) const {
  auto it = live_index_map_.find(instruction);
  return (it != live_index_map_.end()) && it->second->element(shape_index);
}

// Factory entry point: builds the analysis for `module` and runs it to
// completion before returning.
absl::StatusOr<std::unique_ptr<HloLivenessAnalysis>> HloLivenessAnalysis::Run(
    const HloModule& module) {
  VLOG(1) << "HloLivenessAnalysis::Run on module " << module.name();
  XLA_VLOG_LINES(2, module.ToString());
  auto liveness_analysis = absl::WrapUnique(new HloLivenessAnalysis(module));
  liveness_analysis->RunAnalysis();
  return std::move(liveness_analysis);
}

}  // namespace xla
#include "xla/service/hlo_liveness_analysis.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/literal.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/test.h" #include "xla/test_helpers.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/logging.h" #include "tsl/platform/test.h" namespace xla { namespace { class HloLivenessAnalysisTest : public HloTestBase { protected: HloLivenessAnalysisTest() {} const HloLivenessAnalysis& RunLiveness(HloModule* module) { liveness_ = HloLivenessAnalysis::Run(*module).value(); return *liveness_; } HloInstruction* GetInstruction(HloModule* module, const std::string& name) { HloInstruction* to_return = nullptr; for (auto* comp : module->computations()) { for (auto* inst : comp->instructions()) { if (inst->name() == name) { to_return = inst; break; } } } return CHECK_NOTNULL(to_return); } std::unique_ptr<HloLivenessAnalysis> liveness_; }; TEST_F(HloLivenessAnalysisTest, AddAtEntryRoot) { auto module = ParseAndReturnVerifiedModule(R"( HloModule SimpleModule ENTRY SimpleComputation { constant.1 = s32[] constant(0) constant.2 = s32[] constant(1) ROOT add = s32[] add(constant.1, constant.2) })") .value(); const HloLivenessAnalysis& liveness = RunLiveness(module.get()); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {})); } TEST_F(HloLivenessAnalysisTest, DeadAdd) { auto module = ParseAndReturnVerifiedModule(R"( HloModule SimpleModule ENTRY SimpleComputation { constant.1 = s32[] constant(0) constant.2 = s32[] constant(1) add.1 = s32[] add(constant.1, constant.2) ROOT add.2 = s32[] add(constant.1, constant.2) })") .value(); const HloLivenessAnalysis& liveness = RunLiveness(module.get()); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add.2"), {})); 
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {})); EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "add.1"), {})); } TEST_F(HloLivenessAnalysisTest, TupleAtEntryRoot) { auto module = ParseAndReturnVerifiedModule(R"( HloModule SimpleModule ENTRY SimpleComputation { constant.1 = s32[] constant(0) constant.2 = s32[] constant(1) ROOT tuple.1 = (s32[], s32[]) tuple(constant.1, constant.2) })") .value(); const HloLivenessAnalysis& liveness = RunLiveness(module.get()); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {})); } TEST_F(HloLivenessAnalysisTest, NestedTupleAtEntryRoot) { auto module = ParseAndReturnVerifiedModule(R"( HloModule SimpleModule ENTRY SimpleComputation { constant.1 = s32[] constant(1) constant.2 = s32[] constant(2) constant.3 = s32[] constant(3) tuple.1 = (s32[], s32[]) tuple(constant.2, constant.3) ROOT tuple.2 = (s32[], (s32[], s32[])) tuple(constant.1, tuple.1) })") .value(); const HloLivenessAnalysis& liveness = RunLiveness(module.get()); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {0})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 0})); 
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 1})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {})); } TEST_F(HloLivenessAnalysisTest, GteOfTuple) { auto module = ParseAndReturnVerifiedModule(R"( HloModule SimpleModule ENTRY SimpleComputation { constant.1 = s32[] constant(0) constant.2 = s32[] constant(1) tuple.1 = (s32[], s32[]) tuple(constant.1, constant.2) ROOT get-tuple-element.1 = s32[] get-tuple-element(tuple.1), index=0 })") .value(); const HloLivenessAnalysis& liveness = RunLiveness(module.get()); EXPECT_TRUE( liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.1"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0})); EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {})); EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {})); } TEST_F(HloLivenessAnalysisTest, GteOfNestedTuple) { auto module = ParseAndReturnVerifiedModule(R"( HloModule SimpleModule ENTRY SimpleComputation { constant.1 = s32[] constant(0) constant.2 = s32[] constant(1) constant.3 = s32[] constant(2) tuple.1 = (s32[], s32[]) tuple(constant.2, constant.3) tuple.2 = (s32[], (s32[], s32[])) tuple(constant.1, tuple.1) ROOT get-tuple-element.1 = (s32[], s32[]) get-tuple-element(tuple.2), index=1 })") .value(); const HloLivenessAnalysis& liveness = RunLiveness(module.get()); EXPECT_TRUE( liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.1"), {})); EXPECT_TRUE(liveness.IsLive( GetInstruction(module.get(), "get-tuple-element.1"), {0})); EXPECT_TRUE(liveness.IsLive( GetInstruction(module.get(), "get-tuple-element.1"), {1})); 
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {})); EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {0})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 0})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 1})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1})); EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {})); } TEST_F(HloLivenessAnalysisTest, GteOfGteOfNestedTuple) { auto module = ParseAndReturnVerifiedModule(R"( HloModule SimpleModule ENTRY SimpleComputation { constant.1 = s32[] constant(0) constant.2 = s32[] constant(1) constant.3 = s32[] constant(2) tuple.1 = (s32[], s32[]) tuple(constant.2, constant.3) tuple.2 = (s32[], (s32[], s32[])) tuple(constant.1, tuple.1) get-tuple-element.1 = (s32[], s32[]) get-tuple-element(tuple.2), index=1 ROOT get-tuple-element.2 = s32[] get-tuple-element(get-tuple-element.1), index=0 })") .value(); const HloLivenessAnalysis& liveness = RunLiveness(module.get()); EXPECT_TRUE( liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.2"), {})); EXPECT_TRUE( liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.1"), {})); EXPECT_TRUE(liveness.IsLive( GetInstruction(module.get(), "get-tuple-element.1"), {0})); EXPECT_FALSE(liveness.IsLive( GetInstruction(module.get(), "get-tuple-element.1"), {1})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {})); EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {0})); 
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 0})); EXPECT_FALSE( liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 1})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0})); EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1})); EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {})); EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {})); } TEST_F(HloLivenessAnalysisTest, WhileWithDeadTupleElement) { auto module = ParseAndReturnVerifiedModule(R"( HloModule SimpleLoop SimpleLoop.body { loop_var.1 = (s32[], s32[3]{0}) parameter(0) get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0 constant.1 = s32[] constant(1) add.0 = s32[] add(get-tuple-element.1, constant.1) get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1 multiply.0 = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2) ROOT tuple.0 = (s32[], s32[3]{0}) tuple(add.0, multiply.0) } SimpleLoop.condition { loop_var.2 = (s32[], s32[3]{0}) parameter(0) get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0 constant.2 = s32[] constant(5) ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT } ENTRY SimpleLoop { constant.3 = s32[] constant(0) constant.4 = s32[3]{0} constant({0, 1, 2}) tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4) while.0 = (s32[], s32[3]{0}) while(tuple.1), condition= SimpleLoop.condition, body=SimpleLoop.body ROOT get-tuple-element.4 = s32[] get-tuple-element(while.0), index=0 })") .value(); const HloLivenessAnalysis& liveness = RunLiveness(module.get()); EXPECT_TRUE( liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.4"), {})); 
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {0})); EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {1})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0})); EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {0})); EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {1})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add.0"), {})); EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "multiply.0"), {})); } TEST_F(HloLivenessAnalysisTest, WhileCondPropagatesLiveness) { auto module = ParseAndReturnVerifiedModule(R"( HloModule SimpleLoop add_S32 { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } SimpleLoop.body { loop_var.1 = (s32[], s32[3]{0}) parameter(0) get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0 constant.1 = s32[] constant(1) add.0 = s32[] add(get-tuple-element.1, constant.1) get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1 multiply.0 = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2) ROOT tuple.0 = (s32[], s32[3]{0}) tuple(add.0, multiply.0) } SimpleLoop.condition { loop_var.2 = (s32[], s32[3]{0}) parameter(0) get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0 get-tuple-element.4 = s32[3]{0} get-tuple-element(loop_var.2), index=1 zero = s32[] constant(0) reduce = s32[] reduce(get-tuple-element.4, zero), dimensions={0}, to_apply=add_S32 add.1 = s32[] add(get-tuple-element.3, reduce) constant.2 = s32[] constant(5) ROOT less-than = pred[] compare(add.1, 
constant.2), direction=LT } ENTRY SimpleLoop { constant.3 = s32[] constant(0) constant.4 = s32[3]{0} constant({0, 1, 2}) tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4) while.0 = (s32[], s32[3]{0}) while(tuple.1), condition= SimpleLoop.condition, body=SimpleLoop.body ROOT get-tuple-element.5 = s32[] get-tuple-element(while.0), index=0 })") .value(); const HloLivenessAnalysis& liveness = RunLiveness(module.get()); EXPECT_TRUE( liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.5"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {0})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {1})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.4"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {0})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {1})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add.1"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "multiply.0"), {})); } TEST_F(HloLivenessAnalysisTest, WhileWithLiveTupleElements) { auto module = ParseAndReturnVerifiedModule(R"( HloModule SimpleLoop SimpleLoop.body { loop_var.1 = (s32[], s32[], s32[]) parameter(0) get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0 get-tuple-element.2 = s32[] get-tuple-element(loop_var.1), index=1 add.1 = s32[] add(get-tuple-element.1, get-tuple-element.2) get-tuple-element.3 = s32[] get-tuple-element(loop_var.1), index=2 multiply.1 = s32[] 
multiply(get-tuple-element.3, get-tuple-element.3) ROOT tuple.1 = (s32[], s32[], s32[]) tuple(add.1, get-tuple-element.3, multiply.1) } SimpleLoop.condition { loop_var.2 = (s32[], s32[], s32[]) parameter(0) get-tuple-element.4 = s32[] get-tuple-element(loop_var.2), index=0 constant.1 = s32[] constant(5) ROOT less-than = pred[] compare(get-tuple-element.4, constant.1), direction=LT } ENTRY SimpleLoop { constant.2 = s32[] constant(0) constant.3 = s32[] constant(1) constant.4 = s32[] constant(2) tuple.2 = (s32[], s32[], s32[]) tuple(constant.2, constant.3, constant.4) while.1 = (s32[], s32[], s32[]) while(tuple.2), condition= SimpleLoop.condition, body=SimpleLoop.body ROOT get-tuple-element.5 = s32[] get-tuple-element(while.1), index=0 })") .value(); const HloLivenessAnalysis& liveness = RunLiveness(module.get()); EXPECT_TRUE( liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.5"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.1"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.1"), {0})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.1"), {1})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.1"), {2})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {0})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {2})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {2})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "loop_var.1"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "loop_var.1"), {0})); 
EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "loop_var.1"), {1})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "loop_var.1"), {2})); } TEST_F(HloLivenessAnalysisTest, WhileWithOutfeed) { auto module = ParseAndReturnVerifiedModule(R"( HloModule OutfeedLoop WhileBody { body_param = (s32[]) parameter(0) token0 = token[] after-all() constant.2 = s32[] constant(2) outfeed_tuple = (s32[]) outfeed(constant.2, token0) get-tuple-element.1 = s32[] get-tuple-element(body_param), index=0 constant.1 = s32[] constant(1) add = s32[] add(get-tuple-element.1, constant.1) ROOT tuple = (s32[]) tuple(add) } WhileCondition { cond_param = (s32[]) parameter(0) get-tuple-element.3 = s32[] get-tuple-element(cond_param), index=0 constant.2 = s32[] constant(10) ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT } ENTRY SimpleLoop { constant.3 = s32[] constant(0) tuple.1 = (s32[]) tuple(constant.3) while = (s32[]) while(tuple.1), condition=WhileCondition, body=WhileBody ROOT rtuple = () tuple() })") .value(); const HloLivenessAnalysis& liveness = RunLiveness(module.get()); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {})); } TEST_F(HloLivenessAnalysisTest, NestedWhileWithOutfeed) { auto module = ParseAndReturnVerifiedModule(R"( HloModule OutfeedLoop InnerWhileBody { body_param = (s32[]) parameter(0) token0 = token[] after-all() constant.2 = s32[] constant(2) outfeed_tuple = (s32[]) outfeed(constant.2, token0) get-tuple-element.1 = s32[] get-tuple-element(body_param), index=0 constant.1 = s32[] constant(1) add = s32[] add(get-tuple-element.1, constant.1) ROOT tuple = (s32[]) tuple(add) } InnerWhileCondition { cond_param = (s32[]) parameter(0) get-tuple-element.3 = s32[] get-tuple-element(cond_param), index=0 constant.2 = s32[] constant(10) ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT } OuterWhileCondition { 
cond_param.2 = (s32[]) parameter(0) get-tuple-element.5 = s32[] get-tuple-element(cond_param.2), index=0 constant.5 = s32[] constant(5) ROOT less-than.2 = pred[] compare(get-tuple-element.5, constant.5), direction=LT } OuterWhileBody { body_param.2 = (s32[]) parameter(0) get-tuple-element.8 = s32[] get-tuple-element(body_param.2), index=0 constant.6 = s32[] constant(0) tuple.2 = (s32[]) tuple(constant.6) inner_while = (s32[]) while(tuple.2), condition=InnerWhileCondition, body=InnerWhileBody constant.7 = s32[] constant(1) add.2 = s32[] add(get-tuple-element.8, constant.7) ROOT rtuple = (s32[]) tuple(add.2) } ENTRY SimpleLoop { constant.3 = s32[] constant(0) tuple.1 = (s32[]) tuple(constant.3) while = (s32[]) while(tuple.1), condition=OuterWhileCondition, body=OuterWhileBody ROOT rtuple = () tuple() })") .value(); const HloLivenessAnalysis& liveness = RunLiveness(module.get()); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add.2"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {})); } TEST_F(HloLivenessAnalysisTest, PropagateLivenessFromConditionalComputation) { auto module = ParseAndReturnVerifiedModule(R"( HloModule main.67 %region_0.10 (Arg_0.11: (s32[], s32[], f32[1024,3], s32[1])) -> (s32[], s32[], f32[1024,3], s32[1]) { %Arg_0.11 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) parameter(0) %get-tuple-element.17 = s32[] get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.11), index=0, metadata={op_name="while"} %constant.13 = s32[] constant(1) %add.25 = s32[] add(s32[] %get-tuple-element.17, s32[] %constant.13), metadata={op_name="while/add_1"} %get-tuple-element.18 = s32[] get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.11), index=1, metadata={op_name="while"} %add.22 = s32[] add(s32[] %get-tuple-element.18, s32[] %constant.13), metadata={op_name="while/add"} %get-tuple-element.19 = f32[1024,3]{1,0} 
get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.11), index=2, metadata={op_name="while"} %constant.16 = f32[] constant(0) %constant.15 = f32[] constant(1) %rng.21 = f32[3]{0} rng(f32[] %constant.16, f32[] %constant.15), distribution=rng_uniform, metadata={op_name="while/random_uniform/RandomUniform"} %reshape.23 = f32[1,3]{1,0} reshape(f32[3]{0} %rng.21), metadata={op_name="while/TensorArrayV2Write/TensorListSetItem"} %constant.12 = s32[] constant(0) %dynamic-update-slice.24 = f32[1024,3]{1,0} dynamic-update-slice(f32[1024,3]{1,0} %get-tuple-element.19, f32[1,3]{1,0} %reshape.23, s32[] %get-tuple-element.18, s32[] %constant.12), metadata={op_name="while/TensorArrayV2Write/TensorListSetItem"} %get-tuple-element.20 = s32[1]{0} get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.11), index=3, metadata={op_name="while"} ROOT %tuple.26 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) tuple(s32[] %add.25, s32[] %add.22, f32[1024,3]{1,0} %dynamic-update-slice.24, s32[1]{0} %get-tuple-element.20), metadata={op_name="while"} } %region_1.27 (Arg_0.28: (s32[], s32[], f32[1024,3], s32[1])) -> pred[] { %Arg_0.28 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) parameter(0) %get-tuple-element.30 = s32[] get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.28), index=1, metadata={op_name="while"} %constant.29 = s32[] constant(1024) ROOT %compare.31 = pred[] compare(s32[] %get-tuple-element.30, s32[] %constant.29), direction=LT, metadata={op_name="while/Less"} } %region_2.42 (Arg_0.43: (f32[3,32,32,3], token[])) -> (pred[], token[]) { %constant.44 = pred[] constant(true) %Arg_0.43 = (f32[3,32,32,3]{3,2,1,0}, token[]) parameter(0) %get-tuple-element.52 = f32[3,32,32,3]{3,2,1,0} get-tuple-element((f32[3,32,32,3]{3,2,1,0}, token[]) %Arg_0.43), index=0, metadata={op_name="image_sample/write_summary/summary_cond"} %constant.49 = f32[] constant(255.5) %broadcast.50 = f32[3,32,32,3]{3,2,1,0} broadcast(f32[] %constant.49), dimensions={}, 
metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Mul"} %multiply.53 = f32[3,32,32,3]{3,2,1,0} multiply(f32[3,32,32,3]{3,2,1,0} %get-tuple-element.52, f32[3,32,32,3]{3,2,1,0} %broadcast.50), metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Mul"} %constant.47 = f32[] constant(0) %broadcast.48 = f32[3,32,32,3]{3,2,1,0} broadcast(f32[] %constant.47), dimensions={}, metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Maximum"} %maximum.54 = f32[3,32,32,3]{3,2,1,0} maximum(f32[3,32,32,3]{3,2,1,0} %multiply.53, f32[3,32,32,3]{3,2,1,0} %broadcast.48), metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Maximum"} %constant.45 = f32[] constant(255) %broadcast.46 = f32[3,32,32,3]{3,2,1,0} broadcast(f32[] %constant.45), dimensions={}, metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Minimum"} %minimum.55 = f32[3,32,32,3]{3,2,1,0} minimum(f32[3,32,32,3]{3,2,1,0} %maximum.54, f32[3,32,32,3]{3,2,1,0} %broadcast.46), metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Minimum"} %convert.56 = u8[3,32,32,3]{3,2,1,0} convert(f32[3,32,32,3]{3,2,1,0} %minimum.55), metadata={op_name="image_sample/write_summary/summary_cond/convert_image"} %get-tuple-element.51 = token[] get-tuple-element((f32[3,32,32,3]{3,2,1,0}, token[]) %Arg_0.43), index=1, metadata={op_name="image_sample/write_summary/summary_cond"} %send.57 = (u8[3,32,32,3]{3,2,1,0}, u32[], token[]) send(u8[3,32,32,3]{3,2,1,0} %convert.56, token[] %get-tuple-element.51), channel_id=2, is_host_transfer=true, frontend_attributes={_xla_host_transfer_rendezvous="host_compute_channel_0_args_dtoh_0"}, metadata={op_name="image_sample/write_summary/summary_cond/encode_each_image/TensorArrayUnstack/TensorListFromTensor"} %send-done.58 = token[] send-done((u8[3,32,32,3]{3,2,1,0}, u32[], token[]) %send.57), channel_id=2, is_host_transfer=true, 
frontend_attributes={_xla_host_transfer_rendezvous="host_compute_channel_0_args_dtoh_0"}, metadata={op_name="image_sample/write_summary/summary_cond/encode_each_image/TensorArrayUnstack/TensorListFromTensor"} ROOT %tuple.59 = (pred[], token[]) tuple(pred[] %constant.44, token[] %send-done.58), metadata={op_name="image_sample/write_summary/summary_cond"} } %region_3.60 (Arg_0.61: (f32[3,32,32,3], token[])) -> (pred[], token[]) { %constant.62 = pred[] constant(false) %Arg_0.61 = (f32[3,32,32,3]{3,2,1,0}, token[]) parameter(0) %get-tuple-element.63 = token[] get-tuple-element((f32[3,32,32,3]{3,2,1,0}, token[]) %Arg_0.61), index=1, metadata={op_name="image_sample/write_summary/summary_cond"} ROOT %tuple.64 = (pred[], token[]) tuple(pred[] %constant.62, token[] %get-tuple-element.63), metadata={op_name="image_sample/write_summary/summary_cond"} } ENTRY %main.67 (arg_tuple.1: (s32[])) -> () { %arg_tuple.1 = (s32[]{:T(256)}) parameter(0) %get-tuple-element.2 = s32[]{:T(256)} get-tuple-element((s32[]{:T(256)}) %arg_tuple.1), index=0 %constant.3 = s32[] constant(0) %compare.8 = pred[]{:T(256)} compare(s32[]{:T(256)} %get-tuple-element.2, s32[] %constant.3), direction=EQ, metadata={op_name="image_sample/write_summary/Equal"} %constant.5 = f32[] constant(0) %broadcast.6 = f32[1024,3]{1,0} broadcast(f32[] %constant.5), dimensions={}, metadata={op_name="tokens_accumulator"} %constant.4 = s32[1]{0} constant({1024}) %tuple.9 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) tuple(s32[] %constant.3, s32[] %constant.3, f32[1024,3]{1,0} %broadcast.6, s32[1]{0} %constant.4), metadata={op_name="while"} %while.32 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) while((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %tuple.9), condition=%region_1.27, body=%region_0.10, metadata={op_name="while"} %get-tuple-element.33 = f32[1024,3]{1,0} get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %while.32), index=2, metadata={op_name="while"} %transpose.34 = f32[3,1024]{0,1} 
transpose(f32[1024,3]{1,0} %get-tuple-element.33), dimensions={1,0}, metadata={op_name="transpose.transpose/perm"} %reshape.35 = f32[3,32,32,1]{3,2,1,0} reshape(f32[3,1024]{0,1} %transpose.34), metadata={op_name="Reshape"} %broadcast.36 = f32[3,32,32,1]{3,2,1,0} broadcast(f32[3,32,32,1]{3,2,1,0} %reshape.35), dimensions={0,1,2,3}, metadata={op_name="Tile"} %reshape.37 = f32[3,32,32]{2,1,0} reshape(f32[3,32,32,1]{3,2,1,0} %broadcast.36), metadata={op_name="Tile"} %broadcast.38 = f32[3,32,32,3]{3,2,1,0} broadcast(f32[3,32,32]{2,1,0} %reshape.37), dimensions={0,1,2}, metadata={op_name="Tile"} %after-all.7 = token[] after-all(), metadata={op_name="image_sample/write_summary/summary_cond"} %send.39 = (pred[]{:T(256)}, u32[], token[]) send(pred[]{:T(256)} %compare.8, token[] %after-all.7), channel_id=1, is_host_transfer=true, frontend_attributes={_xla_host_transfer_rendezvous="if_predicate_channel_1_dtoh_0"}, metadata={op_name="image_sample/write_summary/summary_cond"} %send-done.40 = token[] send-done((pred[]{:T(256)}, u32[], token[]) %send.39), channel_id=1, is_host_transfer=true, frontend_attributes={_xla_host_transfer_rendezvous="if_predicate_channel_1_dtoh_0"}, metadata={op_name="image_sample/write_summary/summary_cond"} %tuple.41 = (f32[3,32,32,3]{3,2,1,0}, token[]) tuple(f32[3,32,32,3]{3,2,1,0} %broadcast.38, token[] %send-done.40), metadata={op_name="image_sample/write_summary/summary_cond"} %conditional.65 = (pred[], token[]) conditional(pred[]{:T(256)} %compare.8, (f32[3,32,32,3]{3,2,1,0}, token[]) %tuple.41, (f32[3,32,32,3]{3,2,1,0}, token[]) %tuple.41), true_computation=%region_2.42, false_computation=%region_3.60, metadata={op_name="image_sample/write_summary/summary_cond"} ROOT %tuple.66 = () tuple() } )") .value(); const HloLivenessAnalysis& liveness = RunLiveness(module.get()); EXPECT_TRUE( liveness.IsLive(GetInstruction(module.get(), "conditional.65"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.41"), {})); 
EXPECT_TRUE(liveness.IsLive( GetInstruction(module.get(), "get-tuple-element.33"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.32"), {})); EXPECT_TRUE(liveness.IsLive( GetInstruction(module.get(), "dynamic-update-slice.24"), {})); EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "send.57"), {})); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_liveness_analysis.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_liveness_analysis_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
02bc84dc-502e-49cb-bc34-f1006c412337
cpp
tensorflow/tensorflow
defuser
third_party/xla/xla/service/defuser.cc
third_party/xla/xla/service/defuser_test.cc
#include "xla/service/defuser.h" #include <algorithm> #include <memory> #include <numeric> #include <string> #include <utility> #include <vector> #include "absl/container/flat_hash_map.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/call_graph.h" #include "xla/status_macros.h" #include "xla/types.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/status.h" namespace xla { absl::StatusOr<bool> Defuser::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { VLOG(1) << "Defusing module " << module->name(); XLA_VLOG_LINES(2, "Before defusion:\n" + module->ToString()); bool changed = false; std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module); TF_RETURN_IF_ERROR(call_graph->VisitNodes( [&](const CallGraphNode& call_graph_node) -> absl::Status { if (call_graph_node.computation()->IsFusionComputation()) { TF_RET_CHECK(call_graph_node.caller_callsites().size() == 1); HloInstruction* fusion_instruction = call_graph_node.caller_callsites()[0].instruction(); TF_RETURN_IF_ERROR(fusion_instruction->Defuse()); changed = true; } return absl::OkStatus(); }, true)); XLA_VLOG_LINES(2, "After defusion:\n" + module->ToString()); return changed; } }
#include "xla/service/defuser.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/literal.h" #include "xla/literal_util.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/tests/hlo_test_base.h" namespace op = xla::testing::opcode_matchers; namespace xla { namespace { class DefuserTest : public HloTestBase { protected: int FusionCount(const HloModule* m) { int count = 0; for (HloComputation* computation : m->computations()) { if (computation->IsFusionComputation()) { count++; } } return count; } Defuser defuser_; const Shape shape_ = ShapeUtil::MakeShape(F32, {2, 2}); }; TEST_F(DefuserTest, NoFusionInstruction) { auto m = CreateNewVerifiedModule(); auto builder = HloComputation::Builder(TestName()); auto param0 = builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0")); auto param1 = builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1")); builder.AddInstruction( HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1)); m->AddEntryComputation(builder.Build()); EXPECT_EQ(0, FusionCount(m.get())); EXPECT_FALSE(defuser_.Run(m.get()).value()); } TEST_F(DefuserTest, TrivialFusionInstructionAsRoot) { auto m = CreateNewVerifiedModule(); auto builder = HloComputation::Builder(TestName()); auto param0 = builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0")); auto param1 = builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1")); auto add = builder.AddInstruction( HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1)); auto computation = m->AddEntryComputation(builder.Build()); computation->CreateFusionInstruction({add}, HloInstruction::FusionKind::kLoop); EXPECT_THAT(computation->root_instruction(), op::Fusion()); EXPECT_EQ(1, FusionCount(m.get())); 
EXPECT_TRUE(defuser_.Run(m.get()).value()); EXPECT_EQ(0, FusionCount(m.get())); EXPECT_THAT(computation->root_instruction(), op::Add(op::Parameter(), op::Parameter())); } TEST_F(DefuserTest, TrivialFusionInstructionNotAsRoot) { auto m = CreateNewVerifiedModule(); auto builder = HloComputation::Builder(TestName()); auto param0 = builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0")); auto param1 = builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1")); auto add = builder.AddInstruction( HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1)); builder.AddInstruction( HloInstruction::CreateUnary(shape_, HloOpcode::kNegate, add)); auto computation = m->AddEntryComputation(builder.Build()); computation->CreateFusionInstruction({add}, HloInstruction::FusionKind::kLoop); EXPECT_THAT(computation->root_instruction(), op::Negate(op::Fusion())); EXPECT_EQ(1, FusionCount(m.get())); EXPECT_TRUE(defuser_.Run(m.get()).value()); EXPECT_EQ(0, FusionCount(m.get())); EXPECT_THAT(computation->root_instruction(), op::Negate(op::Add(op::Parameter(), op::Parameter()))); } TEST_F(DefuserTest, NonTrivialFusionInstruction) { auto m = CreateNewVerifiedModule(); auto builder = HloComputation::Builder(TestName()); auto param0 = builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0")); auto param1 = builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1")); auto param3 = builder.AddInstruction(HloInstruction::CreateParameter(2, shape_, "p2")); auto add = builder.AddInstruction( HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1)); auto negate = builder.AddInstruction( HloInstruction::CreateUnary(shape_, HloOpcode::kNegate, add)); auto sub = builder.AddInstruction( HloInstruction::CreateBinary(shape_, HloOpcode::kSubtract, add, negate)); auto mul = builder.AddInstruction( HloInstruction::CreateBinary(shape_, HloOpcode::kMultiply, sub, param3)); auto div = builder.AddInstruction( 
HloInstruction::CreateBinary(shape_, HloOpcode::kDivide, mul, param3)); auto constant = builder.AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}))); auto add2 = builder.AddInstruction( HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, constant, div)); auto computation = m->AddEntryComputation(builder.Build()); computation->CreateFusionInstruction( {add2, constant, div, mul, sub, negate, add}, HloInstruction::FusionKind::kLoop); EXPECT_THAT(computation->root_instruction(), op::Fusion()); EXPECT_EQ(1, FusionCount(m.get())); EXPECT_TRUE(defuser_.Run(m.get()).value()); EXPECT_EQ(0, FusionCount(m.get())); EXPECT_THAT(computation->root_instruction(), op::Add(op::Constant(), op::Divide())); } TEST_F(DefuserTest, MultipleFusionInstructions) { auto m = CreateNewVerifiedModule(); auto builder = HloComputation::Builder(TestName()); auto param0 = builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0")); auto param1 = builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1")); auto param3 = builder.AddInstruction(HloInstruction::CreateParameter(2, shape_, "p2")); auto add = builder.AddInstruction( HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1)); auto negate = builder.AddInstruction( HloInstruction::CreateUnary(shape_, HloOpcode::kNegate, add)); auto sub = builder.AddInstruction( HloInstruction::CreateBinary(shape_, HloOpcode::kSubtract, add, negate)); auto mul = builder.AddInstruction( HloInstruction::CreateBinary(shape_, HloOpcode::kMultiply, sub, param3)); auto div = builder.AddInstruction( HloInstruction::CreateBinary(shape_, HloOpcode::kDivide, mul, param3)); auto constant = builder.AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}))); auto add2 = builder.AddInstruction( HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, constant, div)); auto computation = m->AddEntryComputation(builder.Build()); 
computation->CreateFusionInstruction({add2, constant, div, mul}, HloInstruction::FusionKind::kLoop); computation->CreateFusionInstruction({sub, negate, add}, HloInstruction::FusionKind::kLoop); EXPECT_THAT(computation->root_instruction(), op::Fusion()); EXPECT_EQ(2, FusionCount(m.get())); EXPECT_TRUE(defuser_.Run(m.get()).value()); EXPECT_EQ(0, FusionCount(m.get())); EXPECT_THAT(computation->root_instruction(), op::Add(op::Constant(), op::Divide())); } TEST_F(DefuserTest, NestedFusionInstructions) { auto m = CreateNewVerifiedModule(); auto builder = HloComputation::Builder(TestName()); auto param0 = builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0")); auto param1 = builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1")); auto add = builder.AddInstruction( HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1)); auto negate = builder.AddInstruction( HloInstruction::CreateUnary(shape_, HloOpcode::kNegate, add)); auto computation = m->AddEntryComputation(builder.Build()); auto outer_fusion = computation->CreateFusionInstruction( {negate, add}, HloInstruction::FusionKind::kLoop); HloInstruction* fused_negate = outer_fusion->fused_expression_root(); ASSERT_EQ(fused_negate->opcode(), HloOpcode::kNegate); outer_fusion->fused_instructions_computation()->CreateFusionInstruction( {fused_negate}, HloInstruction::FusionKind::kLoop); EXPECT_THAT(computation->root_instruction(), op::Fusion()); EXPECT_EQ(2, FusionCount(m.get())); EXPECT_TRUE(defuser_.Run(m.get()).value()); EXPECT_EQ(0, FusionCount(m.get())); EXPECT_THAT(computation->root_instruction(), op::Negate(op::Add())); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/defuser.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/defuser_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
46bc99e4-502f-4dcb-8ff3-7a075b7f1f98
cpp
tensorflow/tensorflow
elemental_ir_emitter
third_party/xla/xla/service/gpu/elemental_ir_emitter.cc
third_party/xla/xla/service/elemental_ir_emitter_test.cc
#include "xla/service/gpu/elemental_ir_emitter.h" #include <cstdint> #include <string> #include <vector> #include "absl/log/check.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "llvm/IR/Attributes.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/Module.h" #include "llvm/IR/Type.h" #include "llvm/Support/ModRef.h" #include "llvm/TargetParser/Triple.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/layout.h" #include "xla/service/elemental_ir_emitter.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/ir_emitter_context.h" #include "xla/service/gpu/ir_emitter_nested.h" #include "xla/service/gpu/target_util.h" #include "xla/service/llvm_ir/ir_array.h" #include "xla/service/llvm_ir/llvm_util.h" #include "xla/service/llvm_ir/math_ops.h" #include "xla/stream_executor/device_description.h" #include "xla/util.h" #include "xla/xla_data.pb.h" namespace xla { namespace gpu { GpuElementalIrEmitter::GpuElementalIrEmitter( IrEmitterContext& ir_emitter_context, llvm::IRBuilder<>* b) : ElementalIrEmitter(ir_emitter_context.llvm_module(), b), ir_emitter_context_(ir_emitter_context) {} absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitDeviceMathCall( TargetDeviceFunctionID funcid, absl::Span<llvm::Value* const> operands, absl::Span<const PrimitiveType> input_types, PrimitiveType output_type, absl::string_view name) { bool cast_result_to_fp16 = false; std::vector<llvm::Value*> converted_operands(operands.begin(), operands.end()); std::vector<PrimitiveType> converted_input_types(input_types.begin(), input_types.end()); switch (output_type) { case F16: cast_result_to_fp16 = true; for (int64_t i = 0; i < operands.size(); ++i) { if (input_types[i] == F16) 
{ converted_operands[i] = FPCast(converted_operands[i], b()->getFloatTy()); converted_input_types[i] = F32; } } output_type = F32; [[fallthrough]]; case F32: break; case F64: break; default: return Unimplemented("Bad type for device math call: %s", PrimitiveType_Name(output_type)); } const std::string& munged_callee = ObtainDeviceFunctionName( funcid, output_type, llvm::Triple(b()->GetInsertBlock()->getModule()->getTargetTriple())); llvm::Value* result = EmitMathCall(munged_callee, converted_operands, converted_input_types, output_type, name) .value(); if (cast_result_to_fp16) { result = FPCast(result, b()->getHalfTy()); } return result; } absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitMathCall( const std::string& callee_name, absl::Span<llvm::Value* const> operands, absl::Span<const PrimitiveType> input_types, PrimitiveType output_type, absl::string_view name) { for (PrimitiveType input_type : input_types) { if (output_type != input_type) { return Unimplemented("Input type != output type: %s != %s", PrimitiveType_Name(input_type), PrimitiveType_Name(output_type)); } } return EmitDeviceFunctionCall(callee_name, operands, input_types, output_type, llvm::AttrBuilder(b()->getContext()) .addMemoryAttr(llvm::MemoryEffects::none()) .addAttribute(llvm::Attribute::NoUnwind), b(), name); } llvm_ir::IrArray::Index GpuElementalIrEmitter::GetSourceIndexOfBitcast( const llvm_ir::IrArray::Index& index, const HloInstruction* hlo) { Shape shape = hlo->shape(); Shape operand_shape = hlo->operand(0)->shape(); auto gpu_config = hlo->backend_config<GpuBackendConfig>(); CHECK_OK(gpu_config); const BitcastBackendConfig& bitcast_config = gpu_config.value().bitcast_backend_config(); if (!bitcast_config.result_layout().minor_to_major().empty()) { *shape.mutable_layout() = xla::Layout::CreateFromProto(bitcast_config.result_layout()); } if (!bitcast_config.source_layout().minor_to_major().empty()) { *operand_shape.mutable_layout() = 
xla::Layout::CreateFromProto(bitcast_config.source_layout()); } return index.SourceIndexOfBitcast(shape, operand_shape, b()); } absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitFloatBinaryOp( const HloInstruction* op, llvm::Value* lhs_value, llvm::Value* rhs_value) { PrimitiveType lhs_input_type = op->operand(0)->shape().element_type(); PrimitiveType rhs_input_type = op->operand(1)->shape().element_type(); PrimitiveType output_type = op->shape().element_type(); HloOpcode opcode = op->opcode(); if (ir_emitter_context_.debug_options().xla_gpu_enable_fast_min_max() && (opcode == HloOpcode::kMaximum || opcode == HloOpcode::kMinimum)) { return llvm_ir::EmitCallToIntrinsic( opcode == HloOpcode::kMaximum ? llvm::Intrinsic::maxnum : llvm::Intrinsic::minnum, {lhs_value, rhs_value}, {lhs_value->getType()}, b()); } switch (op->opcode()) { case HloOpcode::kRemainder: { return EmitDeviceMathCall(TargetDeviceFunctionID::kFmod, {lhs_value, rhs_value}, {lhs_input_type, rhs_input_type}, output_type); } case HloOpcode::kPower: { return EmitPowerOp(op, lhs_value, rhs_value); } default: return ElementalIrEmitter::EmitFloatBinaryOp(op, lhs_value, rhs_value); } } absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitPowerOp( const HloInstruction* op, llvm::Value* lhs_value, llvm::Value* rhs_value) { CHECK_EQ(op->opcode(), HloOpcode::kPower); PrimitiveType lhs_input_type = op->operand(0)->shape().element_type(); PrimitiveType rhs_input_type = op->operand(1)->shape().element_type(); PrimitiveType output_type = op->shape().element_type(); return EmitDeviceMathCall(TargetDeviceFunctionID::kPow, {lhs_value, rhs_value}, {lhs_input_type, rhs_input_type}, output_type); } absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitLog( PrimitiveType prim_type, llvm::Value* value) { return EmitDeviceMathCall(TargetDeviceFunctionID::kLog, {value}, {prim_type}, prim_type); } absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitLog1p( PrimitiveType prim_type, llvm::Value* value) { return 
EmitDeviceMathCall(TargetDeviceFunctionID::kLog1p, {value}, {prim_type}, prim_type); } absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitSin( PrimitiveType prim_type, llvm::Value* value) { return EmitDeviceMathCall(TargetDeviceFunctionID::kSin, {value}, {prim_type}, prim_type); } absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitCos( PrimitiveType prim_type, llvm::Value* value) { return EmitDeviceMathCall(TargetDeviceFunctionID::kCos, {value}, {prim_type}, prim_type); } absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitTan( PrimitiveType prim_type, llvm::Value* value) { return EmitDeviceMathCall(TargetDeviceFunctionID::kTan, {value}, {prim_type}, prim_type); } absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitExp( PrimitiveType prim_type, llvm::Value* value, absl::string_view ) { return EmitDeviceMathCall(TargetDeviceFunctionID::kExp, {value}, {prim_type}, prim_type); } absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitExpm1( PrimitiveType prim_type, llvm::Value* value) { return EmitDeviceMathCall(TargetDeviceFunctionID::kExpm1, {value}, {prim_type}, prim_type); } absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitPow( PrimitiveType prim_type, llvm::Value* lhs, llvm::Value* rhs, absl::string_view name) { return EmitDeviceMathCall(TargetDeviceFunctionID::kPow, {lhs, rhs}, {prim_type, prim_type}, prim_type, name); } absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitSqrt( PrimitiveType prim_type, llvm::Value* value) { return EmitDeviceMathCall(TargetDeviceFunctionID::kSqrt, {value}, {prim_type}, prim_type); } absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitRsqrt( PrimitiveType prim_type, llvm::Value* value) { return EmitDeviceMathCall(TargetDeviceFunctionID::kRsqrt, {value}, {prim_type}, prim_type); } absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitAtan2( PrimitiveType prim_type, llvm::Value* lhs, llvm::Value* rhs, absl::string_view name) { return EmitDeviceMathCall(TargetDeviceFunctionID::kAtan2, {lhs, rhs}, 
{prim_type, prim_type}, prim_type, name); } absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitTanh( PrimitiveType prim_type, llvm::Value* value) { if (prim_type == F64) { return EmitDeviceMathCall(TargetDeviceFunctionID::kTanh, {value}, {prim_type}, prim_type); } llvm::Type* type = prim_type == F16 ? b()->getFloatTy() : value->getType(); llvm::Value* input = FPCast(value, type); constexpr double kMaxValue = 20.0; auto max_value = llvm::ConstantFP::get(type, kMaxValue); llvm::Value* abs_value = llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::fabs, {input}, {type}, b()); llvm::Value* fast_tanh = llvm_ir::EmitFastTanh(b(), input); auto one = llvm::ConstantFP::get(type, 1.0); auto one_with_sign = llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::copysign, {one, input}, {type}, b()); return FPCast(Select(FCmpULT(abs_value, max_value), fast_tanh, one_with_sign), value->getType(), "tanh"); } absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitErf( PrimitiveType prim_type, llvm::Value* value) { if (prim_type == F64) { return EmitDeviceMathCall(TargetDeviceFunctionID::kErf, {value}, {prim_type}, prim_type); } llvm::Type* type = prim_type == F16 ? 
b()->getFloatTy() : value->getType(); if (type == b()->getFloatTy()) { llvm::Value* x = FPCast(value, type); auto* result = llvm_ir::EmitErfF32(b(), x); return FPCast(result, value->getType()); } return Unimplemented("erf"); } absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitComplexAbs( PrimitiveType prim_type, llvm::Value* value) { return EmitDeviceMathCall(TargetDeviceFunctionID::kHypot, {EmitExtractReal(value), EmitExtractImag(value)}, {prim_type, prim_type}, prim_type); } absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitCbrt( PrimitiveType prim_type, llvm::Value* value) { return EmitDeviceMathCall(TargetDeviceFunctionID::kCbrt, {value}, {prim_type}, prim_type); } absl::StatusOr<std::vector<llvm::Value*>> GpuElementalIrEmitter::EmitThreadLocalCall( const HloComputation& callee, absl::Span<llvm::Value* const> parameters, absl::string_view, bool ) { return CallNestedComputationWithScalars(b(), ir_emitter_context_, callee, parameters); } } }
#include "xla/service/elemental_ir_emitter.h" #include <cstdint> #include <memory> #include <optional> #include <string> #include <type_traits> #include <utility> #include <gtest/gtest.h> #include "absl/strings/str_replace.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "xla/error_spec.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/literal.h" #include "xla/literal_util.h" #include "xla/service/hlo_module_config.h" #include "xla/service/llvm_ir/ir_array.h" #include "xla/test.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/test_macros.h" #include "xla/types.h" #include "tsl/platform/ml_dtypes.h" #include "tsl/platform/statusor.h" namespace xla { namespace { using std::nullopt; class ElementalIrEmitterExecutionTest : public HloTestBase { protected: void RunTest(const std::string& hlo_text, absl::Span<Literal* const> args) { HloModuleConfig config; config.set_debug_options(GetDebugOptionsForTest()); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text, config)); EXPECT_TRUE(RunAndCompareNoHloPasses(std::move(module), args, nullopt)); } void RunTypeConversionTest(absl::string_view hlo_text) { HloModuleConfig config; auto debug_options = GetDebugOptionsForTest(); debug_options.set_xla_cpu_fast_math_honor_nans(true); debug_options.set_xla_cpu_fast_math_honor_infs(true); config.set_debug_options(debug_options); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text, config)); EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{(0.)})); } }; class ElementalIrEmitterExecutionTestWithoutFastMinMax : public ElementalIrEmitterExecutionTest { protected: DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = ElementalIrEmitterExecutionTest::GetDebugOptionsForTest(); 
debug_options.set_xla_cpu_enable_fast_min_max(false); debug_options.set_xla_gpu_enable_fast_min_max(false); return debug_options; } }; template <typename T> class ElementalIrEmitterExecutionTypedTest : public ElementalIrEmitterExecutionTest { protected: const std::string& TypeName() { return primitive_util::LowercasePrimitiveTypeName( primitive_util::NativeToPrimitiveType<T>()); } }; using FloatTypes = ::testing::Types<bfloat16, tsl::float8_e5m2, tsl::float8_e5m2fnuz, tsl::float8_e4m3, tsl::float8_e4m3fn, tsl::float8_e4m3fnuz, tsl::float8_e4m3b11fnuz, tsl::float8_e3m4>; TYPED_TEST_SUITE(ElementalIrEmitterExecutionTypedTest, FloatTypes); XLA_TEST_F(ElementalIrEmitterExecutionTest, DotFusion) { const std::string hlo_text = R"( HloModule FusedDot fused_computation { arg0 = s32[1,2,1]{2,1,0} parameter(0) reshape.lhs = s32[2,1]{1,0} reshape(arg0) arg1 = s32[1,2,1]{2,1,0} parameter(1) reshape.rhs = s32[2,1]{1,0} reshape(arg1) ROOT dot = s32[1,1]{1,0} dot(reshape.lhs, reshape.rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0} } ENTRY main { entry_arg0 = s32[1,2,1]{2,1,0} parameter(0) entry_arg1 = s32[1,2,1]{2,1,0} parameter(1) ROOT fusion = s32[1,1]{1,0} fusion(entry_arg0, entry_arg1), kind=kLoop, calls=fused_computation } )"; Literal lhs = LiteralUtil::CreateR3<int32_t>({{{1}, {2}}}); Literal rhs = LiteralUtil::CreateR3<int32_t>({{{3}, {4}}}); RunTest(hlo_text, {&lhs, &rhs}); } XLA_TEST_F(ElementalIrEmitterExecutionTest, ScalarDotFusion) { const char* hlo_text = R"( HloModule ScalarDotFusion fused_computation { arg0 = s32[2,2]{1,0} parameter(0) reshape.lhs = s32[4]{0} reshape(arg0) arg1 = s32[2,2]{1,0} parameter(1) reshape.rhs = s32[4]{0} reshape(arg1) ROOT dot = s32[] dot(reshape.lhs, reshape.rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0} } ENTRY main { entry_arg0 = s32[2,2]{1,0} parameter(0) entry_arg1 = s32[2,2]{1,0} parameter(1) ROOT fusion = s32[] fusion(entry_arg0, entry_arg1), kind=kLoop, calls=fused_computation } )"; Literal lhs = 
LiteralUtil::CreateR2<int32_t>({{1, 2}, {3, 4}}); Literal rhs = LiteralUtil::CreateR2<int32_t>({{10, 20}, {30, 40}}); RunTest(hlo_text, {&lhs, &rhs}); } XLA_TEST_F(ElementalIrEmitterExecutionTest, BatchDot) { const char* hlo_text = R"( HloModule BatchDot fused_computation.1 { param_0 = f64[1,1,8]{2,1,0} parameter(0) r.1 = f64[2,4]{1,0} reshape(param_0) param_1 = f64[1,2,2,2,1]{4,3,2,1,0} parameter(1) r.2 = f64[2,4,1]{2,1,0} reshape(param_1) ROOT dot = f64[2,1]{1,0} dot(r.1, r.2), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1} } ENTRY resampler_Resampler.49 { p0 = f64[1,1,8]{2,1,0} parameter(0) p1 = f64[1,2,2,2,1]{4,3,2,1,0} parameter(1) ROOT f = f64[2,1]{1,0} fusion(p0, p1), kind=kLoop, calls=fused_computation.1 } )"; HloModuleConfig config; auto debug_options = GetDebugOptionsForTest(); debug_options.add_xla_disable_hlo_passes("layout-assignment"); config.set_debug_options(debug_options); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text, config)); EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{4e-3, 4e-3})); } XLA_TEST_F(ElementalIrEmitterExecutionTest, DivideComplexNumbersWithInfiniteNormRhs) { constexpr char hlo_text[] = R"( HloModule DivideComplexNumbers ENTRY DivideComplexNumbers { constant.1 = c64[8]{0} constant({ (1, 1), (1, inf), (1, inf), (nan, 1), (inf, inf), (inf, nan), (nan, nan), (1, 2)}) real = f32[8]{0} constant({nan, nan, inf, inf, inf, 1, inf, 3}) imag = f32[8]{0} constant({inf, inf, inf, inf, 1, inf, inf, 4}) complex.2 = c64[8]{0} complex(real, imag) ROOT divide.1 = c64[8]{0} divide(constant.1, complex.2) } )"; HloModuleConfig config; auto debug_options = GetDebugOptionsForTest(); debug_options.set_xla_cpu_fast_math_honor_nans(true); debug_options.set_xla_cpu_fast_math_honor_infs(true); config.set_debug_options(debug_options); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text, config)); 
EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{(0.)})); } XLA_TEST_F(ElementalIrEmitterExecutionTest, DivideComplexNumbersWithFiniteNormRhs) { constexpr char hlo_text[] = R"( HloModule DivideComplexNumbers ENTRY DivideComplexNumbers { constant.1 = c64[5]{0} constant({ (1, inf), (inf, 1), (inf, nan), (inf, inf), (nan, inf)}) real = f32[5]{0} constant({1, 1, 1, 1, 1}) imag = f32[5]{0} constant({1, 1, 1, 1, 1}) complex.2 = c64[5]{0} complex(real, imag) ROOT divide.1 = c64[5]{0} divide(constant.1, complex.2) } )"; HloModuleConfig config; auto debug_options = GetDebugOptionsForTest(); debug_options.set_xla_cpu_fast_math_honor_nans(true); debug_options.set_xla_cpu_fast_math_honor_infs(true); config.set_debug_options(debug_options); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text, config)); EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{(0.)})); } XLA_TEST_F(ElementalIrEmitterExecutionTest, DivideComplexNumbersWithZeroNormRhs) { constexpr char hlo_text[] = R"( HloModule DivideComplexNumbers ENTRY DivideComplexNumbers { constant.1 = c64[9]{0} constant({ (1, 1), (1, nan), (1, inf), (inf, inf), (inf, 1), (inf, nan), (nan, 1), (nan, inf), (nan, nan)}) real = f32[9]{0} constant({0, 0, 0, 0, 0, 0, 0, 0, 0}) imag = f32[9]{0} constant({0, 0, 0, 0, 0, 0, 0, 0, 0}) complex.2 = c64[9]{0} complex(real, imag) ROOT divide.1 = c64[9]{0} divide(constant.1, complex.2) } )"; HloModuleConfig config; auto debug_options = GetDebugOptionsForTest(); debug_options.set_xla_cpu_fast_math_honor_nans(true); debug_options.set_xla_cpu_fast_math_honor_infs(true); config.set_debug_options(debug_options); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text, config)); EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{(0.)})); } TYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertFloatsToFloat) { auto tname = this->TypeName(); if (std::is_same<TypeParam, tsl::float8_e4m3>() || 
std::is_same<TypeParam, tsl::float8_e4m3fn>() || std::is_same<TypeParam, tsl::float8_e4m3b11fnuz>() || std::is_same<TypeParam, tsl::float8_e3m4>()) { GTEST_SKIP() << "Skipping test for type " << tname; } const auto hlo_text = absl::StrReplaceAll(R"( HloModule m ENTRY main { f16_ = f16[] parameter(0) f32_ = f32[] parameter(1) f64_ = f64[] parameter(2) bf16_ = bf16[] parameter(3) converted_f16 = ${tname}[] convert(f16_) converted_f32 = ${tname}[] convert(f32_) converted_f64 = ${tname}[] convert(f64_) converted_bf16 = ${tname}[] convert(bf16_) ROOT tuple = (${tname}[], ${tname}[], ${tname}[], ${tname}[]) tuple( converted_f16, converted_f32, converted_f64, converted_bf16) } )", {{"${tname}", tname}}); ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text); } TYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertSignedToFloat) { auto tname = this->TypeName(); const auto hlo_text = absl::StrReplaceAll(R"( HloModule m ENTRY main { s8_ = s8[] parameter(0) s16_ = s16[] parameter(1) s32_ = s32[] parameter(2) s64_ = s64[] parameter(3) converted_s8 = ${tname}[] convert(s8_) converted_s16 = ${tname}[] convert(s16_) converted_s32 = ${tname}[] convert(s32_) converted_s64 = ${tname}[] convert(s64_) ROOT tuple = (${tname}[], ${tname}[], ${tname}[], ${tname}[]) tuple( converted_s8, converted_s16, converted_s32, converted_s64) } )", {{"${tname}", tname}}); ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text); } TYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertUnsignedToFloat) { auto tname = this->TypeName(); const auto hlo_text = absl::StrReplaceAll(R"( HloModule m ENTRY main { u8_ = u8[] parameter(0) u16_ = u16[] parameter(1) u32_ = u32[] parameter(2) u64_ = u64[] parameter(3) converted_u8 = ${tname}[] convert(u8_) converted_u16 = ${tname}[] convert(u16_) converted_u32 = ${tname}[] convert(u32_) converted_u64 = ${tname}[] convert(u64_) ROOT tuple = (${tname}[], ${tname}[], ${tname}[], ${tname}[]) tuple( converted_u8, converted_u16, converted_u32, 
converted_u64) } )", {{"${tname}", tname}}); ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text); } TYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertFloatToFloats) { auto tname = this->TypeName(); const auto hlo_text = absl::StrReplaceAll(R"( HloModule m ENTRY main { to_f16 = ${tname}[] parameter(0) to_f32 = ${tname}[] parameter(1) to_f64 = ${tname}[] parameter(2) to_bf16 = ${tname}[] parameter(3) f16_ = f16[] convert(to_f16) f32_ = f32[] convert(to_f32) f64_ = f64[] convert(to_f64) bf16_ = bf16[] convert(to_f64) ROOT tuple = (f16[], f32[], f64[], bf16[]) tuple(f16_, f32_, f64_, bf16_) } )", {{"${tname}", tname}}); ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text); } TYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertFloatToSigned) { auto tname = this->TypeName(); const auto hlo_text = absl::StrReplaceAll(R"( HloModule m ENTRY main { to_s8 = ${tname}[] parameter(0) to_s16 = ${tname}[] parameter(1) to_s32 = ${tname}[] parameter(2) to_s64 = ${tname}[] parameter(3) s8_ = s8[] convert(to_s8) s16_ = s16[] convert(to_s16) s32_ = s32[] convert(to_s32) s64_ = s64[] convert(to_s64) ROOT tuple = (s8[], s16[], s32[], s64[]) tuple(s8_, s16_, s32_, s64_) } )", {{"${tname}", tname}}); ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text); } TYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertFloatToUnsigned) { auto tname = this->TypeName(); const auto hlo_text = absl::StrReplaceAll(R"( HloModule m ENTRY main { to_u8 = ${tname}[] parameter(0) to_u16 = ${tname}[] parameter(1) to_u32 = ${tname}[] parameter(2) to_u64 = ${tname}[] parameter(3) u8_ = u8[] convert(to_u8) u16_ = u16[] convert(to_u16) u32_ = u32[] convert(to_u32) u64_ = u64[] convert(to_u64) ROOT tuple = (u8[], u16[], u32[], u64[]) tuple(u8_, u16_, u32_, u64_) } )", {{"${tname}", tname}}); ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text); } TYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertFloatToComplex) { auto tname = this->TypeName(); const 
auto hlo_text = absl::StrReplaceAll(R"( HloModule m ENTRY main { to_c64 = ${tname}[] parameter(0) to_c128 = ${tname}[] parameter(1) c64_ = c64[] convert(to_c64) c128_ = c128[] convert(to_c128) ROOT tuple = (c64[], c128[]) tuple(c64_, c128_) } )", {{"${tname}", tname}}); ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text); } TYPED_TEST(ElementalIrEmitterExecutionTypedTest, CompareFloat) { auto tname = this->TypeName(); if (std::is_same<TypeParam, tsl::float8_e4m3b11fnuz>()) { GTEST_SKIP() << "Skipping test for type " << tname; } const auto hlo_text = absl::StrReplaceAll(R"( HloModule m ENTRY main { p0 = ${tname}[4] parameter(0) p1 = ${tname}[4] parameter(1) ROOT cmp = pred[4] compare(p0, p1), direction=LT })", {{"${tname}", tname}}); Literal lhs = LiteralUtil::CreateR1<TypeParam>( {TypeParam(1.), TypeParam(2.), TypeParam(3.), TypeParam(4.)}); Literal rhs = LiteralUtil::CreateR1<TypeParam>( {TypeParam(4.), TypeParam(4.), TypeParam(2.), TypeParam(1.)}); ElementalIrEmitterExecutionTest::RunTest(hlo_text, {&lhs, &rhs}); } TYPED_TEST(ElementalIrEmitterExecutionTypedTest, IotaFloat) { auto tname = this->TypeName(); if (std::is_same<TypeParam, tsl::float8_e5m2>() || std::is_same<TypeParam, tsl::float8_e4m3>() || std::is_same<TypeParam, tsl::float8_e4m3fn>() || std::is_same<TypeParam, tsl::float8_e4m3b11fnuz>() || std::is_same<TypeParam, tsl::float8_e3m4>()) { GTEST_SKIP() << "Skipping test for type " << tname; } const auto hlo_text = absl::StrReplaceAll(R"( HloModule m ENTRY main { ROOT iota_ = ${tname}[4] iota(), iota_dimension=0 } )", {{"${tname}", tname}}); ElementalIrEmitterExecutionTest::RunTest(hlo_text, {}); } TYPED_TEST(ElementalIrEmitterExecutionTypedTest, BatchDotFloat) { auto tname = this->TypeName(); const auto hlo_text = absl::StrReplaceAll(R"( HloModule matmul ENTRY main { x = ${tname}[8,16] parameter(0) y = ${tname}[8,16,32] parameter(1) ROOT dot = ${tname}[8,32] dot(x, y), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={1}, 
rhs_contracting_dims={1} } )", {{"${tname}", tname}}); HloModuleConfig config; DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest(); config.set_debug_options(debug_options); TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, HloTestBase::ParseAndReturnVerifiedModule(hlo_text, config)); EXPECT_TRUE( HloTestBase::RunAndCompare(std::move(module), ErrorSpec{1e-5, 1e-5})); } XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax, MinimumHandlesNaNsOnTheLeft) { constexpr absl::string_view kHloText = R"( HloModule t ENTRY e { neg1 = f32[] constant(-1) neg1s = f32[5,5] broadcast(neg1), dimensions={} nans = f32[5,5] sqrt(neg1s) ROOT min = f32[5,5] minimum(nans, neg1s) })"; EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3})); } XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax, DISABLED_MinimumHandlesNaNsOnTheRight) { constexpr absl::string_view kHloText = R"( HloModule t ENTRY e { neg1 = f32[] constant(-1) neg1s = f32[5,5] broadcast(neg1), dimensions={} nans = f32[5,5] sqrt(neg1s) ROOT min = f32[5,5] minimum(neg1s, nans) })"; EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3})); } XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax, MaximumHandlesNaNsOnTheLeft) { constexpr absl::string_view kHloText = R"( HloModule t ENTRY e { neg1 = f32[] constant(-1) neg1s = f32[5,5] broadcast(neg1), dimensions={} nans = f32[5,5] sqrt(neg1s) ROOT max = f32[5,5] maximum(nans, neg1s) })"; EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3})); } XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax, MaximumHandlesNaNsOnTheRight) { constexpr absl::string_view kHloText = R"( HloModule t ENTRY e { neg1 = f32[] constant(-1) neg1s = f32[5,5] broadcast(neg1), dimensions={} nans = f32[5,5] sqrt(neg1s) ROOT max = f32[5,5] maximum(neg1s, nans) })"; EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3})); } XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax, MinimumReturnsLHS) { constexpr absl::string_view kHloText = 
R"( HloModule t ENTRY e { zero = f32[] constant(0) zeros = f32[5,5] broadcast(zero), dimensions={} one = f32[] constant(1) ones = f32[5,5] broadcast(one), dimensions={} ROOT min = f32[5,5] minimum(zeros, ones) })"; EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3})); } XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax, MinimumReturnsRHS) { constexpr absl::string_view kHloText = R"( HloModule t ENTRY e { zero = f32[] constant(0) zeros = f32[5,5] broadcast(zero), dimensions={} one = f32[] constant(1) ones = f32[5,5] broadcast(one), dimensions={} ROOT min = f32[5,5] minimum(ones, zeros) })"; EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3})); } XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax, MaximumReturnsLHS) { constexpr absl::string_view kHloText = R"( HloModule t ENTRY e { zero = f32[] constant(0) zeros = f32[5,5] broadcast(zero), dimensions={} one = f32[] constant(1) ones = f32[5,5] broadcast(one), dimensions={} ROOT max = f32[5,5] maximum(ones, zeros) })"; EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3})); } XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax, MaximumReturnsRHS) { constexpr absl::string_view kHloText = R"( HloModule t ENTRY e { zero = f32[] constant(0) zeros = f32[5,5] broadcast(zero), dimensions={} one = f32[] constant(1) ones = f32[5,5] broadcast(one), dimensions={} ROOT max = f32[5,5] maximum(zeros, ones) })"; EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3})); } class ElementalIrEmitterInternalTest : public HloTestBase {}; XLA_TEST_F(ElementalIrEmitterInternalTest, SparseDotIsUnsupported) { constexpr absl::string_view kHloText = R"( HloModule test ENTRY main { lhs = f16[5,16] parameter(0) rhs = f16[32,10] parameter(1) meta = u16[5,2] parameter(2) ROOT dot = f32[5,10] dot(lhs, rhs, meta), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4 })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloText)); HloInstruction* 
root = module->entry_computation()->root_instruction(); llvm::LLVMContext llvm_context; llvm::Module llvm_module("", llvm_context); llvm::IRBuilder<> builder(llvm_context); ElementalIrEmitterForTests emitter(&llvm_module, &builder); llvm_ir::IrArray::Index test_index{builder.getInt64Ty()}; auto result = emitter.TestElementalDot(root, test_index); EXPECT_FALSE(result.ok()); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/elemental_ir_emitter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/elemental_ir_emitter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
570c48f7-236d-4c3e-8e7d-44cba5bf1ea5
cpp
tensorflow/tensorflow
buffer_assignment
third_party/xla/xla/service/buffer_assignment.cc
third_party/xla/xla/service/buffer_assignment_test.cc
#include "xla/service/buffer_assignment.h" #include <algorithm> #include <cstdint> #include <deque> #include <iterator> #include <memory> #include <optional> #include <ostream> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/btree_map.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/memory/memory.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_op_metadata.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_live_range.h" #include "xla/map_util.h" #include "xla/service/buffer_value.h" #include "xla/service/buffer_value_containers.h" #include "xla/service/heap_simulator/heap_simulator.h" #include "xla/service/hlo.pb.h" #include "xla/service/hlo_alias_analysis.h" #include "xla/service/hlo_buffer.h" #include "xla/service/hlo_value.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/types.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/numbers.h" namespace xla { namespace { using absl::flat_hash_map; using absl::flat_hash_set; using absl::StrAppend; using absl::StrAppendFormat; using memory_space_assignment::PresetAssignments; using ::tsl::strings::HumanReadableNumBytes; absl::flat_hash_map<int64_t, const HloInstruction*> BuildIdToHloInstructionMap( const HloModule* module) { absl::flat_hash_map<int64_t, const HloInstruction*> id_to_hlo_instruction; for (const HloComputation* computation : module->computations()) { for (const HloInstruction* instruction : computation->instructions()) { id_to_hlo_instruction[instruction->unique_id()] = instruction; } } return id_to_hlo_instruction; } absl::StatusOr<absl::flat_hash_map<int64_t, const HloValue*>> BuildIdToLogicalBufferMap( const BufferAssignmentProto& proto, const absl::flat_hash_map<int64_t, const 
HloInstruction*>& id_to_hlo_instruction, const std::unique_ptr<HloAliasAnalysis>& alias_analysis) { absl::flat_hash_map<int64_t, const HloValue*> id_to_logical_buffer; for (const LogicalBufferProto& logical_buffer_proto : proto.logical_buffers()) { TF_RET_CHECK(logical_buffer_proto.has_defined_at()) << "Expected logical buffer to have location information in the proto."; TF_RET_CHECK(id_to_hlo_instruction.contains( logical_buffer_proto.defined_at().instruction_id())) << "Expected hlo instruction " << "with the id '" << logical_buffer_proto.defined_at().instruction_id() << "' in the proto to also exist in the " "HLO module."; const HloInstruction* hlo_instruction = id_to_hlo_instruction.at( logical_buffer_proto.defined_at().instruction_id()); std::vector<int64_t> shape_idx_vals; absl::c_copy(logical_buffer_proto.defined_at().shape_index(), std::back_inserter(shape_idx_vals)); ShapeIndex proto_shape_index(shape_idx_vals); auto& logical_buffer = alias_analysis->dataflow_analysis().GetUniqueValueAt( hlo_instruction, proto_shape_index); logical_buffer.set_color(logical_buffer_proto.color()); id_to_logical_buffer[logical_buffer_proto.id()] = &logical_buffer; } return id_to_logical_buffer; } } absl::Status GatherComputationsByAllocationType( const HloModule* module, std::vector<const HloComputation*>* thread_local_computations, std::vector<const HloComputation*>* global_computations) { std::deque<std::pair<const HloComputation*, bool>> worklist; worklist.push_back(std::make_pair(module->entry_computation(), false)); flat_hash_set<const HloComputation*> thread_local_set; flat_hash_set<const HloComputation*> global_set; while (!worklist.empty()) { auto worklist_front = worklist.front(); worklist.pop_front(); const HloComputation* computation = worklist_front.first; bool is_thread_local = worklist_front.second; bool in_thread_local_set = thread_local_set.contains(computation); bool in_global_set = global_set.contains(computation); if ((is_thread_local && in_thread_local_set) 
|| (!is_thread_local && in_global_set)) { continue; } if ((is_thread_local && in_global_set) || (!is_thread_local && in_thread_local_set)) { return InvalidArgument( "computation %s has conflicting allocation requirements (global " "and thread-local)", computation->name()); } if (is_thread_local) { thread_local_set.insert(computation); } else { global_set.insert(computation); } for (auto* instruction : computation->instructions()) { for (HloComputation* subcomputation : instruction->called_computations()) { switch (instruction->opcode()) { case HloOpcode::kCall: case HloOpcode::kConditional: case HloOpcode::kWhile: case HloOpcode::kAsyncStart: case HloOpcode::kAsyncUpdate: case HloOpcode::kAsyncDone: if (is_thread_local) { return InvalidArgument( "computation %s cannot contain call/while op because it " "requires thread-local buffer allocations", computation->name()); } worklist.push_back(std::make_pair(subcomputation, false)); break; case HloOpcode::kCustomCall: case HloOpcode::kAllReduce: case HloOpcode::kReduceScatter: case HloOpcode::kAllReduceStart: case HloOpcode::kMap: case HloOpcode::kReduce: case HloOpcode::kReduceWindow: case HloOpcode::kScatter: case HloOpcode::kSelectAndScatter: case HloOpcode::kSort: case HloOpcode::kFusion: worklist.push_back(std::make_pair(subcomputation, true)); break; default: return Internal("Unexpected calling opcode: %s", HloOpcodeString(instruction->opcode())); } } } } for (auto* computation : module->MakeComputationPostOrder()) { if (thread_local_set.contains(computation)) { thread_local_computations->push_back(computation); } else if (global_set.contains(computation)) { global_computations->push_back(computation); } } return absl::OkStatus(); } std::string BufferAllocation::Slice::ToString() const { return absl::StrCat("{index:", allocation_ == nullptr ? 
-1 : index(), ", offset:", offset_, ", size:", size_, "}"); } BufferAllocation::Slice BufferAllocation::GetSlice( const HloValue& buffer) const { const OffsetSize os = FindOrDie(assigned_buffers_, &buffer); return Slice(this, os.offset, os.size); } void BufferAllocation::AddAssignment(const HloValue& buffer, int64_t offset, int64_t size) { VLOG(4) << "Adding the following buffer to allocation #" << index() << absl::StrFormat(" (size=%d, offset=%d) %s", size, offset, buffer.ToShortString()); CHECK(!assigned_buffers_.contains(&buffer)) << "LogicalBuffer " << buffer << " already assigned to allocation " << index_; CHECK_LE(offset, size_) << "LogicalBuffer " << buffer << " offset out of range"; CHECK_LE(offset + size, size_) << "LogicalBuffer " << buffer << " size out of range at offset: " << offset << " with size: " << size; if (!(IsPreallocatedTempBuffer() && color() != 0)) { CHECK_EQ(buffer.color(), color()) << "Buffer color " << buffer.color() << " for buffer " << buffer << " does not match allocation color " << color() << "."; } OffsetSize offset_size; offset_size.offset = offset; offset_size.size = size; assigned_buffers_.emplace(&buffer, offset_size); for (HloPosition position : buffer.positions()) { Shape* shape = ShapeUtil::GetMutableSubshape( position.instruction->mutable_shape(), position.index); if (shape->has_layout()) { shape->mutable_layout()->set_memory_space(buffer.color()); } } } BufferAllocationProto BufferAllocation::ToProto() const { BufferAllocationProto proto; proto.set_index(index_); proto.set_size(size_); proto.set_is_thread_local(is_thread_local_); proto.set_is_tuple(is_tuple_); proto.set_color(color_); if (is_entry_computation_parameter_) { proto.set_is_entry_computation_parameter(true); for (int64_t idx : param_shape_index()) { proto.add_parameter_shape_index(idx); } proto.set_parameter_number(parameter_number_); } proto.set_is_constant(is_constant_); proto.set_maybe_live_out(maybe_live_out_); for (const auto& buffer_offset_size : 
assigned_buffers_) { BufferAllocationProto::Assigned* proto_assigned = proto.add_assigned(); proto_assigned->set_logical_buffer_id(buffer_offset_size.first->id()); proto_assigned->set_offset(buffer_offset_size.second.offset); proto_assigned->set_size(buffer_offset_size.second.size); } absl::c_sort(*proto.mutable_assigned(), [](const BufferAllocationProto::Assigned& assign1, const BufferAllocationProto::Assigned& assign2) { return assign1.logical_buffer_id() < assign2.logical_buffer_id(); }); return proto; } static bool CompareHloValuesById(const HloValue* a, const HloValue* b) { return a->id() < b->id(); } static const HloInstruction* GetEntryParameterInstruction( const BufferAllocation& alloc) { for (const auto& p : alloc.assigned_buffers()) { const HloValue* value = p.first; const HloInstruction* instr = value->instruction(); if (instr->opcode() == HloOpcode::kParameter && instr->parent() == instr->GetModule()->entry_computation()) { return instr; } } return nullptr; } static const HloInstruction* GetOutputInstruction( const BufferAllocation& alloc) { for (const auto& p : alloc.assigned_buffers()) { const HloValue* value = p.first; for (const HloPosition& position : value->positions()) { const HloInstruction* instr = position.instruction; if (position.index.empty() && instr->parent()->root_instruction() == instr && instr->parent()->IsEntryComputation()) { return instr; } } } return nullptr; } std::string BufferAllocation::ToShortString() const { std::string output; StrAppendFormat(&output, "allocation %d: size %d", index_, size()); if (color() != 0) { StrAppend(&output, ", color ", color()); } if (is_entry_computation_parameter()) { const HloInstruction* param = GetEntryParameterInstruction(*this); StrAppend(&output, ", parameter ", parameter_number(), ", shape |", param ? 
param->shape().ToString(false) : "<unknown shape>", "| at ShapeIndex ", param_shape_index().ToString()); } if (const HloInstruction* instr = GetOutputInstruction(*this)) { StrAppend(&output, ", output shape is |", instr->shape().ToString(false), "|"); } if (is_constant()) { StrAppend(&output, ", constant"); } if (is_thread_local()) { StrAppend(&output, ", thread-local"); } if (maybe_live_out()) { StrAppend(&output, ", maybe-live-out"); } if (IsPreallocatedTempBuffer()) { StrAppend(&output, ", preallocated-temp"); } StrAppend(&output, ":\n"); return output; } std::string BufferAllocation::ToString() const { std::string output = ToShortString(); std::vector<const HloValue*> sorted_buffers; for (const auto& buffer_offset_size : assigned_buffers_) { sorted_buffers.push_back(buffer_offset_size.first); } absl::c_sort(sorted_buffers, &CompareHloValuesById); for (const HloValue* buffer : sorted_buffers) { const OffsetSize& offset_size = FindOrDie(assigned_buffers_, buffer); StrAppend(&output, absl::StrFormat( " value: %s (size=%d,offset=%d): %s\n", buffer->ToShortString(), offset_size.size, offset_size.offset, ShapeUtil::HumanStringWithLayout(buffer->shape()))); } return output; } std::ostream& operator<<(std::ostream& out, const BufferAllocation& buffer) { out << buffer.ToString(); return out; } std::ostream& operator<<(std::ostream& out, const BufferAllocation::Slice& s) { out << s.ToString(); return out; } bool BufferAssignment::HasAllocation(const HloValue& value) const { return allocation_index_for_value_.contains(&value); } bool BufferAssignment::HasAllocation(HloValue::Id value_id) const { return HasAllocation(dataflow_analysis().GetValue(value_id)); } bool BufferAssignment::HasAllocation(const HloBuffer& buffer) const { return allocation_index_for_value_.contains(buffer.values()[0]); } const BufferAllocation& BufferAssignment::GetAssignedAllocation( const HloValue& value) const { CHECK(HasAllocation(value)); return 
GetAllocation(allocation_index_for_value_.at(&value)); } const BufferAllocation& BufferAssignment::GetAssignedAllocation( const HloBuffer& hlo_buffer) const { return GetAssignedAllocation(*hlo_buffer.values()[0]); } BufferAllocation* BufferAssignment::GetMutableAssignedAllocation( const HloBuffer& buffer) { return const_cast<BufferAllocation*>(&GetAssignedAllocation(buffer)); } std::set<BufferAllocation::Slice> BufferAssignment::GetAllSlices( const HloInstruction* instruction, const ShapeIndex& index) const { std::set<BufferAllocation::Slice> result; for (const HloValue* value : dataflow_analysis().GetValueSet(instruction, index).values()) { if (HasAllocation(*value)) { result.insert(GetAssignedAllocation(*value).GetSlice(*value)); } } return result; } const BufferAllocation& BufferAssignment::GetAllocation( BufferAllocation::Index index) const { CHECK_GE(index, 0); CHECK_LT(index, allocations_.size()); return allocations_[index]; } const BufferAllocation* BufferAssignment::GetInstructionAllocation( const HloInstruction* hlo, const ShapeIndex& shape_index) const { const HloValue* value = dataflow_analysis().GetValueSet(hlo, shape_index).values()[0]; if (!HasAllocation(*value)) { return nullptr; } const BufferAllocation& instruction_allocation = GetAssignedAllocation(*value); return &instruction_allocation; } BufferAllocation* BufferAssignment::GetMutableAllocation( BufferAllocation::Index index) { return const_cast<BufferAllocation*>(&GetAllocation(index)); } bool BufferAssignment::HasAllocationAt(const HloInstruction* instruction, const ShapeIndex& index) const { return absl::c_any_of( dataflow_analysis().GetValueSet(instruction, index).values(), IsKeyIn(allocation_index_for_value_)); } bool BufferAssignment::HasTopLevelAllocation( const HloInstruction* instruction) const { return HasAllocationAt(instruction, {}); } absl::StatusOr<BufferAllocation::Slice> BufferAssignment::GetUniqueSlice( const HloInstruction* instruction, const ShapeIndex& index) const { VLOG(3) 
<< "Trying to find unique slice for " << instruction->name() << " [" << index << "]"; BufferAllocation::Slice result; for (const HloValue* value : dataflow_analysis().GetValueSet(instruction, index).values()) { VLOG(3) << "Examining value " << *value; if (HasAllocation(*value)) { VLOG(3) << "Has allocation"; const BufferAllocation::Slice slice = GetAssignedAllocation(*value).GetSlice(*value); if (result.allocation() == nullptr) { result = slice; } else if (result != slice) { return FailedPrecondition( "BufferAllocation::Slice for instruction %s at index %s cannot " "be determined at compile-time.", instruction->name(), index.ToString()); } } else { VLOG(3) << "No allocation"; } } if (result.allocation() == nullptr) { return FailedPrecondition( "BufferAllocation::Slice not assigned for instruction %s at index %s", instruction->name(), index.ToString()); } return result; } absl::StatusOr<BufferAllocation::Slice> BufferAssignment::GetUniqueTopLevelSlice( const HloInstruction* instruction) const { return GetUniqueSlice(instruction, {}); } bool BufferAssignment::SharesSliceAtIndex( const HloInstruction* hlo_a, const ShapeIndex& shape_index_a, const HloInstruction* hlo_b, const ShapeIndex& shape_index_b) const { return GetUniqueSlice(hlo_a, shape_index_a).value() == GetUniqueSlice(hlo_b, shape_index_b).value(); } bool BufferAssignment::HaveDisjointSlices(const HloInstruction* hlo_a, const HloInstruction* hlo_b) const { using SliceSet = flat_hash_set<BufferAllocation::Slice>; auto collect_slices = [&](const HloInstruction* instr) -> SliceSet { SliceSet slices; absl::Status status = ShapeUtil::ForEachSubshapeWithStatus( instr->shape(), [&](const Shape& , const ShapeIndex& index) -> absl::Status { auto shape_slices = GetAllSlices(instr, index); if (shape_slices.empty()) { return InvalidArgument("No slices assigned to part of instr."); } slices.insert(shape_slices.begin(), shape_slices.end()); return absl::OkStatus(); }); if (!status.ok()) { return {}; } return slices; }; 
SliceSet slices_a = collect_slices(hlo_a); SliceSet slices_b = collect_slices(hlo_b); return !slices_a.empty() && !slices_b.empty() && absl::c_none_of(slices_a, [&](const BufferAllocation::Slice& slice) { return slices_b.contains(slice); }); } absl::StatusOr<BufferAllocation::Slice> BufferAssignment::GetUniqueTopLevelOutputSlice() const { return GetUniqueTopLevelSlice( module_->entry_computation()->root_instruction()); } BufferAllocation* BufferAssignment::NewEmptyAllocation( int64_t size, LogicalBuffer::Color color) { BufferAllocation::Index index = allocations_.size(); allocations_.emplace_back(index, size, color); BufferAllocation* allocation = &allocations_.back(); return allocation; } BufferAllocation* BufferAssignment::NewAllocation(const HloBuffer& buffer, int64_t size) { BufferAllocation* allocation = NewEmptyAllocation(size, buffer.color()); AddAssignment(allocation, buffer, 0, size); allocation->peak_buffers_.push_back(buffer.values()[0]); return allocation; } void BufferAssignment::AddAssignment(BufferAllocation* allocation, const HloBuffer& buffer, int64_t offset, int64_t size) { CHECK(allocation->is_reusable() || allocation->assigned_buffers().empty()) << "Non-reusable allocation already assigned a buffer: " << allocation->ToString(); for (const HloValue* buffer_value : buffer.values()) { CHECK(!allocation_index_for_value_.contains(buffer_value)) << "BufferValue " << buffer_value << " already has an allocation."; allocation->AddAssignment(*buffer_value, offset, size); allocation_index_for_value_[buffer_value] = allocation->index(); } if (alias_analysis().BufferLivesOut(buffer)) { VLOG(3) << "HloBuffer lives out: " << buffer.ToString(); VLOG(3) << "Set maybe live out: " << allocation->ToString(); allocation->set_maybe_live_out(true); } } void BufferAssignment::AddAssignment(BufferAllocation* allocation, const HloValue& value, int64_t offset, int64_t size) { allocation->AddAssignment(value, offset, size); allocation_index_for_value_[&value] = 
allocation->index();
  // (Continuation of a function whose start is before this chunk.)
  const HloValue& hlo_value =
      *CHECK_NOTNULL(dynamic_cast<const HloValue*>(&value));
  if (alias_analysis().ValueLivesOut(hlo_value)) {
    VLOG(3) << "HloValue lives out: " << hlo_value.ToString();
    VLOG(3) << "Set maybe live out: " << allocation->ToString();
    allocation->set_maybe_live_out(true);
  }
}

// Merges all preallocated temp-buffer allocations into one combined
// allocation per color (respecting the per-heap size constraint), then
// re-indexes all allocations and rebuilds the value -> allocation map.
// Private-stack colors overlap their temp allocations at base offset 0
// instead of appending them.
void BufferAssignment::CombineTempAllocations(
    const absl::flat_hash_set<BufferValue::Color>& private_stack_colors,
    std::optional<BufferValue::Color> temp_buffer_color) {
  VLOG(1) << "CombineTempAllocations()";
  // Deque: pointers stored in combined_allocation_map must stay valid as
  // new elements are emplaced.
  std::deque<BufferAllocation> combined_allocations;
  flat_hash_map<BufferValue::Color, BufferAllocation*> combined_allocation_map;
  // Move all temp allocations to the end of the allocation vector.
  const auto first_temp_it =
      std::partition(allocations_.begin(), allocations_.end(),
                     [](const BufferAllocation& allocation) {
                       return !allocation.IsPreallocatedTempBuffer();
                     });
  if (first_temp_it != allocations_.end()) {
    for (auto it = first_temp_it; it != allocations_.end(); ++it) {
      BufferAllocation& temp_allocation = *it;
      BufferValue::Color color = temp_allocation.color();
      auto combined_it = combined_allocation_map.find(color);
      if (combined_it == combined_allocation_map.end()) {
        // First temp allocation seen for this color becomes the combined one.
        VLOG(1) << "Combined temp allocation for color " << color
                << " is: " << temp_allocation;
        combined_allocations.emplace_back(temp_allocation);
        combined_allocation_map.emplace(color, &combined_allocations.back());
        continue;
      }
      if (combined_it->second->size() + it->size() >=
          multiheap_size_constraint_per_heap_) {
        // Would exceed the per-heap size constraint: start a fresh combined
        // allocation for this color instead of absorbing.
        VLOG(1) << "Due to size constraint, reset temp allocation for color "
                << color << " to: " << temp_allocation;
        combined_allocations.emplace_back(temp_allocation);
        combined_allocation_map.emplace(color, &combined_allocations.back());
        continue;
      }
      BufferAllocation* combined_allocation = combined_it->second;
      VLOG(1) << "Combined allocation absorbing temp allocation: "
              << temp_allocation;
      int64_t alignment = color_alignment_(color);
      int64_t base;
      bool is_private_stack = private_stack_colors.contains(color);
      if (is_private_stack) {
        // Private stacks overlap: grow to the max size rather than append.
        base = 0;
        combined_allocation->set_size(std::max(base, temp_allocation.size()));
      } else {
        // Append at an aligned base offset.
        base = RoundUpTo(combined_allocation->size(), alignment);
        combined_allocation->set_size(base + temp_allocation.size());
      }
      // Rebase every assignment of the absorbed temp allocation.
      for (const auto& buffer_offset_size : temp_allocation.assigned_buffers_) {
        const HloValue* value = buffer_offset_size.first;
        const int64_t offset = buffer_offset_size.second.offset;
        const int64_t size = buffer_offset_size.second.size;
        combined_allocation->AddAssignment(*value, base + offset, size);
      }
      if (!temp_allocation.HeapTraces().empty()) {
        CHECK_EQ(temp_allocation.HeapTraces().size(), 1);
        combined_allocation->AddHeapTrace(temp_allocation.HeapTraces().front());
      }
      if (is_private_stack) {
        // For overlapping private stacks the peak is the single largest
        // stack's peak, not the union.
        if (temp_allocation.size() == combined_allocation->size()) {
          combined_allocation->peak_buffers_ = temp_allocation.peak_buffers_;
        }
      } else {
        combined_allocation->peak_buffers_.insert(
            combined_allocation->peak_buffers_.end(),
            temp_allocation.peak_buffers_.begin(),
            temp_allocation.peak_buffers_.end());
      }
      if (temp_buffer_color.has_value()) {
        // Only recolor default-colored (0) combined allocations.
        if (combined_allocation->color() == 0) {
          combined_allocation->set_color(temp_buffer_color.value());
        }
      }
    }
    // Replace the individual temp allocations with the combined ones.
    allocations_.erase(first_temp_it, allocations_.end());
    for (BufferAllocation& combined : combined_allocations) {
      temp_allocation_total_size_ += combined.size();
      allocations_.push_back(std::move(combined));
    }
  }
  // Re-index the allocations and rebuild the value -> index map.
  allocation_index_for_value_.erase(allocation_index_for_value_.begin(),
                                    allocation_index_for_value_.end());
  for (size_t index = 0; index < allocations_.size(); ++index) {
    BufferAllocation* allocation = &allocations_[index];
    allocation->set_index(index);
    // Sort by id for deterministic map-rebuild order.
    std::vector<const HloValue*> sorted_values;
    sorted_values.reserve(allocation->assigned_buffers_.size());
    for (const auto& buffer_offset_size : allocation->assigned_buffers_) {
      const HloValue* value = buffer_offset_size.first;
      sorted_values.emplace(sorted_values.end(), value);
    }
    absl::c_sort(sorted_values, &CompareHloValuesById);
    for (const HloValue* value : sorted_values) {
      allocation_index_for_value_[value] = index;
    }
  }
}

// Accumulates per-category allocation counts/bytes into stats_ and, when
// every non-fusion computation has a sequential order, computes total
// fragmentation relative to the heap-simulator minimum for the module.
absl::Status BufferAssignment::ComputeSummaryStats() {
  for (auto& allocation : Allocations()) {
    if (allocation.is_entry_computation_parameter()) {
      stats_.parameter_allocation_count++;
      stats_.parameter_allocation_bytes += allocation.size();
    }
    if (allocation.is_constant()) {
      stats_.constant_allocation_count++;
      stats_.constant_allocation_bytes += allocation.size();
    }
    if (allocation.maybe_live_out()) {
      stats_.maybe_live_out_allocation_count++;
      stats_.maybe_live_out_allocation_bytes += allocation.size();
    }
    if (allocation.IsPreallocatedTempBuffer()) {
      stats_.preallocated_temp_allocation_count++;
      stats_.preallocated_temp_allocation_bytes += allocation.size();
    }
    stats_.total_allocation_count++;
    stats_.total_allocation_bytes += allocation.size();
  }
  // Fragmentation is only computable with a complete sequential schedule.
  HloSchedule schedule(module_);
  bool schedule_complete = true;
  for (const auto& computation : module_->computations()) {
    if (!computation->IsFusionComputation()) {
      const HloInstructionSequence* sequence =
          hlo_ordering().SequentialOrder(*computation);
      if (sequence == nullptr) {
        schedule_complete = false;
      } else {
        schedule.set_sequence(computation, *sequence);
      }
    }
  }
  if (schedule_complete) {
    TF_RETURN_IF_ERROR(schedule.Verify());
    TF_ASSIGN_OR_RETURN(
        const int64_t min_size,
        HeapSimulator::MinimumMemoryForModule(schedule, buffer_size_));
    stats_.total_fragmentation_bytes = stats_.total_allocation_bytes - min_size;
  }
  return absl::OkStatus();
}

// Renders the summary stats as a human-readable multi-line string.
std::string BufferAssignment::Stats::ToString() const {
  std::string s;
  StrAppendFormat(&s, "BufferAssignment stats:\n");
  StrAppendFormat(&s, " parameter allocation: %10s\n",
                  HumanReadableNumBytes(parameter_allocation_bytes));
  StrAppendFormat(&s, " constant allocation: %10s\n",
                  HumanReadableNumBytes(constant_allocation_bytes));
  StrAppendFormat(&s, " maybe_live_out allocation: %10s\n",
                  HumanReadableNumBytes(maybe_live_out_allocation_bytes));
  StrAppendFormat(&s, " preallocated temp allocation: %10s\n",
HumanReadableNumBytes(preallocated_temp_allocation_bytes));
  // Negative fragmentation values mean "not computed"; skip those.
  if (preallocated_temp_fragmentation_bytes >= 0) {
    const double percent = 100. * preallocated_temp_fragmentation_bytes /
                           preallocated_temp_allocation_bytes;
    StrAppendFormat(
        &s, " preallocated temp fragmentation: %10s (%.2f%%)\n",
        HumanReadableNumBytes(preallocated_temp_fragmentation_bytes), percent);
  }
  StrAppendFormat(&s, " total allocation: %10s\n",
                  HumanReadableNumBytes(total_allocation_bytes));
  if (total_fragmentation_bytes >= 0) {
    const double percent =
        100. * total_fragmentation_bytes / total_allocation_bytes;
    StrAppendFormat(&s, " total fragmentation: %10s (%.2f%%)\n",
                    HumanReadableNumBytes(total_fragmentation_bytes), percent);
  }
  return s;
}

// Dumps every allocation plus an id-sorted list of all assigned HloValues.
std::string BufferAssignment::ToString() const {
  std::string output;
  absl::StrAppend(&output, "BufferAssignment:\n");
  std::vector<const HloValue*> used_values;
  int64_t total_size = 0;
  for (auto& allocation : allocations_) {
    total_size += allocation.size();
    absl::StrAppend(&output, allocation.ToString());
    for (const auto& p : allocation.assigned_buffers()) {
      used_values.push_back(p.first);
    }
  }
  absl::StrAppend(&output, "\nTotal bytes used: ", total_size, " (",
                  HumanReadableNumBytes(total_size), ")\n");
  absl::StrAppend(&output, "\nUsed values:\n");
  absl::c_sort(used_values, &CompareHloValuesById);
  for (const HloValue* value : used_values) {
    absl::StrAppend(&output, value->ToString());
  }
  return output;
}

// Returns up to k of the largest peak-memory logical buffers across all
// allocations, in descending size order.
std::vector<std::pair<int64_t, const HloValue*>> TopKPeakBuffers(
    uint64_t k, const std::vector<BufferAllocation> allocations) {
  // btree_multimap keeps candidates ordered by size; begin() is the
  // current smallest and is evicted when a larger buffer arrives.
  absl::btree_multimap<int64_t, const HloValue*> topk;
  for (const BufferAllocation& allocation : allocations) {
    for (const HloValue* value : allocation.PeakMemoryLogicalBuffers()) {
      int64_t size = allocation.assigned_buffers().at(value).size;
      if (topk.size() < k) {
        topk.insert({size, value});
      } else {
        auto it = topk.begin();
        if (size > it->first) {
          topk.erase(it);
          topk.insert({size, value});
        }
      }
    }
  }
  // The map iterates in ascending size order; reverse for descending.
  std::vector<std::pair<int64_t, const HloValue*>> topk_descending;
  topk_descending.reserve(topk.size());
  absl::c_reverse_copy(topk, std::back_inserter(topk_descending));
  return topk_descending;
}

// OOM-debugging dump: summary stats followed by details of the largest
// peak buffers (size, operator metadata, opcode/shape or entry-parameter
// subshape).
std::string BufferAssignment::ToVerboseString(
    size_t max_buffers_to_show) const {
  std::string output =
      absl::StrCat("BufferAssignment OOM Debugging.\n", stats_.ToString());
  std::vector<std::pair<int64_t, const HloValue*>> peak_buffers =
      TopKPeakBuffers(max_buffers_to_show, allocations_);
  std::vector<std::string> buf_strs;
  for (size_t i = 0; i < std::min(max_buffers_to_show, peak_buffers.size());
       ++i) {
    const HloValue* value = peak_buffers[i].second;
    const HloInstruction* instr = value->instruction();
    int64_t size = peak_buffers[i].first;
    buf_strs.push_back(absl::StrCat("\n\tBuffer ", i + 1,
                                    ":\n\t\tSize: ",
                                    xla::HumanReadableNumBytes(size)));
    if (!instr->metadata().op_name().empty()) {
      buf_strs.push_back(absl::StrCat(
          "\n\t\tOperator: ", xla::OpMetadataToString(instr->metadata())));
    }
    if (instr->opcode() == HloOpcode::kParameter &&
        (instr->parent() == instr->GetModule()->entry_computation())) {
      // Special-case entry parameters to surface the exact subshape.
      buf_strs.push_back(absl::StrCat(
          "\n\t\tEntry Parameter Subshape: ",
          ShapeUtil::GetSubshape(instr->shape(), value->index()).ToString()));
    } else {
      buf_strs.push_back(
          absl::StrCat("\n\t\tXLA Label: ", HloOpcodeString(instr->opcode()),
                       "\n\t\tShape: ", value->shape().ToString()));
    }
    buf_strs.push_back("\n\t\t==========================\n");
  }
  absl::StrAppend(&output, "Peak buffers:", absl::StrJoin(buf_strs, ""));
  return output;
}

// Emits one CSV row per assigned buffer: id, name, offset, size,
// definition/end times from the schedule, and the sorted uses.
std::string BufferAssignment::BufferInfoString() const {
  std::string binfo;
  // Columns: buffer_id,buffer_name,offset,size,definition_time,end_time,
  // num_uses,use_times,use_names
  absl::StrAppend(&binfo,
                  "buffer_id,buffer_name,offset,size,"
                  "definition_time,end_time,num_uses,use_times,use_names\n");
  const HloLiveRange& live_ranges = hlo_live_range();
  const auto& instruction_schedule = live_ranges.instruction_schedule();
  const auto& buffer_live_ranges = live_ranges.buffer_live_ranges();
  std::vector<std::pair<const HloValue*, BufferAllocation::OffsetSize>>
      buffers;
  for (const BufferAllocation&
allocation : allocations_) {
    absl::c_copy(allocation.assigned_buffers(), std::back_inserter(buffers));
  }
  // Sort rows by buffer id for stable, diffable output.
  absl::c_sort(
      buffers,
      [](const std::pair<const HloValue*, BufferAllocation::OffsetSize>& b1,
         const std::pair<const HloValue*, BufferAllocation::OffsetSize>& b2) {
        return b1.first->id() < b2.first->id();
      });
  for (const auto& buffer_pair : buffers) {
    const HloValue& buffer = *buffer_pair.first;
    const BufferAllocation::OffsetSize& offset_size = buffer_pair.second;
    // Buffers without a recorded live range are skipped.
    if (!buffer_live_ranges.contains(&buffer)) {
      continue;
    }
    // Collect the uses, sorted by schedule time.
    std::vector<std::pair<int64_t, std::string>> uses;
    uses.reserve(buffer.GetUses().size());
    for (const HloUse& use : buffer.GetUses()) {
      uses.emplace_back(instruction_schedule.at(use.instruction),
                        use.ToString());
    }
    absl::c_sort(uses);
    std::vector<int64_t> use_positions;
    std::vector<std::string> use_names;
    use_positions.reserve(uses.size());
    use_names.reserve(uses.size());
    for (const auto& use : uses) {
      use_positions.push_back(use.first);
      use_names.push_back(use.second);
    }
    const int64_t definition_time =
        instruction_schedule.at(buffer.defining_position().instruction);
    const int64_t end_t = buffer_live_ranges.at(&buffer).end;
    absl::StrAppend(&binfo, buffer.id(), ",");
    absl::StrAppend(&binfo, "\"", buffer.ToShortString(), "\",");
    absl::StrAppend(&binfo, offset_size.offset, ",");
    absl::StrAppend(&binfo, offset_size.size, ",");
    absl::StrAppend(&binfo, definition_time, ",");
    absl::StrAppend(&binfo, end_t, ",");
    absl::StrAppend(&binfo, use_positions.size(), ",");
    absl::StrAppend(&binfo, "\"", absl::StrJoin(use_positions, ";"), "\",");
    absl::StrAppend(&binfo, "\"", absl::StrJoin(use_names, ";"), "\"");
    absl::StrAppend(&binfo, "\n");
  }
  return binfo;
}

// Serializes logical buffers, buffer aliases, allocations and their heap
// simulator traces into a BufferAssignmentProto.
BufferAssignmentProto BufferAssignment::ToProto() const {
  BufferAssignmentProto proto;
  const HloDataflowAnalysis& dataflow = this->dataflow_analysis();
  for (BufferValue::Id id = 0; id < dataflow.values().size(); id++) {
    auto& value = dataflow.values().at(id);
    if (HasAllocation(*value)) {
      LogicalBufferProto proto_buffer = value->ToProto(buffer_size_);
      proto.add_logical_buffers()->Swap(&proto_buffer);
      // Every other value in the same HloBuffer becomes an alias entry.
      for (const HloValue* alias :
           alias_analysis().GetBufferContainingValue(*value).values()) {
        if (alias->instruction() == value->instruction() &&
            alias->index() == value->index()) {
          continue;  // skip self-alias
        }
        BufferAssignmentProto::BufferAlias* proto_alias =
            proto.add_buffer_aliases();
        LogicalBufferProto::Location proto_alias_location =
            BufferValue::ToLocationProto(*alias->instruction(),
                                         alias->index());
        proto_alias->set_source_buffer_id(value->id());
        proto_alias->mutable_location()->Swap(&proto_alias_location);
      }
    }
  }
  for (const BufferAllocation& allocation : Allocations()) {
    BufferAllocationProto proto_allocation = allocation.ToProto();
    proto.add_buffer_allocations()->Swap(&proto_allocation);
    for (const HeapSimulatorTrace& heap_trace : allocation.HeapTraces()) {
      *proto.add_heap_simulator_traces() = heap_trace;
    }
  }
  return proto;
}

// Reconstructs a BufferAssignment from its proto: re-runs alias analysis
// on `module`, replays allocations/assignments, and cross-checks the
// recomputed analysis against what was serialized.
absl::StatusOr<std::unique_ptr<BufferAssignment>> BufferAssignment::FromProto(
    const BufferAssignmentProto& proto, const HloModule* module,
    BufferValue::SizeFunction buffer_size,
    HloDataflowAnalysis::CanShareBuffer can_share_buffer) {
  TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
                      HloAliasAnalysis::Run(module, can_share_buffer));
  auto id_to_hlo_instruction = BuildIdToHloInstructionMap(module);
  absl::flat_hash_map<int64_t, const HloValue*> id_to_logical_buffer;
  TF_ASSIGN_OR_RETURN(
      id_to_logical_buffer,
      BuildIdToLogicalBufferMap(proto, id_to_hlo_instruction, alias_analysis));
  // NOTE: ordering/live-range members are left null here; only allocations
  // and assignments are reconstructed from the proto.
  std::unique_ptr<BufferAssignment> buffer_assignment =
      absl::WrapUnique(new BufferAssignment(
          module, nullptr, std::move(buffer_size), nullptr,
          std::move(alias_analysis), nullptr));
  for (const auto& alloc_proto : proto.buffer_allocations()) {
    BufferAllocation* allocation = buffer_assignment->NewEmptyAllocation(
        alloc_proto.size(), alloc_proto.color());
    CHECK(allocation->index() == alloc_proto.index())
        << "Expected allocations in BufferAssignment proto to be sorted by "
           "index.";
    allocation->set_is_thread_local(alloc_proto.is_thread_local());
    allocation->set_is_tuple(alloc_proto.is_tuple());
    allocation->set_constant(alloc_proto.is_constant());
    if (alloc_proto.is_entry_computation_parameter()) {
      std::vector<int64_t> shape_idx_vals;
      absl::c_copy(alloc_proto.parameter_shape_index(),
                   std::back_inserter(shape_idx_vals));
      ShapeIndex shape_index(shape_idx_vals);
      allocation->set_entry_computation_parameter(
          alloc_proto.parameter_number(), shape_index, false);
    }
    for (const auto& assignee : alloc_proto.assigned()) {
      HloValue::Id logical_buffer_id = assignee.logical_buffer_id();
      const auto& buffer_val = id_to_logical_buffer[logical_buffer_id];
      buffer_assignment->AddAssignment(allocation, *buffer_val,
                                       assignee.offset(), assignee.size());
    }
    // Recomputed liveness must agree with the serialized flag.
    CHECK_EQ(allocation->maybe_live_out(), alloc_proto.maybe_live_out())
        << "Dataflow analysis differs from proto.";
  }
  TF_RET_CHECK(proto.logical_buffers_size() ==
               buffer_assignment->allocation_index_for_value_.size());
  for (auto& logical_buffer_proto : proto.logical_buffers()) {
    TF_RET_CHECK(buffer_assignment->HasAllocation(
        *id_to_logical_buffer[logical_buffer_proto.id()]));
  }
  return buffer_assignment;
}

// Public entry point: constructs a BufferAssigner with the given policy
// knobs and delegates to CreateAssignment.
absl::StatusOr<std::unique_ptr<BufferAssignment>> BufferAssigner::Run(
    const HloModule* module, std::unique_ptr<HloOrdering> hlo_ordering,
    BufferValue::SizeFunction buffer_size,
    LogicalBuffer::AlignmentFunction color_alignment,
    bool allocate_buffers_for_constants, BufferAssigner::Colorer colorer,
    std::optional<BufferAssigner::MustNotLiveOut> must_not_live_out,
    HloDataflowAnalysis::CanShareBuffer can_share_buffer,
    std::unique_ptr<PresetAssignments> preset_assignments,
    const PrivateStacks& private_stacks,
    GlobalDecreasingSizeBestFitHeap<HloValue>::BufferIntervalCompare
        heap_buffer_interval_compare,
    std::optional<BufferAssignment::BufferIsolationOptions> isolation_options,
    std::optional<BufferValue::Color> temp_buffer_color) {
  BufferAssigner assigner(allocate_buffers_for_constants, std::move(colorer),
must_not_live_out, std::move(preset_assignments));
  return assigner.CreateAssignment(
      module, std::move(hlo_ordering), std::move(buffer_size),
      std::move(color_alignment), std::move(can_share_buffer), private_stacks,
      heap_buffer_interval_compare, isolation_options, temp_buffer_color);
}

// Returns true if buffer1 and buffer2 have overlapping live ranges and so
// may not share an allocation. Back-to-back ranges are allowed only when
// the later value may legally reuse the earlier value's buffer in place.
// Requires a total-order schedule.
bool BufferAssigner::LiveRangeInterferes(const HloValue* buffer1,
                                         const HloValue* buffer2,
                                         BufferAssignment* assignment) {
  CHECK((assignment->hlo_live_range().total_order_scheduled()));
  const HloLiveRange& hlo_live_range = assignment->hlo_live_range();
  const auto& buffer_live_ranges = hlo_live_range.buffer_live_ranges();
  auto live_range_it1 = buffer_live_ranges.find(buffer1);
  CHECK(live_range_it1 != buffer_live_ranges.end())
      << "Buffer doesn't have a proper live range:" << buffer1->ToString();
  auto live_range_it2 = buffer_live_ranges.find(buffer2);
  CHECK(live_range_it2 != buffer_live_ranges.end())
      << "Buffer doesn't have a proper live range:" << buffer2->ToString();
  // True when user_value can take operand_value's buffer as an in-place
  // operand at the position where the operand's live range ends.
  auto can_share_as_operand =
      [&assignment](const HloValue* user_value, const HloValue* operand_value,
                    const HloLiveRange::TimeBound& operand_live_range) {
        HloPosition operand_end_position = operand_live_range.end_position;
        return user_value->instruction()->opcode() != HloOpcode::kCopy &&
               user_value->instruction()->IsUserOf(
                   operand_end_position.instruction) &&
               assignment->dataflow_analysis().CanShareOperandBufferWithUser(
                   operand_end_position.instruction,
                   operand_end_position.index, user_value->instruction(),
                   user_value->index());
      };
  const auto& live_range_1 = live_range_it1->second;
  const auto& live_range_2 = live_range_it2->second;
  if (!(live_range_1.start > live_range_2.end ||
        live_range_2.start > live_range_1.end)) {
    // Ranges overlap; only the exact back-to-back cases may still share.
    if (live_range_1.end == live_range_2.start) {
      auto operand_value = buffer1;
      auto user_value = buffer2;
      if (!can_share_as_operand(user_value, operand_value, live_range_1)) {
        VLOG(4) << "End of live range of " << buffer1->ToShortString()
                << " is equal to the start of live range of "
                << buffer2->ToShortString() << ", buffer cannot be shared.";
        return true;
      }
    } else if (live_range_2.end == live_range_1.start) {
      auto operand_value = buffer2;
      auto user_value = buffer1;
      if (!can_share_as_operand(user_value, operand_value, live_range_2)) {
        VLOG(4) << "End of live range of " << buffer2->ToShortString()
                << " is equal to the start of live range of "
                << buffer1->ToShortString() << ", buffer cannot be shared.";
        return true;
      }
    } else {
      VLOG(4) << "Can't assign: assignee " << *buffer1
              << " may interfere with " << *buffer2;
      VLOG(4) << "assigned_buffer.start: " << live_range_1.start;
      VLOG(4) << "assigned_buffer.end: " << live_range_1.end;
      VLOG(4) << "live_range_2.start" << live_range_2.start;
      VLOG(4) << "live_range_2.end" << live_range_2.end;
      return true;
    }
  }
  return false;
}

// Tries to assign hlo_buffer into the existing allocation; returns true on
// success. Rejects on color mismatch, insufficient size, read-only or
// non-reusable allocations, must-not-live-out violations, live-range
// interference, and copies that read from the allocation's positions.
bool BufferAssigner::MaybeAssignBuffer(BufferAllocation* allocation,
                                       const HloBuffer& hlo_buffer,
                                       BufferAssignment* assignment) {
  CHECK(!assignment->HasAllocation(hlo_buffer))
      << "buffer " << hlo_buffer << " already has an allocation assigned.";
  VLOG(4) << "Trying to assign " << hlo_buffer << " size "
          << assignment->HloBufferSize(hlo_buffer)
          << " to allocation: " << *allocation;
  if (hlo_buffer.color() != allocation->color()) {
    VLOG(4) << "Can't assign: buffer has color " << hlo_buffer.color()
            << " and allocation has color " << allocation->color() << ".";
    return false;
  }
  if (assignment->HloBufferSize(hlo_buffer) > allocation->size()) {
    VLOG(4) << "Can't assign: buffer is larger than allocation ("
            << assignment->HloBufferSize(hlo_buffer) << " > "
            << allocation->size() << ")";
    return false;
  }
  if (allocation->is_readonly()) {
    VLOG(4) << "Can't assign: allocation is readonly";
    return false;
  }
  if (must_not_live_out_.has_value()) {
    if (allocation->maybe_live_out()) {
      // A must-not-live-out value may not join a maybe-live-out allocation.
      for (const HloValue* value : hlo_buffer.values()) {
        if ((*must_not_live_out_)(assignment->alias_analysis(),
                                  value->instruction(), value->index())) {
          VLOG(4) << "Can't assign: " << value->instruction()->ToString()
                  << " cannot live out of the module";
          return false;
        }
      }
    }
    if (assignment->alias_analysis().BufferLivesOut(hlo_buffer)) {
      // Conversely, a live-out buffer may not join an allocation that
      // already holds a must-not-live-out value.
      for (const auto& buffer_offset_size : allocation->assigned_buffers()) {
        const HloValue* value = buffer_offset_size.first;
        if ((*must_not_live_out_)(assignment->alias_analysis(),
                                  value->instruction(), value->index())) {
          VLOG(4) << "Can't assign: " << value->instruction()
                  << " cannot live out of the module";
          return false;
        }
      }
    }
  }
  if (!allocation->is_reusable()) {
    VLOG(4) << "Can't assign: allocation is not reusable";
    return false;
  }
  // Check interference against every buffer already in the allocation.
  for (const auto& buffer_offset_size : allocation->assigned_buffers()) {
    const HloValue& assigned_buffer =
        *CHECK_NOTNULL(dynamic_cast<const HloValue*>(buffer_offset_size.first));
    for (const HloValue* new_value : hlo_buffer.values()) {
      if (assignment->hlo_live_range().total_order_scheduled()) {
        if (LiveRangeInterferes(new_value, &assigned_buffer, assignment)) {
          VLOG(4) << "Can't assign: assignee " << assigned_buffer
                  << " live range interferes with "
                  << new_value->ToShortString();
          return false;
        }
      } else if (assignment->hlo_ordering().MayInterfere(
                     assigned_buffer, *new_value,
                     assignment->dataflow_analysis())) {
        VLOG(4) << "Can't assign: assignee " << assigned_buffer
                << " may interfere with " << new_value->ToShortString();
        return false;
      }
      // A copy may not share a buffer with any value it reads from.
      if (new_value->instruction()->opcode() == HloOpcode::kCopy) {
        for (const HloPosition& assigned_buffer_position :
             assigned_buffer.positions()) {
          if (new_value->instruction()->IsUserOf(
                  assigned_buffer_position.instruction)) {
            VLOG(4) << "Can't assign: assignee " << assigned_buffer
                    << " is used at copy instruction "
                    << new_value->ToShortString();
            return false;
          }
        }
      }
    }
  }
  // Live-out buffers must exactly fill their allocation.
  if (assignment->alias_analysis().BufferLivesOut(hlo_buffer) &&
      allocation->size() != assignment->HloBufferSize(hlo_buffer)) {
    VLOG(4) << "Can't assign: buffer " << hlo_buffer
            << "is live out and size not the same as allocation";
    return false;
  }
  assignment->AddAssignment(allocation, hlo_buffer, 0,
                            assignment->HloBufferSize(hlo_buffer));
  return
true;
}

// Creates or reuses an allocation for one HloBuffer. Constants (optional)
// and entry parameters get dedicated allocations; thread-local and tuple
// buffers always get fresh ones; otherwise operand allocations and then
// previously created allocations are tried for reuse. Unassigned non-
// live-out buffers are deferred to heap simulation when every involved
// computation has a sequential order.
absl::Status BufferAssigner::AssignSingleHloBuffer(
    const HloBuffer* hlo_buffer, bool is_thread_local,
    absl::flat_hash_map<const HloComputation*,
                        absl::flat_hash_set<const HloValue*>>*
        buffers_to_assign_sequentially,
    std::vector<BufferAllocation::Index>* allocation_indices,
    BufferAssignment* assignment) {
  const int64_t buffer_size = assignment->HloBufferSize(*hlo_buffer);
  for (const HloValue* value : hlo_buffer->values()) {
    if (value->instruction()->opcode() == HloOpcode::kConstant) {
      if (allocate_buffers_for_constants_) {
        BufferAllocation* allocation =
            assignment->NewAllocation(*hlo_buffer, buffer_size);
        allocation->set_constant(true);
        VLOG(3) << "New allocation #" << allocation->index()
                << " for constant " << *hlo_buffer << " value ptr: " << value;
      }
      VLOG(3) << "Not allocating buffer for constant";
      return absl::OkStatus();
    }
    const HloInstruction* instruction = value->instruction();
    const bool is_entry_parameter =
        instruction->opcode() == HloOpcode::kParameter &&
        instruction->parent() ==
            instruction->GetModule()->entry_computation();
    if (is_entry_parameter) {
      bool parameter_has_alias =
          assignment->module().input_output_alias_config().ParameterHasAlias(
              instruction->parameter_number(), value->index());
      // Entry parameters get their own allocation tagged with the
      // parameter number/shape index.
      BufferAllocation* allocation =
          assignment->NewAllocation(*hlo_buffer, buffer_size);
      allocation->set_entry_computation_parameter(
          instruction->parameter_number(), value->index(),
          parameter_has_alias);
      if (parameter_has_alias) {
        // Aliased parameters are eligible for reuse by later buffers.
        allocation_indices->push_back(allocation->index());
      }
      VLOG(3) << "New allocation #" << allocation->index()
              << " marked as entry computation parameter: " << *hlo_buffer;
      return absl::OkStatus();
    }
  }
  if (is_thread_local) {
    BufferAllocation* allocation =
        assignment->NewAllocation(*hlo_buffer, buffer_size);
    allocation->set_is_thread_local(true);
    VLOG(3) << "New allocation #" << allocation->index()
            << " for thread-local: " << *hlo_buffer;
    return absl::OkStatus();
  }
  for (const HloValue* value : hlo_buffer->values()) {
    if (value->shape().IsTuple()) {
      BufferAllocation* allocation =
          assignment->NewAllocation(*hlo_buffer, buffer_size);
      allocation->set_is_tuple(true);
      VLOG(3) << "New allocation #" << allocation->index()
              << " for tuple-shaped buffer: " << *hlo_buffer;
      return absl::OkStatus();
    }
    if (value->IsTopLevel() && !value->IsTuple()) {
      // Try to reuse an allocation already holding one of the defining
      // instruction's operands.
      const HloInstruction* instruction = value->instruction();
      for (auto* operand : instruction->operands()) {
        for (const auto& operand_slice :
             assignment->GetAllSlices(operand, {})) {
          BufferAllocation* allocation =
              assignment->GetMutableAllocation(operand_slice.index());
          if (MaybeAssignBuffer(allocation, *hlo_buffer, assignment)) {
            VLOG(3) << "Reusing (operand) allocation #" << allocation->index()
                    << " for: " << *hlo_buffer;
            return absl::OkStatus();
          }
        }
      }
    }
  }
  // Try to reuse previously created allocations, newest first.
  for (int allocation_index = allocation_indices->size() - 1;
       allocation_index >= 0; allocation_index--) {
    BufferAllocation* allocation = assignment->GetMutableAllocation(
        allocation_indices->at(allocation_index));
    if (MaybeAssignBuffer(allocation, *hlo_buffer, assignment)) {
      VLOG(3) << "Reusing allocation #" << allocation->index()
              << " for: " << *hlo_buffer;
      return absl::OkStatus();
    }
  }
  if (!assignment->HasAllocation(*hlo_buffer) &&
      !assignment->alias_analysis().BufferLivesOut(*hlo_buffer)) {
    // Defer temp buffers to heap simulation when all involved computations
    // have a sequential order.
    bool all_computations_have_sequential_order = true;
    for (const HloValue* hlo_value : hlo_buffer->values()) {
      HloComputation* computation = hlo_value->instruction()->parent();
      const bool has_sequential_order =
          assignment->hlo_ordering().SequentialOrder(*computation) != nullptr;
      all_computations_have_sequential_order &= has_sequential_order;
    }
    if (all_computations_have_sequential_order) {
      for (const HloValue* hlo_value : hlo_buffer->values()) {
        HloComputation* computation = hlo_value->instruction()->parent();
        (*buffers_to_assign_sequentially)[computation].insert(hlo_value);
        VLOG(3) << "Delaying assignment of temp buffer: " << *hlo_value;
      }
      return absl::OkStatus();
    }
  }
  // Fallback: give the buffer its own new allocation.
  if (!assignment->HasAllocation(*hlo_buffer)) {
    BufferAllocation* allocation =
        assignment->NewAllocation(*hlo_buffer, buffer_size);
    allocation_indices->push_back(allocation->index());
    VLOG(3) << "New allocation #" << allocation->index()
            << " for: " << *hlo_buffer;
  }
  TF_RET_CHECK(assignment->HasAllocation(*hlo_buffer));
  return absl::OkStatus();
}

// Assigns allocations for all buffers defined inside `computations`,
// after preset assignments, in a deterministic order (size-descending,
// live-out first, then earliest post-order position, then buffer id).
absl::Status BufferAssigner::AssignBuffersForComputations(
    const std::vector<const HloComputation*>& computations,
    bool is_thread_local,
    absl::flat_hash_map<const HloComputation*,
                        absl::flat_hash_set<const HloValue*>>*
        buffers_to_assign_sequentially,
    BufferAssignment* assignment) {
  if (computations.empty()) {
    return absl::OkStatus();
  }
  std::vector<const HloBuffer*> sorted_buffers;
  // Buffers covered by preset assignments are handled first and skipped
  // in the sorting/assignment loop below.
  absl::flat_hash_set<const HloBuffer*> preset_assigned_buffers;
  TF_RETURN_IF_ERROR(AssignPresetBuffers(&preset_assigned_buffers, assignment));
  const HloAliasAnalysis& alias_analysis = assignment->alias_analysis();
  for (const HloBuffer& buffer : alias_analysis.buffers()) {
    if (preset_assigned_buffers.find(&buffer) !=
        preset_assigned_buffers.end()) {
      VLOG(3) << "Skip allocation for buffer: " << buffer;
      continue;
    }
    TF_RET_CHECK(!buffer.values().empty());
    const HloComputation* comp = buffer.values()[0]->instruction()->parent();
    if (absl::c_linear_search(computations, comp)) {
      sorted_buffers.push_back(&buffer);
    }
  }
  // Assign each instruction a global post-order position, used below as a
  // deterministic sort tie-breaker.
  flat_hash_map<const HloInstruction*, int> post_order_position;
  int position = 0;
  std::vector<const HloComputation*> reverse_post_order_computations;
  std::unique_ptr<CallGraph> call_graph =
      CallGraph::Build(computations[0]->parent());
  TF_RETURN_IF_ERROR(call_graph->VisitNodes([&](const CallGraphNode& node) {
    if (absl::c_linear_search(computations, node.computation())) {
      reverse_post_order_computations.push_back(node.computation());
    }
    return absl::OkStatus();
  }));
  absl::c_reverse(reverse_post_order_computations);
  for (auto* computation : reverse_post_order_computations) {
    for (auto* instruction : computation->MakeInstructionPostOrder()) {
      post_order_position.emplace(instruction, position);
position++;
    }
  }
  HloSchedule schedule(&assignment->module());
  for (const HloComputation* computation : computations) {
    const HloInstructionSequence* instruction_sequence =
        assignment->hlo_ordering().SequentialOrder(*computation);
    const bool has_sequential_order = instruction_sequence != nullptr;
    if (has_sequential_order && buffers_to_assign_sequentially != nullptr) {
      // Every sequential computation gets an entry so heap simulation runs
      // for it even when no buffer ends up deferred.
      buffers_to_assign_sequentially->emplace(computation,
                                              flat_hash_set<const HloValue*>());
      schedule.set_sequence(computation, *instruction_sequence);
    }
  }
  // Deterministic processing order: larger buffers first, live-out before
  // non-live-out, then earliest defining instruction, then buffer id.
  absl::c_sort(
      sorted_buffers,
      [&post_order_position, &alias_analysis, assignment](const HloBuffer* a,
                                                          const HloBuffer* b) {
        const int64_t a_size = assignment->HloBufferSize(*a);
        const int64_t b_size = assignment->HloBufferSize(*b);
        if (a_size != b_size) {
          return a_size > b_size;
        }
        const bool a_live_out = alias_analysis.BufferLivesOut(*a);
        const bool b_live_out = alias_analysis.BufferLivesOut(*b);
        if (a_live_out != b_live_out) {
          return a_live_out;
        }
        auto compare = [&post_order_position](const HloValue* value1,
                                              const HloValue* value2) {
          return post_order_position.at(value1->instruction()) <
                 post_order_position.at(value2->instruction());
        };
        const HloValue* a_min = *absl::c_min_element(a->values(), compare);
        const HloValue* b_min = *absl::c_min_element(b->values(), compare);
        if (post_order_position.at(a_min->instruction()) <
            post_order_position.at(b_min->instruction())) {
          return true;
        } else if (post_order_position.at(a_min->instruction()) >
                   post_order_position.at(b_min->instruction())) {
          return false;
        }
        return a->id() < b->id();
      });
  std::vector<BufferAllocation::Index> allocation_indices;
  for (const HloBuffer* buffer : sorted_buffers) {
    VLOG(3) << "=================================================";
    VLOG(3) << "Assigning buffer for " << *buffer;
    TF_RETURN_IF_ERROR(AssignSingleHloBuffer(buffer, is_thread_local,
                                             buffers_to_assign_sequentially,
                                             &allocation_indices, assignment));
  }
  return absl::OkStatus();
}

// Partitions `buffers` by logical-buffer color.
flat_hash_map<LogicalBuffer::Color, flat_hash_set<const HloValue*>>
BufferAssigner::SplitBuffersByColor(
    const flat_hash_set<const HloValue*>& buffers) const {
  flat_hash_map<LogicalBuffer::Color, flat_hash_set<const HloValue*>>
      color_map;
  for (auto buffer : buffers) {
    color_map[buffer->color()].insert(buffer);
  }
  return color_map;
}

// Partitions `buffers` by the private-stack computation that (per the call
// graph) contains each value's defining instruction. CHECK-fails if a
// value belongs to none of the given computations.
absl::flat_hash_map<const HloComputation*,
                    absl::flat_hash_set<const HloValue*>>
BufferAssigner::SplitBuffersByPrivateStackComputation(
    const absl::flat_hash_set<const HloValue*>& buffers,
    absl::Span<const HloComputation* const> private_stack_computations,
    const CallGraph& call_graph) const {
  absl::flat_hash_map<const HloComputation*,
                      absl::flat_hash_set<const HloValue*>>
      computation_map;
  for (const HloValue* value : buffers) {
    bool found_computation = false;
    for (const HloComputation* computation : private_stack_computations) {
      if (call_graph.InstructionIsNestedIn(value->instruction(),
                                           computation)) {
        found_computation = true;
        computation_map[computation].insert(value);
        break;
      }
    }
    CHECK(found_computation);
  }
  return computation_map;
}

// Materializes externally decided (preset) assignments as allocations —
// one per color, pre-sized from the preset info — and records which
// HloBuffers are already covered so later passes skip them.
absl::Status BufferAssigner::AssignPresetBuffers(
    absl::flat_hash_set<const HloBuffer*>* assigned_buffers,
    BufferAssignment* assignment) {
  if (!preset_assignments_) {
    return absl::OkStatus();
  }
  absl::flat_hash_map<LogicalBuffer::Color, BufferAllocation*>
      preset_allocations;
  for (auto& color_and_info : preset_assignments_->assignment_informations()) {
    LogicalBuffer::Color color(color_and_info.first);
    auto inserted = preset_allocations.emplace(
        color,
        assignment->NewEmptyAllocation(color_and_info.second.size, color));
    BufferAllocation* inserted_allocation = inserted.first->second;
    inserted_allocation->AddHeapTrace(
        color_and_info.second.heap_simulator_trace);
    VLOG(3) << "Created preset buffer allocation "
            << inserted_allocation->index()
            << ", color: " << inserted_allocation->color()
            << ", size: " << inserted_allocation->size();
  }
  const HloAliasAnalysis& alias_analysis = assignment->alias_analysis();
  for (auto& position_and_chunk :
       preset_assignments_->chunks()) {
    const HloPosition& defining_position = position_and_chunk.first;
    const HloBuffer& buffer = alias_analysis.GetUniqueBufferAt(
        defining_position.instruction, defining_position.index);
    for (const HloValue* value : buffer.values()) {
      VLOG(3) << "Preset allocation for value: " << value->ToShortString();
      const HeapSimulator::Chunk& chunk = position_and_chunk.second;
      auto preset_allocations_iter = preset_allocations.find(value->color());
      CHECK(preset_allocations_iter != preset_allocations.end())
          << "No preset value allocation for color " << value->color()
          << " for " << value->ToShortString() << " found.";
      preset_allocations_iter->second->AddAssignment(*value, chunk.offset,
                                                     chunk.size);
    }
    assigned_buffers->insert(&buffer);
  }
  // Consume the preset assignments so they cannot be applied twice.
  preset_assignments_ = {};
  return absl::OkStatus();
}

// Runs heap simulation over the deferred temp buffers — once for the
// whole module when a complete schedule exists, otherwise once per
// computation — and converts the simulator results into allocations.
absl::Status BufferAssigner::AssignBuffersWithSequentialOrdering(
    const flat_hash_map<const HloComputation*,
                        flat_hash_set<const HloValue*>>&
        buffers_to_assign_sequentially,
    bool run_whole_module_heap_simulation, BufferAssignment* assignment,
    const PrivateStacks& private_stacks,
    GlobalDecreasingSizeBestFitHeap<HloValue>::BufferIntervalCompare
        heap_buffer_interval_compare,
    std::optional<BufferAssignment::BufferIsolationOptions>
        isolation_options) {
  const HloOrdering& hlo_ordering = assignment->hlo_ordering();
  // Use the caller's comparator when given; otherwise let the chooser pick
  // the better of spatial vs. temporal best-fit.
  auto get_heap_algorithm =
      [&](int64_t alignment) -> std::unique_ptr<HeapAlgorithm<HloValue>> {
    if (heap_buffer_interval_compare) {
      return std::make_unique<ConstrainedGlobalDecreasingSizeBestFitHeap>(
          assignment->multiheap_size_constraint_per_heap(), alignment,
          GlobalDecreasingSizeBestFitHeap<HloValue>::kCustom,
          heap_buffer_interval_compare);
    }
    auto algorithms = std::make_unique<
        std::vector<std::unique_ptr<HeapAlgorithm<HloValue>>>>();
    algorithms->push_back(
        std::make_unique<ConstrainedGlobalDecreasingSizeBestFitHeap>(
            assignment->multiheap_size_constraint_per_heap(), alignment,
            GlobalDecreasingSizeBestFitHeap<HloValue>::kSpatial));
    algorithms->push_back(
std::make_unique<ConstrainedGlobalDecreasingSizeBestFitHeap>(
            assignment->multiheap_size_constraint_per_heap(), alignment,
            GlobalDecreasingSizeBestFitHeap<HloValue>::kTemporal));
    return std::make_unique<ChooseBestHeapAlgorithm<HloValue>>(
        std::move(algorithms));
  };
  if (run_whole_module_heap_simulation) {
    // Whole-module simulation: one schedule covering every sequential
    // computation, all deferred buffers pooled together.
    VLOG(1) << "Running whole-module heap simulation";
    HloSchedule schedule(&assignment->module());
    flat_hash_set<const HloValue*> all_buffers_to_assign;
    for (const auto& pair : buffers_to_assign_sequentially) {
      const HloComputation* computation = pair.first;
      const flat_hash_set<const HloValue*>& buffers_to_assign = pair.second;
      const HloInstructionSequence* instruction_sequence =
          hlo_ordering.SequentialOrder(*computation);
      CHECK(instruction_sequence != nullptr) << computation->name();
      schedule.set_sequence(computation, *instruction_sequence);
      all_buffers_to_assign.insert(buffers_to_assign.begin(),
                                   buffers_to_assign.end());
    }
    auto color_map = SplitBuffersByColor(all_buffers_to_assign);
    // Iterate colors in sorted order for determinism.
    std::vector<LogicalBuffer::Color> sorted_colors;
    sorted_colors.reserve(color_map.size());
    for (auto& single_colored_set : color_map) {
      auto color = single_colored_set.first;
      sorted_colors.emplace(sorted_colors.end(), color);
    }
    absl::c_sort(sorted_colors);
    for (auto color : sorted_colors) {
      VLOG(2) << "Simulating heap for color " << color;
      int64_t alignment = assignment->color_alignment_(color);
      HeapSimulator::Options options;
      options.alloc_constants = allocate_buffers_for_constants_;
      auto private_stacks_it = private_stacks.find(color);
      if (private_stacks_it != private_stacks.end()) {
        // Private-stack colors are simulated per computation so each
        // stack's buffers can overlap with other stacks'.
        auto computation_map = SplitBuffersByPrivateStackComputation(
            color_map[color], private_stacks_it->second,
            assignment->alias_analysis().dataflow_analysis().call_graph());
        for (const HloComputation* private_stack_computation :
             private_stacks_it->second) {
          VLOG(2) << "private stack computation: "
                  << private_stack_computation->name();
          auto computation_map_it =
              computation_map.find(private_stack_computation);
          CHECK(computation_map_it != computation_map.end());
          options.buffers_to_assign = &computation_map_it->second;
          const HloInstructionSequence* instruction_sequence =
              hlo_ordering.SequentialOrder(*private_stack_computation);
          TF_ASSIGN_OR_RETURN(
              HeapSimulator::Result<HloValue> result,
              HeapSimulator::Run(get_heap_algorithm(alignment),
                                 *private_stack_computation,
                                 *instruction_sequence,
                                 assignment->alias_analysis(),
                                 assignment->buffer_size_, &schedule,
                                 options));
          AssignBuffersFromHeapSimulator(result, assignment, color,
                                         isolation_options);
        }
      } else {
        options.buffers_to_assign = &color_map[color];
        TF_ASSIGN_OR_RETURN(
            HeapSimulator::Result<HloValue> result,
            HeapSimulator::Run(get_heap_algorithm(alignment),
                               assignment->module(), schedule,
                               assignment->alias_analysis(),
                               assignment->buffer_size_, options));
        AssignBuffersFromHeapSimulator(result, assignment, color,
                                       isolation_options);
      }
    }
  } else {
    // Per-computation simulation.
    VLOG(1) << "Running per-computation heap simulation";
    for (const auto& pair : buffers_to_assign_sequentially) {
      const HloComputation* computation = pair.first;
      const flat_hash_set<const HloValue*>& buffers_to_assign = pair.second;
      const HloInstructionSequence* instruction_sequence =
          hlo_ordering.SequentialOrder(*computation);
      CHECK(instruction_sequence != nullptr) << computation->name();
      auto color_map = SplitBuffersByColor(buffers_to_assign);
      std::vector<LogicalBuffer::Color> sorted_colors;
      sorted_colors.reserve(color_map.size());
      for (auto& single_colored_set : color_map) {
        auto color = single_colored_set.first;
        sorted_colors.emplace(sorted_colors.end(), color);
      }
      absl::c_sort(sorted_colors);
      for (auto color : sorted_colors) {
        VLOG(2) << "Simulating heap for color " << color;
        int64_t alignment = assignment->color_alignment_(color);
        HeapSimulator::Options options;
        options.buffers_to_assign = &color_map[color];
        TF_ASSIGN_OR_RETURN(
            HeapSimulator::Result<HloValue> result,
            HeapSimulator::Run(get_heap_algorithm(alignment), *computation,
                               *instruction_sequence,
                               assignment->alias_analysis(),
                               assignment->buffer_size_, options));
        AssignBuffersFromHeapSimulator(result, assignment, color,
                                       isolation_options);
      }
    }
  }
  return absl::OkStatus();
}

namespace {
// Computes the set of logical buffers live at the point of maximal memory
// usage in `heap_trace`, restricted to the buffers assigned to
// `allocation`. Two-pass replay: first find the peak live size, then
// replay to that point and capture the live set.
std::vector<const HloValue*> ComputePeakMemoryLogicalBuffers(
    const BufferAllocation& allocation, const HeapSimulatorTrace& heap_trace) {
  // Map ids to values and values to their assigned sizes.
  absl::flat_hash_map<BufferValue::Id, const HloValue*> id_to_value;
  absl::flat_hash_map<const HloValue*, int64_t> buffer_sizes;
  for (const auto& pair : allocation.assigned_buffers()) {
    const HloValue* value = pair.first;
    const BufferAllocation::OffsetSize& offset_size = pair.second;
    id_to_value[value->id()] = value;
    buffer_sizes[value] = offset_size.size;
  }
  VLOG(1) << "Compute peak memory logical buffers";
  // Shared buffers contribute to the live size only while the canonical id
  // has at least one outstanding ALLOC/SHARE_WITH.
  absl::flat_hash_map<int64_t, int> num_outstanding_shared_buffers;
  absl::flat_hash_map<int64_t, int64_t> shared_canonical_ids;
  absl::flat_hash_map<int64_t, int64_t> allocated_sizes;
  // Returns the change in live memory caused by `event`.
  auto memory_delta = [&](const HeapSimulatorTrace::Event& event) -> int64_t {
    const HloValue* buffer = id_to_value.at(event.buffer_id());
    const int64_t buffer_size = buffer_sizes.at(buffer);
    if (event.kind() == HeapSimulatorTrace::Event::ALLOC) {
      num_outstanding_shared_buffers[event.buffer_id()] = 1;
      allocated_sizes[event.buffer_id()] = buffer_size;
      return buffer_size;
    } else if (event.kind() == HeapSimulatorTrace::Event::SHARE_WITH) {
      shared_canonical_ids[event.buffer_id()] =
          event.share_with_canonical_id();
      // Only the first outstanding share of a canonical id adds memory.
      if (++num_outstanding_shared_buffers[
              event.share_with_canonical_id()] == 1) {
        allocated_sizes[event.buffer_id()] = buffer_size;
        return buffer_size;
      }
      allocated_sizes[event.buffer_id()] = 0;
      return 0;
    } else if (event.kind() == HeapSimulatorTrace::Event::FREE) {
      auto shared_canonical_id_it =
          shared_canonical_ids.find(event.buffer_id());
      int64_t buffer_id =
          (shared_canonical_id_it == shared_canonical_ids.end())
              ? event.buffer_id()
              : shared_canonical_id_it->second;
      --num_outstanding_shared_buffers[buffer_id];
      return -1 * allocated_sizes[event.buffer_id()];
    }
    LOG(FATAL) << "Unknown event kind: " << event.kind();
  };
  // Pass 1: find the peak live size.
  int64_t max_live_size = 0;
  int64_t live_size = 0;
  for (const auto& event : heap_trace.events()) {
    if (!id_to_value.contains(event.buffer_id())) {
      // Event is for a buffer in a different allocation.
      continue;
    }
    live_size += memory_delta(event);
    if (max_live_size < live_size) {
      max_live_size = live_size;
    }
  }
  // Pass 2: replay until the peak is reached; the live set at that point
  // is the answer.
  absl::flat_hash_set<const HloValue*> live_values;
  live_size = 0;
  num_outstanding_shared_buffers.clear();
  for (const auto& event : heap_trace.events()) {
    if (!id_to_value.contains(event.buffer_id())) {
      continue;
    }
    const HloValue* value = id_to_value.at(event.buffer_id());
    int64_t delta = memory_delta(event);
    if (delta > 0) {
      InsertOrDie(&live_values, value);
    } else if (delta < 0) {
      CHECK(ContainsKey(live_values, value));
      live_values.erase(value);
    }
    live_size += delta;
    if (live_size == max_live_size) {
      break;
    }
  }
  CHECK_EQ(live_size, max_live_size);
  std::vector<const HloValue*> live_values_vector;
  live_values_vector.insert(live_values_vector.end(), live_values.begin(),
                            live_values.end());
  // Stabilize the result order by value id.
  absl::c_sort(live_values_vector, [](const HloValue* a, const HloValue* b) {
    return a->id() < b->id();
  });
  VLOG(4) << "Peak memory buffer:";
  for (auto value : live_values_vector) {
    VLOG(4) << " " << value->ToString();
  }
  return live_values_vector;
}
}  // namespace

// Rewrites heap-simulator results so buffers of isolation colors are
// placed at isolated offsets. (Definition continues beyond this chunk.)
void BufferAssigner::IsolateHeapBuffers(
    std::optional<BufferAssignment::BufferIsolationOptions> isolation_options,
    const BufferAssignment* assignment, LogicalBuffer::Color color,
    HeapSimulator::Result<HloValue>& result) const {
  if (!isolation_options) {
    return;
  }
  // Recompute heap_size from scratch while walking the heap results.
  result.heap_size = 0;
  for (HeapSimulator::HeapResult<HloValue>& heap_result :
       result.heap_results) {
    if (absl::c_find(isolation_options->config.isolation_colors(), color) !=
        isolation_options->config.isolation_colors().end()) {
      VLOG(1) << "Isolating color: " << color;
      int64_t alignment = assignment->color_alignment_(color);
std::vector<const HloValue*> sorted_values; sorted_values.reserve(heap_result.chunk_map.size()); for (const auto& [value, chunk] : heap_result.chunk_map) { sorted_values.push_back(value); } absl::c_sort(sorted_values, isolation_options->hlo_value_compare); int64_t isolation_offset = RoundUpTo(isolation_options->config.base_offset_bytes() + heap_result.heap_size + isolation_options->config.isolation_padding_bytes(), alignment); int64_t value_index; for (value_index = 0; value_index < std::min(static_cast<int64_t>(sorted_values.size()), isolation_options->config.isolation_fuel()); ++value_index) { const HloValue* value = sorted_values[value_index]; HeapSimulator::Chunk& chunk = heap_result.chunk_map.at(value); VLOG(1) << "Isolating " << value->ToShortString() << " from " << chunk.offset << " to " << isolation_offset; chunk.offset = isolation_offset; isolation_offset += RoundUpTo( chunk.size + isolation_options->config.isolation_padding_bytes(), alignment); } for (; value_index < sorted_values.size(); ++value_index) { const HloValue* value = sorted_values[value_index]; HeapSimulator::Chunk& chunk = heap_result.chunk_map.at(value); int64_t new_offset = RoundUpTo( chunk.offset + isolation_options->config.base_offset_bytes(), alignment); VLOG(1) << "Not isolating " << value->ToShortString() << ", from " << chunk.offset << " to " << new_offset; chunk.offset = new_offset; } heap_result.heap_size = isolation_offset; } result.heap_size += heap_result.heap_size; } } void BufferAssigner::AssignBuffersFromHeapSimulator( HeapSimulator::Result<HloValue>& result, BufferAssignment* assignment, BufferValue::Color color, std::optional<BufferAssignment::BufferIsolationOptions> isolation_options) { IsolateHeapBuffers(isolation_options, assignment, color, result); if (assignment->stats_.preallocated_temp_fragmentation_bytes == -1) { assignment->stats_.preallocated_temp_fragmentation_bytes = result.fragmentation_size; } else { assignment->stats_.preallocated_temp_fragmentation_bytes += 
result.fragmentation_size; } VLOG(1) << "Result size from heap simulator: " << result.heap_size; for (const HeapSimulator::HeapResult<HloValue>& heap_result : result.heap_results) { BufferAllocation* allocation = assignment->NewEmptyAllocation(heap_result.heap_size, color); for (const auto& [value, chunk] : heap_result.chunk_map) { assignment->AddAssignment(allocation, *value, chunk.offset, chunk.size); } allocation->peak_buffers_ = ComputePeakMemoryLogicalBuffers(*allocation, result.debug_trace); XLA_VLOG_LINES(2, allocation->ToString()); allocation->AddHeapTrace(result.debug_trace); } } absl::StatusOr<std::unique_ptr<BufferAssignment>> BufferAssigner::CreateAssignment( const HloModule* module, std::unique_ptr<HloOrdering> hlo_ordering, BufferValue::SizeFunction buffer_size, LogicalBuffer::AlignmentFunction color_alignment, HloDataflowAnalysis::CanShareBuffer can_share_buffer, const PrivateStacks& private_stacks, GlobalDecreasingSizeBestFitHeap<HloValue>::BufferIntervalCompare heap_buffer_interval_compare, std::optional<BufferAssignment::BufferIsolationOptions> isolation_options, std::optional<BufferValue::Color> temp_buffer_color) { TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis, HloAliasAnalysis::Run(module, can_share_buffer)); HloSchedule schedule(module); for (const HloComputation* computation : module->computations()) { const HloInstructionSequence* instruction_sequence = hlo_ordering->SequentialOrder(*computation); const bool has_sequential_order = instruction_sequence != nullptr; if (has_sequential_order) { schedule.set_sequence(computation, *instruction_sequence); } } TF_ASSIGN_OR_RETURN(std::unique_ptr<HloLiveRange> hlo_live_range, HloLiveRange::Run(schedule, *alias_analysis, module->entry_computation(), true)); VLOG(1) << "Assigning buffers to module " << module->name(); XLA_VLOG_LINES(3, module->ToString()); XLA_VLOG_LINES(3, alias_analysis->ToString()); XLA_VLOG_LINES(3, alias_analysis->dataflow_analysis().ToString()); VLOG(1) << 
"Number of buffers to assign: " << alias_analysis->buffers().size(); std::unique_ptr<BufferAssignment> assignment(new BufferAssignment( module, std::move(hlo_ordering), std::move(buffer_size), std::move(color_alignment), std::move(alias_analysis), std::move(hlo_live_range))); TF_RETURN_IF_ERROR( colorer_(&assignment->alias_analysis(), assignment->hlo_ordering())); VLOG(3) << "After coloring:"; XLA_VLOG_LINES(3, assignment->alias_analysis().dataflow_analysis().ToString()); std::vector<const HloComputation*> thread_local_computations; std::vector<const HloComputation*> global_computations; TF_RETURN_IF_ERROR(GatherComputationsByAllocationType( module, &thread_local_computations, &global_computations)); flat_hash_map<const HloComputation*, flat_hash_set<const HloValue*>> buffers_to_assign_sequentially; TF_RETURN_IF_ERROR(AssignBuffersForComputations( global_computations, false, &buffers_to_assign_sequentially, assignment.get())); const bool run_whole_module_heap_simulation = buffers_to_assign_sequentially.size() == global_computations.size(); VLOG(2) << "Running whole module heap simulation: " << run_whole_module_heap_simulation; const int32_t multiheap_size_constraint_per_heap = module->config().debug_options().xla_multiheap_size_constraint_per_heap(); VLOG(2) << "Multiheap per heap size limit: " << multiheap_size_constraint_per_heap; TF_RETURN_IF_ERROR(AssignBuffersWithSequentialOrdering( buffers_to_assign_sequentially, run_whole_module_heap_simulation, assignment.get(), private_stacks, heap_buffer_interval_compare, isolation_options)); std::vector<const HloComputation*> thread_local_computations_no_fusion; for (auto* computation : thread_local_computations) { TF_RET_CHECK(computation != module->entry_computation()); if (computation->IsFusionComputation()) { continue; } thread_local_computations_no_fusion.push_back(computation); } TF_RETURN_IF_ERROR(AssignBuffersForComputations( thread_local_computations_no_fusion, true, nullptr, assignment.get())); for (const 
HloBuffer* buffer : assignment->alias_analysis().LiveOutBuffers()) { VLOG(3) << "maybe_live_out LogicalBuffer: " << *buffer; if (assignment->HasAllocation(*buffer)) { BufferAllocation* alloc = assignment->GetMutableAssignedAllocation(*buffer); alloc->set_maybe_live_out(true); VLOG(3) << "maybe_live_out BufferAllocation: " << *alloc; } } absl::flat_hash_set<BufferValue::Color> private_stack_colors; for (const auto& [color, computations] : private_stacks) { private_stack_colors.insert(color); } assignment->CombineTempAllocations(private_stack_colors, temp_buffer_color); XLA_VLOG_LINES(2, assignment->ToString()); TF_RETURN_IF_ERROR(assignment->ComputeSummaryStats()); XLA_VLOG_LINES(1, assignment->GetStats().ToString()); VLOG(1) << "Buffer assignment done."; return std::move(assignment); } }
#include "xla/service/buffer_assignment.h" #include <cstdint> #include <memory> #include <optional> #include <string> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/comparison_util.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/ir/hlo_schedule.h" #include "xla/literal.h" #include "xla/literal_util.h" #include "xla/service/buffer_value.h" #include "xla/service/call_graph.h" #include "xla/service/copy_insertion.h" #include "xla/service/flatten_call_graph.h" #include "xla/service/hlo.pb.h" #include "xla/service/hlo_alias_analysis.h" #include "xla/service/hlo_buffer.h" #include "xla/service/hlo_memory_scheduler.h" #include "xla/service/hlo_ordering.h" #include "xla/service/hlo_parser.h" #include "xla/service/hlo_value.h" #include "xla/service/logical_buffer.h" #include "xla/service/memory_space_assignment/memory_space_assignment.h" #include "xla/shape_util.h" #include "xla/test.h" #include "xla/test_helpers.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" namespace xla { namespace { using memory_space_assignment::PresetAssignments; using ::testing::UnorderedElementsAre; class InstructionListVisitor : public DfsHloVisitorWithDefault { public: explicit InstructionListVisitor(const HloInstruction* root) : root_(root) {} absl::Status DefaultAction(HloInstruction* hlo) override { instructions_.push_back(hlo); VLOG(0) << "List instruction " << hlo->ToString(); return absl::OkStatus(); } std::vector<const HloInstruction*> 
GetInstructions() { return instructions_; } private: const HloInstruction* root_; std::vector<const HloInstruction*> instructions_; InstructionListVisitor(const InstructionListVisitor&) = delete; InstructionListVisitor& operator=(const InstructionListVisitor&) = delete; }; const std::vector<const HloInstruction*> GetInstructions(HloInstruction* root) { InstructionListVisitor main_list(root); TF_CHECK_OK(root->Accept(&main_list)); return main_list.GetInstructions(); } class BufferAssignmentTest : public HloTestBase { protected: ~BufferAssignmentTest() override {} std::unique_ptr<BufferAssignment> RunBufferAssignment(HloModule* module, int64_t alignment = 1) { return BufferAssigner::Run( module, std::make_unique<DependencyHloOrdering>(module), backend().compiler()->BufferSizeBytesFunction(), [alignment](LogicalBuffer::Color) { return alignment; }, true) .value(); } absl::StatusOr<std::unique_ptr<BufferAssignment>> ConvertToProtoAndBack( const BufferAssignment* buffers, const HloModule* module) { auto proto = buffers->ToProto(); return BufferAssignment::FromProto( proto, module, backend().compiler()->BufferSizeBytesFunction(), nullptr); } std::unique_ptr<BufferAssignment> RunBufferAssignmentWithSequentialOrdering( HloModule* module, int64_t alignment = 1, BufferAssigner::Colorer colorer = BufferAssigner::DefaultColorer(), const BufferAssigner::PrivateStacks& private_stacks = {}, std::optional<BufferAssignment::BufferIsolationOptions> isolation_options = std::nullopt) { return BufferAssigner::Run( module, std::make_unique<SequentialHloOrdering>(module->schedule()), backend().compiler()->BufferSizeBytesFunction(), [alignment](LogicalBuffer::Color) { return alignment; }, true, colorer, std::nullopt, nullptr, {}, private_stacks, nullptr, isolation_options) .value(); } std::unique_ptr<BufferAssignment> RunBufferAssignmentNoBuffersForConstants( HloModule* module, int64_t alignment = 1) { return BufferAssigner::Run( module, std::make_unique<DependencyHloOrdering>(module), 
backend().compiler()->BufferSizeBytesFunction(), [alignment](LogicalBuffer::Color) { return alignment; }, false) .value(); } std::unique_ptr<BufferAssignment> RunBufferAssignmentNoBuffersReuseForAdd( HloModule* module, int64_t alignment = 1) { auto must_not_live_out = [](const HloAliasAnalysis& alias_analysis, const HloInstruction* instruction, const ShapeIndex&) { return instruction->opcode() == HloOpcode::kAdd; }; return BufferAssigner::Run( module, std::make_unique<DependencyHloOrdering>(module), backend().compiler()->BufferSizeBytesFunction(), [alignment](LogicalBuffer::Color) { return alignment; }, false, BufferAssigner::DefaultColorer(), must_not_live_out) .value(); } std::unique_ptr<BufferAssignment> RunColoredBufferAssignment( HloModule* module, BufferAssigner::Colorer colorer, int64_t alignment = 1) { return BufferAssigner::Run( module, std::make_unique<DependencyHloOrdering>(module), backend().compiler()->BufferSizeBytesFunction(), [alignment](LogicalBuffer::Color) { return alignment; }, true, std::move(colorer)) .value(); } std::unique_ptr<BufferAssignment> RunBufferAssignmentWithInstructionSequence( HloModule* module, absl::Span<HloInstruction* const> instruction_sequence, int64_t alignment = 1) { HloSchedule schedule(module); schedule.set_sequence(module->entry_computation(), instruction_sequence); return BufferAssigner::Run( module, std::make_unique<SequentialHloOrdering>(schedule), backend().compiler()->BufferSizeBytesFunction(), [alignment](LogicalBuffer::Color) { return alignment; }, true) .value(); } std::unique_ptr<BufferAssignment> RunBufferAssignmentWithPresetAssignments( HloModule* module, std::unique_ptr<PresetAssignments> preset_assignments, int64_t alignment = 1) { return BufferAssigner::Run( module, std::make_unique<DependencyHloOrdering>(module), backend().compiler()->BufferSizeBytesFunction(), [alignment](LogicalBuffer::Color) { return alignment; }, true, BufferAssigner::DefaultColorer(), std::nullopt, nullptr, 
std::move(preset_assignments)) .value(); } std::unique_ptr<BufferAssignment> RunBufferAssignmentWithIsolationOptions( HloModule* module, std::optional<BufferAssignment::BufferIsolationOptions> isolation_options = std::nullopt) { return BufferAssigner::Run( module, std::make_unique<SequentialHloOrdering>(module->schedule()), backend().compiler()->BufferSizeBytesFunction(), [](LogicalBuffer::Color) { return 1; }, true, BufferAssigner::DefaultColorer(), std::nullopt, nullptr, {}, {}, nullptr, isolation_options) .value(); } std::unique_ptr<HloComputation> BuildMapComputationPlus1( const std::string& name) { auto builder = HloComputation::Builder(name); auto param = builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "x")); auto value = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); builder.AddInstruction( HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, param, value)); return builder.Build(); } std::unique_ptr<HloComputation> BuildReduceComputation( const std::string& name) { auto builder = HloComputation::Builder(name); auto param = builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "x")); auto param2 = builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "y")); builder.AddInstruction( HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, param, param2)); return builder.Build(); } std::unique_ptr<HloComputation> BuildWhileConditionComputation( const std::string& name) { auto builder = HloComputation::Builder(name); auto const4 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(4))); auto param = builder.AddInstruction( HloInstruction::CreateParameter(0, t_s32_f32v4_, "x")); auto index = builder.AddInstruction( HloInstruction::CreateGetTupleElement(const4->shape(), param, 0)); builder.AddInstruction( HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), index, const4, ComparisonDirection::kLt)); return builder.Build(); } 
std::unique_ptr<HloComputation> BuildWhileBodyComputation( const std::string& name) { auto builder = HloComputation::Builder(name); auto const1 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(1))); auto constv = builder.AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f}))); auto param = builder.AddInstruction( HloInstruction::CreateParameter(0, t_s32_f32v4_, "x")); auto indexc = builder.AddInstruction( HloInstruction::CreateGetTupleElement(const1->shape(), param, 0)); auto addc = builder.AddInstruction(HloInstruction::CreateBinary( indexc->shape(), HloOpcode::kAdd, indexc, const1)); auto indexv = builder.AddInstruction( HloInstruction::CreateGetTupleElement(constv->shape(), param, 1)); auto addv = builder.AddInstruction(HloInstruction::CreateBinary( constv->shape(), HloOpcode::kAdd, indexv, constv)); builder.AddInstruction(HloInstruction::CreateTuple({addc, addv})); return builder.Build(); } std::unique_ptr<HloComputation> BuildR0F32UnaryOpComputation( HloOpcode opcode, const std::string& name) { auto builder = HloComputation::Builder(name); auto param = builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "x")); builder.AddInstruction(HloInstruction::CreateUnary(r0f32_, opcode, param)); return builder.Build(); } const BufferAllocation& GetAssignedInputAllocation( const BufferAssignment& buffers, HloInstruction* hlo) { LOG(INFO) << "Checking input: " << hlo->ToString(); const BufferAllocation& buffer = *buffers.GetUniqueTopLevelSlice(hlo).value().allocation(); EXPECT_EQ(hlo->parameter_number(), buffer.parameter_number()); return buffer; } const BufferAllocation& GetAssignedOutputAllocation( const BufferAssignment& buffers, HloInstruction* hlo) { LOG(INFO) << "Checking output: " << hlo->ToString(); const BufferAllocation& buffer = GetTopLevelAllocation(buffers, hlo); return buffer; } const BufferAllocation& GetAllocation(const BufferAssignment& buffers, const 
HloInstruction* hlo, const ShapeIndex& index) { return *buffers.GetUniqueSlice(hlo, index).value().allocation(); } const BufferAllocation& GetTopLevelAllocation(const BufferAssignment& buffers, const HloInstruction* hlo) { return *buffers.GetUniqueTopLevelSlice(hlo).value().allocation(); } int64_t ValidateBuffers( const std::vector<const HloInstruction*>& instructions, const BufferAssignment& buffers) { for (const HloInstruction* hlo : instructions) { if (!buffers.HasTopLevelAllocation(hlo)) { EXPECT_TRUE(HloOpcode::kConstant == hlo->opcode() || HloOpcode::kParameter == hlo->opcode()); continue; } } int64_t total_size = 0; for (auto& allocation : buffers.Allocations()) { total_size += allocation.size(); } return total_size; } Shape s32_ = ShapeUtil::MakeShape(xla::S32, {}); Shape r0f32_ = ShapeUtil::MakeShape(xla::F32, {}); Shape f32vec4_ = ShapeUtil::MakeShape(F32, {4}); Shape f32vec10_ = ShapeUtil::MakeShape(F32, {10}); Shape f32vec100_ = ShapeUtil::MakeShape(F32, {100}); Shape f32a100x10_ = ShapeUtil::MakeShape(F32, {100, 10}); Shape t_s32_f32v4_ = ShapeUtil::MakeTupleShape({s32_, f32vec4_}); Shape t_s32_f32v10_ = ShapeUtil::MakeTupleShape({s32_, f32vec10_}); }; static bool BuffersDistinct(const std::vector<const HloInstruction*>& a, const std::vector<const HloInstruction*>& b, const BufferAssignment& assignment) { absl::flat_hash_set<BufferAllocation::Slice> a_slices; for (const HloInstruction* instruction : a) { if (assignment.HasTopLevelAllocation(instruction)) { a_slices.insert(assignment.GetUniqueTopLevelSlice(instruction).value()); } } for (const HloInstruction* instruction : b) { if (assignment.HasTopLevelAllocation(instruction)) { if (a_slices.contains( assignment.GetUniqueTopLevelSlice(instruction).value())) { return false; } } } return true; } TEST_F(BufferAssignmentTest, ScalarConstant) { auto builder = HloComputation::Builder(TestName()); auto const0 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto 
module = CreateNewVerifiedModule(); module->AddEntryComputation(builder.Build()); { auto buffers = RunBufferAssignment(module.get()); EXPECT_TRUE(buffers->HasTopLevelAllocation(const0)); } { auto buffers = RunBufferAssignmentNoBuffersForConstants(module.get()); EXPECT_FALSE(buffers->HasTopLevelAllocation(const0)); } } TEST_F(BufferAssignmentTest, BufferForConst) { auto builder = HloComputation::Builder(TestName()); auto const0 = builder.AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f}))); auto const1 = builder.AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateR1<float>({4.1f, 4.2f, 4.3f, 4.4f}))); auto add = builder.AddInstruction( HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd, const0, const1)); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(builder.Build()); { auto buffers = RunBufferAssignment(module.get()); EXPECT_TRUE(buffers->HasTopLevelAllocation(const0)); EXPECT_TRUE(buffers->HasTopLevelAllocation(const1)); GetAssignedOutputAllocation(*buffers, add); } { auto buffers = RunBufferAssignmentNoBuffersForConstants(module.get()); EXPECT_FALSE(buffers->HasTopLevelAllocation(const0)); EXPECT_FALSE(buffers->HasTopLevelAllocation(const1)); GetAssignedOutputAllocation(*buffers, add); } } TEST_F(BufferAssignmentTest, HasAllocationAt) { auto builder = HloComputation::Builder(TestName()); auto param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, f32vec100_, "param0")); auto constant = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(1))); auto negate = builder.AddInstruction( HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0)); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({negate, param0, constant})); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(builder.Build()); auto buffers = RunBufferAssignment(module.get()); EXPECT_EQ(buffers->HasTopLevelAllocation(tuple), 
buffers->HasAllocationAt(tuple, {})); EXPECT_EQ(buffers->HasTopLevelAllocation(negate), buffers->HasAllocationAt(tuple, {0})); EXPECT_EQ(buffers->HasTopLevelAllocation(param0), buffers->HasAllocationAt(tuple, {1})); EXPECT_EQ(buffers->HasTopLevelAllocation(constant), buffers->HasAllocationAt(tuple, {2})); } TEST_F(BufferAssignmentTest, BufferForOutputConst) { auto builder = HloComputation::Builder(TestName()); auto const0 = builder.AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f}))); auto copy = builder.AddInstruction( HloInstruction::CreateUnary(const0->shape(), HloOpcode::kCopy, const0)); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(builder.Build()); auto buffers = RunBufferAssignment(module.get()); GetAssignedOutputAllocation(*buffers, copy); } TEST_F(BufferAssignmentTest, Basic) { auto builder = HloComputation::Builder(TestName()); auto paramscalar = builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p")); auto broadcast = builder.AddInstruction( HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {})); auto param0 = builder.AddInstruction( HloInstruction::CreateParameter(1, f32vec100_, "p1")); auto param1 = builder.AddInstruction( HloInstruction::CreateParameter(2, f32vec100_, "p2")); auto mul = builder.AddInstruction(HloInstruction::CreateBinary( f32vec100_, HloOpcode::kMultiply, broadcast, param0)); auto add = builder.AddInstruction( HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1)); auto sub = builder.AddInstruction(HloInstruction::CreateBinary( f32vec100_, HloOpcode::kSubtract, add, param1)); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(builder.Build()); auto buffers_orig = RunBufferAssignment(module.get()); TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<BufferAssignment> buffers, ConvertToProtoAndBack(buffers_orig.get(), module.get())); BufferAllocation paramscalar_buffer = GetAssignedInputAllocation(*buffers, 
paramscalar); BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0); BufferAllocation param1_buffer = GetAssignedInputAllocation(*buffers, param1); EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index()); EXPECT_NE(paramscalar_buffer.index(), param1_buffer.index()); EXPECT_NE(param0_buffer.index(), param1_buffer.index()); const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul); EXPECT_NE(mul_buffer.index(), param0_buffer.index()); const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add); EXPECT_EQ(add_buffer.index(), mul_buffer.index()); GetAssignedOutputAllocation(*buffers, sub); } TEST_F(BufferAssignmentTest, BasicToFromProto) { auto builder = HloComputation::Builder(TestName()); auto paramscalar = builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p")); auto broadcast = builder.AddInstruction( HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {})); auto param0 = builder.AddInstruction( HloInstruction::CreateParameter(1, f32vec100_, "p1")); auto param1 = builder.AddInstruction( HloInstruction::CreateParameter(2, f32vec100_, "p2")); auto mul = builder.AddInstruction(HloInstruction::CreateBinary( f32vec100_, HloOpcode::kMultiply, broadcast, param0)); auto add = builder.AddInstruction( HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1)); builder.AddInstruction(HloInstruction::CreateBinary( f32vec100_, HloOpcode::kSubtract, add, param1)); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(builder.Build()); auto buffers_orig = RunBufferAssignment(module.get()); TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<BufferAssignment> buffers_from_proto, ConvertToProtoAndBack(buffers_orig.get(), module.get())); const HloDataflowAnalysis& dataflow_orig = buffers_orig->dataflow_analysis(); const HloDataflowAnalysis& dataflow_proto = buffers_from_proto->dataflow_analysis(); EXPECT_EQ(buffers_orig->Allocations().size(), buffers_from_proto->Allocations().size()); 
// Tail of the preceding proto round-trip test (opened above this excerpt):
// for every value that had an allocation in the original assignment, check
// that the proto-reconstructed assignment has a matching allocation with the
// same color and allocation index.
for (BufferValue::Id id = 0; id < dataflow_orig.values().size(); id++) {
    auto& orig_value = dataflow_orig.values().at(id);
    if (buffers_orig->HasAllocation(*orig_value)) {
      // Look up the corresponding value in the round-tripped dataflow by
      // (instruction, shape index).
      auto& value_proto = dataflow_proto.GetUniqueValueAt(
          orig_value->instruction(), orig_value->index());
      EXPECT_TRUE(buffers_from_proto->HasAllocation(value_proto));
      EXPECT_EQ(orig_value->color(), value_proto.color());
      EXPECT_EQ(buffers_orig->GetAssignedAllocation(*orig_value).index(),
                buffers_from_proto->GetAssignedAllocation(value_proto).index());
    }
  }
}

// A parameter that is aliased with the entry output ({} <-> param 0) may be
// reused by downstream instructions: param, neg_1 and neg_2 should all land
// in the same allocation. The assignment is round-tripped through its proto
// form before checking.
TEST_F(BufferAssignmentTest, AliasedParamCanBeReused) {
  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, f32vec100_, "p0"));
  auto neg_1 = builder.AddInstruction(
      HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param));
  auto neg_2 = builder.AddInstruction(
      HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, neg_1));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  // Alias the entry output with parameter 0 so the parameter buffer becomes
  // writable/reusable.
  TF_ASSERT_OK(module->input_output_alias_config().SetUpAlias({}, 0, {}));
  auto buffers_orig = RunBufferAssignment(module.get());
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<BufferAssignment> buffers,
      ConvertToProtoAndBack(buffers_orig.get(), module.get()));
  BufferAllocation param_buffer = GetAssignedInputAllocation(*buffers, param);
  BufferAllocation neg_1_buffer = GetAllocation(*buffers, neg_1, {});
  BufferAllocation neg_2_buffer = GetAllocation(*buffers, neg_2, {});
  // All three share one allocation.
  EXPECT_EQ(param_buffer.index(), neg_1_buffer.index());
  EXPECT_EQ(neg_2_buffer.index(), neg_1_buffer.index());
}

// With buffer reuse disabled for kAdd (see
// RunBufferAssignmentNoBuffersReuseForAdd), `add` must get an allocation
// distinct from `sub`, while parameters keep their usual distinct
// allocations. The assignment is round-tripped through its proto form.
TEST_F(BufferAssignmentTest, AddCannotReuse) {
  auto builder = HloComputation::Builder(TestName());
  auto paramscalar =
      builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
  auto broadcast = builder.AddInstruction(
      HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, f32vec100_, "p1"));
  auto param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(2, f32vec100_, "p2"));
  auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
      f32vec100_, HloOpcode::kMultiply, broadcast, param0));
  auto add = builder.AddInstruction(
      HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));
  auto sub = builder.AddInstruction(HloInstruction::CreateBinary(
      f32vec100_, HloOpcode::kSubtract, add, param1));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  auto buffers_orig = RunBufferAssignmentNoBuffersReuseForAdd(module.get());
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<BufferAssignment> buffers,
      ConvertToProtoAndBack(buffers_orig.get(), module.get()));
  // Parameters are always assigned distinct allocations.
  BufferAllocation paramscalar_buffer =
      GetAssignedInputAllocation(*buffers, paramscalar);
  BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);
  BufferAllocation param1_buffer = GetAssignedInputAllocation(*buffers, param1);
  EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());
  EXPECT_NE(paramscalar_buffer.index(), param1_buffer.index());
  EXPECT_NE(param0_buffer.index(), param1_buffer.index());
  // Because reuse is disallowed for kAdd, add and sub cannot share.
  const BufferAllocation& sub_buffer = GetTopLevelAllocation(*buffers, sub);
  EXPECT_NE(sub_buffer.index(), param0_buffer.index());
  const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);
  EXPECT_NE(add_buffer.index(), sub_buffer.index());
  // The root (sub) must have an output allocation; value unused here.
  GetAssignedOutputAllocation(*buffers, sub);
}

// Gives every HLO value its own unique color via a custom colorer and checks
// that differently-colored values never share an allocation, and that each
// instruction's layout memory_space reflects the color it was assigned.
TEST_F(BufferAssignmentTest, BasicUniquelyColored) {
  auto builder = HloComputation::Builder(TestName());
  auto paramscalar =
      builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
  auto broadcast = builder.AddInstruction(
      HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, f32vec100_, "p1"));
  auto param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(2, f32vec100_, "p2"));
  auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
      f32vec100_, HloOpcode::kMultiply, broadcast, param0));
  auto add = builder.AddInstruction(
      HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));
  auto sub = builder.AddInstruction(HloInstruction::CreateBinary(
      f32vec100_, HloOpcode::kSubtract, add, param1));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  // Records the color handed to each defining instruction so the
  // memory_space expectations below can compare against it.
  absl::flat_hash_map<const HloInstruction*, int> color_map;
  auto colorer = [&](HloAliasAnalysis* alias_analysis, const HloOrdering&) {
    // Assign a fresh, monotonically increasing color to every value.
    int color = 0;
    for (HloValue::Id id = 0;
         id < alias_analysis->dataflow_analysis().values().size(); id++) {
      auto& value = alias_analysis->dataflow_analysis().GetValue(id);
      color_map[value.defining_instruction()] = color;
      value.set_color(BufferValue::Color(color++));
    }
    return absl::OkStatus();
  };
  auto buffers = RunColoredBufferAssignment(module.get(), colorer);
  BufferAllocation paramscalar_buffer =
      GetAssignedInputAllocation(*buffers, paramscalar);
  BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);
  BufferAllocation param1_buffer = GetAssignedInputAllocation(*buffers, param1);
  EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());
  EXPECT_NE(paramscalar_buffer.index(), param1_buffer.index());
  EXPECT_NE(param0_buffer.index(), param1_buffer.index());
  // Unique colors forbid sharing even where reuse would otherwise happen.
  const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul);
  EXPECT_NE(mul_buffer.index(), param0_buffer.index());
  const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);
  EXPECT_NE(add_buffer.index(), mul_buffer.index());
  GetAssignedOutputAllocation(*buffers, sub);
  // Colors should be propagated into each instruction's layout memory_space.
  EXPECT_EQ(param0->shape().layout().memory_space(), color_map[param0]);
  EXPECT_EQ(param1->shape().layout().memory_space(), color_map[param1]);
  EXPECT_EQ(mul->shape().layout().memory_space(), color_map[mul]);
  EXPECT_EQ(add->shape().layout().memory_space(), color_map[add]);
  EXPECT_EQ(sub->shape().layout().memory_space(), color_map[sub]);
}

// Colors only add/multiply values as color 1 (everything else color 0) and
// checks that same-colored values may share while cross-color sharing with
// parameters is excluded. (Body continues on the following lines.)
TEST_F(BufferAssignmentTest, BasicPartiallyColored) {
  auto builder =
HloComputation::Builder(TestName());  // continues `auto builder =` above
  auto paramscalar =
      builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
  auto broadcast = builder.AddInstruction(
      HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, f32vec100_, "p1"));
  auto param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(2, f32vec100_, "p2"));
  auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
      f32vec100_, HloOpcode::kMultiply, broadcast, param0));
  auto add = builder.AddInstruction(
      HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));
  auto sub = builder.AddInstruction(HloInstruction::CreateBinary(
      f32vec100_, HloOpcode::kSubtract, add, param1));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  // Color 1 for any buffer containing a value defined by kAdd/kMultiply;
  // everything left uncolored afterwards defaults to color 0.
  auto colorer = [](HloAliasAnalysis* alias_analysis, const HloOrdering&) {
    for (HloValue::Id id = 0;
         id < alias_analysis->dataflow_analysis().values().size(); id++) {
      auto& value = alias_analysis->dataflow_analysis().GetValue(id);
      auto& buffer = alias_analysis->GetBufferContainingValue(value);
      for (const auto& alias : buffer.values()) {
        if (alias->instruction()->opcode() == HloOpcode::kAdd ||
            alias->instruction()->opcode() == HloOpcode::kMultiply) {
          value.set_color(LogicalBuffer::Color(1));
        }
      }
      if (!value.has_color()) {
        value.set_color(LogicalBuffer::Color(0));
      }
    }
    return absl::OkStatus();
  };
  auto buffers = RunColoredBufferAssignment(module.get(), colorer);
  BufferAllocation paramscalar_buffer =
      GetAssignedInputAllocation(*buffers, paramscalar);
  BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);
  BufferAllocation param1_buffer = GetAssignedInputAllocation(*buffers, param1);
  EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());
  EXPECT_NE(paramscalar_buffer.index(), param1_buffer.index());
  EXPECT_NE(param0_buffer.index(), param1_buffer.index());
  const BufferAllocation& mul_buffer =
      GetTopLevelAllocation(*buffers, mul);
  EXPECT_NE(mul_buffer.index(), param0_buffer.index());
  // mul and add share color 1, so they may share an allocation.
  const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);
  EXPECT_EQ(add_buffer.index(), mul_buffer.index());
  GetAssignedOutputAllocation(*buffers, sub);
  // Colors propagate into layout memory_space: 1 for mul/add, 0 otherwise.
  EXPECT_EQ(mul->shape().layout().memory_space(), 1);
  EXPECT_EQ(add->shape().layout().memory_space(), 1);
  EXPECT_EQ(sub->shape().layout().memory_space(), 0);
  EXPECT_EQ(param0->shape().layout().memory_space(), 0);
  EXPECT_EQ(param1->shape().layout().memory_space(), 0);
}

// Pre-assigns offset/size chunks for mul and add in memory space 1 and
// verifies the final assignment places both values into a single color-1
// allocation at exactly those offsets/sizes.
TEST_F(BufferAssignmentTest, PresetAssignments) {
  auto builder = HloComputation::Builder(TestName());
  auto paramscalar =
      builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
  auto broadcast = builder.AddInstruction(
      HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, f32vec100_, "p1"));
  auto param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(2, f32vec100_, "p2"));
  // Same vector shape but placed in memory space 1 (the last argument).
  Shape f32vec100_color1 =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {100}, {0}, {}, 1, 0, 1);
  auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
      f32vec100_color1, HloOpcode::kMultiply, broadcast, param0));
  auto add = builder.AddInstruction(HloInstruction::CreateBinary(
      f32vec100_color1, HloOpcode::kAdd, mul, param1));
  auto sub = builder.AddInstruction(HloInstruction::CreateBinary(
      f32vec100_, HloOpcode::kSubtract, add, param1));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  // mul at [100, 500), add at [550, 950) within the space-1 arena.
  auto preset_assignments = std::make_unique<PresetAssignments>();
  preset_assignments->add_chunk({mul, {}},
                                HeapSimulator::Chunk::FromOffsetSize(100, 400));
  preset_assignments->add_chunk({add, {}},
                                HeapSimulator::Chunk::FromOffsetSize(550, 400));
  // Total size reserved for memory space 1.
  preset_assignments->assignment_information_for_space(1)->size = 950;
  auto buffers = RunBufferAssignmentWithPresetAssignments(
      module.get(), std::move(preset_assignments));
  // Un-preset values keep their default color-0 allocations.
  BufferAllocation paramscalar_buffer =
      GetAssignedInputAllocation(*buffers, paramscalar);
  BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);
  BufferAllocation param1_buffer = GetAssignedInputAllocation(*buffers, param1);
  EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());
  EXPECT_NE(paramscalar_buffer.index(), param1_buffer.index());
  EXPECT_EQ(paramscalar_buffer.color(), LogicalBuffer::Color(0));
  EXPECT_NE(param0_buffer.index(), param1_buffer.index());
  EXPECT_EQ(param0_buffer.color(), LogicalBuffer::Color(0));
  // mul and add share the single color-1 allocation.
  const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul);
  const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);
  EXPECT_EQ(mul_buffer, add_buffer);
  EXPECT_NE(mul_buffer.index(), param0_buffer.index());
  EXPECT_EQ(mul_buffer.color(), LogicalBuffer::Color(1));
  EXPECT_EQ(mul_buffer.assigned_buffers().size(), 2);
  // Each assigned value must sit at exactly its preset offset/size.
  for (const auto& value_and_offsetsize : mul_buffer.assigned_buffers()) {
    if (value_and_offsetsize.first->instruction() == mul) {
      EXPECT_EQ(value_and_offsetsize.second.offset, 100);
      EXPECT_EQ(value_and_offsetsize.second.size, 400);
    } else {
      EXPECT_EQ(value_and_offsetsize.first->instruction(), add);
      EXPECT_EQ(value_and_offsetsize.second.offset, 550);
      EXPECT_EQ(value_and_offsetsize.second.size, 400);
    }
  }
  GetAssignedOutputAllocation(*buffers, sub);
}

// Preset assignments for a buffer that flows through a while loop: all
// aliased values (negate, while param/root elements, etc.) should end up at
// the preset chunk with color 1. (Body continues on the following lines.)
TEST_F(BufferAssignmentTest, PresetAssignmentsWhile) {
  auto module = CreateNewVerifiedModule();
  // f32[10] placed in memory space 1, and the while-carried tuple shape.
  Shape f32vec10_color1 =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {10}, {0}, {}, 1, 0, 1);
  Shape t_s32_f32v10_color1 =
      ShapeUtil::MakeTupleShape({s32_, f32vec10_color1});
  // Condition: iterate while iter < 50.
  auto cond_builder = HloComputation::Builder("WhileCond");
  HloInstruction* cond_param = cond_builder.AddInstruction(
      HloInstruction::CreateParameter(0, t_s32_f32v10_color1, "cond_param"));
  HloInstruction* cond_iter = cond_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(s32_, cond_param, 0));
  HloInstruction* cond_limit = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(50)));  // cont'd
  cond_builder.AddInstruction(
      HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), cond_iter,
                                    cond_limit, ComparisonDirection::kLt));
  HloComputation* cond_computation =
      module->AddEmbeddedComputation(cond_builder.Build());
  // Body: increment the iterator and add a constant vector to the data.
  auto body_builder = HloComputation::Builder("WhileBody");
  HloInstruction* body_param = body_builder.AddInstruction(
      HloInstruction::CreateParameter(0, t_s32_f32v10_color1, "body_param"));
  HloInstruction* body_iter = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(s32_, body_param, 0));
  HloInstruction* body_data = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(f32vec10_color1, body_param, 1));
  HloInstruction* body_data_increment = body_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
          {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f})));
  HloInstruction* body_data_next =
      body_builder.AddInstruction(HloInstruction::CreateBinary(
          f32vec10_color1, HloOpcode::kAdd, body_data, body_data_increment));
  HloInstruction* body_iter_increment = body_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(1)));
  HloInstruction* body_iter_next =
      body_builder.AddInstruction(HloInstruction::CreateBinary(
          s32_, HloOpcode::kAdd, body_iter, body_iter_increment));
  body_builder.AddInstruction(
      HloInstruction::CreateTuple({body_iter_next, body_data_next}));
  HloComputation* body_computation =
      module->AddEmbeddedComputation(body_builder.Build());
  // Entry: negate into memory space 1, run the while, then read the result.
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* iter = builder.AddInstruction(
      HloInstruction::CreateParameter(0, s32_, "param_iter"));
  HloInstruction* data = builder.AddInstruction(
      HloInstruction::CreateParameter(1, f32vec10_, "param_data"));
  HloInstruction* negate = builder.AddInstruction(
      HloInstruction::CreateUnary(f32vec10_color1, HloOpcode::kNegate, data));
  HloInstruction* tuple =
      builder.AddInstruction(HloInstruction::CreateTuple({iter, negate}));
  HloInstruction* while_op = builder.AddInstruction(HloInstruction::CreateWhile(
      t_s32_f32v10_color1, cond_computation, body_computation, tuple));
  HloInstruction* while_data = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(f32vec10_color1, while_op, 1));
  builder.AddInstruction(HloInstruction::CreateBinary(
      f32vec10_, HloOpcode::kAdd, while_data, data));
  module->AddEntryComputation(builder.Build());
  // Preset only `negate`; the while aliasing should pull the whole chain
  // into the same space-1 chunk.
  auto preset_assignments = std::make_unique<PresetAssignments>();
  preset_assignments->add_chunk({negate, {}},
                                HeapSimulator::Chunk::FromOffsetSize(100, 40));
  preset_assignments->assignment_information_for_space(1)->size = 140;
  auto buffers_orig = RunBufferAssignmentWithPresetAssignments(
      module.get(), std::move(preset_assignments));
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<BufferAssignment> buffers,
      ConvertToProtoAndBack(buffers_orig.get(), module.get()));
  // All five aliased values share the preset chunk, with color 1.
  const BufferAllocation& data_buffer = GetTopLevelAllocation(*buffers, negate);
  EXPECT_EQ(data_buffer.assigned_buffers().size(), 5);
  for (const auto& value_and_offsetsize : data_buffer.assigned_buffers()) {
    EXPECT_EQ(value_and_offsetsize.second.offset, 100);
    EXPECT_EQ(value_and_offsetsize.second.size, 40);
    EXPECT_EQ(value_and_offsetsize.first->color(), LogicalBuffer::Color(1));
  }
}

// `mul` feeds both `add` and `sub`, so its buffer stays live past `add` and
// add/mul cannot share an allocation. Round-trips through the proto form.
TEST_F(BufferAssignmentTest, MultipleUsersForNode) {
  auto builder = HloComputation::Builder(TestName());
  auto paramscalar =
      builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p"));
  auto broadcast = builder.AddInstruction(
      HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {}));
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, f32vec100_, "p1"));
  auto param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(2, f32vec100_, "p2"));
  auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
      f32vec100_, HloOpcode::kMultiply, broadcast, param0));
  auto add = builder.AddInstruction(
      HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1));
  // Note: sub consumes mul (not param1), giving mul two users.
  auto sub = builder.AddInstruction(
      HloInstruction::CreateBinary(f32vec100_, HloOpcode::kSubtract, add, mul));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  auto buffers_orig = RunBufferAssignment(module.get());
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<BufferAssignment> buffers,
      ConvertToProtoAndBack(buffers_orig.get(), module.get()));
  BufferAllocation paramscalar_buffer =
      GetAssignedInputAllocation(*buffers, paramscalar);
  BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);
  BufferAllocation param1_index = GetAssignedInputAllocation(*buffers, param1);
  EXPECT_NE(paramscalar_buffer.index(), param0_buffer.index());
  EXPECT_NE(paramscalar_buffer.index(), param1_index.index());
  EXPECT_NE(param0_buffer.index(), param1_index.index());
  const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul);
  const BufferAllocation& add_buffer = GetTopLevelAllocation(*buffers, add);
  EXPECT_NE(add_buffer.index(), mul_buffer.index());
  const std::vector<const HloInstruction*> level0 = GetInstructions(sub);
  int64_t size0 = ValidateBuffers(level0, *buffers);
  LOG(INFO) << "LogicalBuffer count " << buffers->Allocations().size()
            << " for " << level0.size() << " instructions; "
            << "total buffer size " << size0;
}

// A trivial kMap over an embedded +1 computation: entry buffers and the
// embedded computation's buffers must be distinct, and the map output gets
// its own allocation separate from the parameter.
TEST_F(BufferAssignmentTest, TrivialMap) {
  auto module = CreateNewVerifiedModule();
  auto map_computation =
      module->AddEmbeddedComputation(BuildMapComputationPlus1("f32+1"));
  auto inner_last = map_computation->root_instruction();
  auto builder = HloComputation::Builder(TestName());
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, f32a100x10_, "p"));
  auto map = builder.AddInstruction(
      HloInstruction::CreateMap(f32a100x10_, {param0}, map_computation));
  module->AddEntryComputation(builder.Build());
  const std::vector<const HloInstruction*> level0 = GetInstructions(map);
  EXPECT_EQ(2, level0.size()) << "Invalid main kernel size";
  const std::vector<const HloInstruction*> level1 = GetInstructions(inner_last);
  EXPECT_EQ(3, level1.size()) << "Invalid nested add+1 size";
  auto buffers = RunBufferAssignment(module.get());
  int64_t size0 = ValidateBuffers(level0, *buffers);
  int64_t size1 = ValidateBuffers(level1, *buffers);
  // No sharing between entry and embedded map computation.
  EXPECT_TRUE(BuffersDistinct(level0, level1, *buffers))
      << "Reuse between main kernel and embedded mapping.";
  BufferAllocation param0_buffer = GetAssignedInputAllocation(*buffers, param0);
  BufferAllocation map_buffer = GetAssignedOutputAllocation(*buffers, map);
  EXPECT_NE(param0_buffer.index(), map_buffer.index());
  EXPECT_EQ(HloOpcode::kAdd, inner_last->opcode());
  const BufferAllocation& inner_add_buffer =
      GetTopLevelAllocation(*buffers, inner_last);
  EXPECT_NE(inner_add_buffer.index(), map_buffer.index());
  LOG(INFO) << "LogicalBuffer count " << buffers->Allocations().size()
            << " for " << level0.size() + level1.size() << " instructions; "
            << "total buffer size " << size0 + size1;
}

// A reduce may not write its output into its input buffer: exp2's
// allocation must differ from reduce's, while exp1/exp2 (elementwise chain)
// may share. (Body continues on the following lines.)
TEST_F(BufferAssignmentTest, CannotReuseInputBufferOfReduce) {
  auto module = CreateNewVerifiedModule();
  auto reduce_computation =
      module->AddEmbeddedComputation(BuildReduceComputation("f32+f32"));
  auto builder = HloComputation::Builder(TestName());
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, f32a100x10_, "p"));
  auto exp1 = builder.AddInstruction(
      HloInstruction::CreateUnary(f32a100x10_, HloOpcode::kExp, param0));
  auto exp2 = builder.AddInstruction(
      HloInstruction::CreateUnary(f32a100x10_, HloOpcode::kExp, exp1));
  auto const0 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
  auto reduce = builder.AddInstruction(HloInstruction::CreateReduce(
      f32vec10_, exp2, const0, {0}, reduce_computation));
  auto exp3 = builder.AddInstruction(
      HloInstruction::CreateUnary(f32vec10_, HloOpcode::kExp, reduce));
  module->AddEntryComputation(builder.Build());
  auto buffers = RunBufferAssignment(module.get());
  const std::vector<const
HloInstruction*> instrs = GetInstructions(exp3);  // continues decl above
  ValidateBuffers(instrs, *buffers);
  // Elementwise ops exp1/exp2 may share; reduce must not reuse its input.
  const BufferAllocation& exp1_buffer = GetTopLevelAllocation(*buffers, exp1);
  const BufferAllocation& exp2_buffer = GetTopLevelAllocation(*buffers, exp2);
  const BufferAllocation& reduce_buffer =
      GetTopLevelAllocation(*buffers, reduce);
  EXPECT_EQ(exp1_buffer.index(), exp2_buffer.index());
  EXPECT_NE(exp2_buffer.index(), reduce_buffer.index());
}

// A while loop with embedded condition and body computations: buffers ARE
// expected to be shared between entry, condition and body (the while carries
// its state through aliased buffers), and each subshape of the while output
// aliases the corresponding subshape of the body root.
TEST_F(BufferAssignmentTest, ExampleWhile) {
  auto module = CreateNewVerifiedModule();
  auto condition_computation =
      module->AddEmbeddedComputation(BuildWhileConditionComputation("if<4"));
  auto body_computation =
      module->AddEmbeddedComputation(BuildWhileBodyComputation("add-update"));
  auto builder = HloComputation::Builder(TestName());
  auto const3 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(0)));
  auto const4 = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f})));
  auto tuple =
      builder.AddInstruction(HloInstruction::CreateTuple({const3, const4}));
  auto while_op = builder.AddInstruction(HloInstruction::CreateWhile(
      t_s32_f32v4_, condition_computation, body_computation, tuple));
  module->AddEntryComputation(builder.Build());
  const std::vector<const HloInstruction*> level0 = GetInstructions(while_op);
  EXPECT_EQ(4, level0.size()) << "Invalid while kernel size";
  const std::vector<const HloInstruction*> levelc =
      GetInstructions(condition_computation->root_instruction());
  EXPECT_EQ(4, levelc.size()) << "Invalid nested condition size";
  const std::vector<const HloInstruction*> levelb =
      GetInstructions(body_computation->root_instruction());
  EXPECT_EQ(8, levelb.size()) << "Invalid nested body size";
  auto buffers = RunBufferAssignment(module.get());
  int64_t size0 = ValidateBuffers(level0, *buffers);
  int64_t sizec = ValidateBuffers(levelc, *buffers);
  int64_t sizeb = ValidateBuffers(levelb, *buffers);
  // Unlike embedded map/call computations, while computations share buffers
  // with the enclosing computation.
  EXPECT_FALSE(BuffersDistinct(level0, levelc, *buffers))
      << "Should be reuse between main kernel and embedded condition.";
  EXPECT_FALSE(BuffersDistinct(levelb, levelc, *buffers))
      << "Should be reuse between embedded condition and body.";
  EXPECT_FALSE(BuffersDistinct(level0, levelb, *buffers))
      << "Should be reuse between main kernel and embedded body.";
  HloInstruction* body_root = body_computation->root_instruction();
  EXPECT_EQ(HloOpcode::kTuple, body_root->opcode());
  // Every subshape of the while output aliases the body root's subshape.
  ShapeUtil::ForEachSubshape(
      while_op->shape(),
      [this, &buffers, while_op, body_root](const Shape&,
                                            const ShapeIndex& index) {
        auto while_op_allocation = GetAllocation(*buffers, while_op, index);
        auto body_root_allocation = GetAllocation(*buffers, body_root, index);
        EXPECT_EQ(while_op_allocation.index(), body_root_allocation.index());
      });
  LOG(INFO) << "LogicalBuffer count " << buffers->Allocations().size()
            << " for " << level0.size() + levelc.size() + levelb.size()
            << " instructions; total buffer size " << size0 + sizec + sizeb;
}

// A conditional with ceil/floor branch computations: buffers are shared
// between the conditional and both branches, and the branch-root
// allocations have the same size as the conditional's output allocation.
TEST_F(BufferAssignmentTest, ExampleConditional) {
  auto module = CreateNewVerifiedModule();
  auto true_computation = module->AddEmbeddedComputation(
      BuildR0F32UnaryOpComputation(HloOpcode::kCeil, "Ceil"));
  auto false_computation = module->AddEmbeddedComputation(
      BuildR0F32UnaryOpComputation(HloOpcode::kFloor, "Floor"));
  auto builder = HloComputation::Builder(TestName());
  auto pred = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  auto const1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(56.4f)));
  auto const2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(12.4f)));
  auto conditional = builder.AddInstruction(HloInstruction::CreateConditional(
      r0f32_, pred, const1, true_computation, const2, false_computation));
  module->AddEntryComputation(builder.Build());
  const std::vector<const HloInstruction*> conditional_instrs =
      GetInstructions(conditional);
  const std::vector<const HloInstruction*> true_instrs =
      GetInstructions(true_computation->root_instruction());
  const std::vector<const HloInstruction*> false_instrs =
      GetInstructions(false_computation->root_instruction());
  EXPECT_EQ(4, conditional_instrs.size());
  EXPECT_EQ(2, true_instrs.size());
  EXPECT_EQ(2, false_instrs.size());
  auto buffers = RunBufferAssignment(module.get());
  ValidateBuffers(conditional_instrs, *buffers);
  ValidateBuffers(true_instrs, *buffers);
  ValidateBuffers(false_instrs, *buffers);
  // Conditional branches share buffers with the enclosing computation.
  EXPECT_FALSE(BuffersDistinct(conditional_instrs, true_instrs, *buffers))
      << "Should be reuse between conditional and true computation.";
  EXPECT_FALSE(BuffersDistinct(conditional_instrs, false_instrs, *buffers))
      << "Should be reuse between conditional and false computation.";
  EXPECT_FALSE(BuffersDistinct(true_instrs, false_instrs, *buffers))
      << "Should be reuse between true and false computations.";
  const BufferAllocation& conditional_buffer =
      GetTopLevelAllocation(*buffers, conditional);
  const BufferAllocation& true_buffer =
      GetTopLevelAllocation(*buffers, true_computation->root_instruction());
  const BufferAllocation& false_buffer =
      GetTopLevelAllocation(*buffers, false_computation->root_instruction());
  EXPECT_EQ(conditional_buffer.size(), true_buffer.size());
  EXPECT_EQ(conditional_buffer.size(), false_buffer.size());
}

// A chain of same-shaped elementwise unary ops (exp -> tanh -> exp -> neg)
// should all reuse one allocation. (Body continues on the following lines.)
TEST_F(BufferAssignmentTest, UnaryOpReuseChain) {
  auto builder = HloComputation::Builder(TestName());
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, f32vec100_, "p"));
  auto exp1 = builder.AddInstruction(
      HloInstruction::CreateUnary(f32vec100_, HloOpcode::kExp, param0));
  auto tanh = builder.AddInstruction(
      HloInstruction::CreateUnary(f32vec100_, HloOpcode::kTanh, exp1));
  auto exp2 = builder.AddInstruction(
      HloInstruction::CreateUnary(f32vec100_, HloOpcode::kExp, tanh));
  auto neg = builder.AddInstruction(
      HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, exp2));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  auto assignment =
RunBufferAssignment(module.get());  // continues `auto assignment =` above
  // The whole unary chain collapses into exp1's allocation.
  EXPECT_TRUE(assignment->HasTopLevelAllocation(exp1));
  auto& buffer_for_exp1 = GetTopLevelAllocation(*assignment, exp1);
  EXPECT_EQ(buffer_for_exp1, GetTopLevelAllocation(*assignment, tanh));
  EXPECT_EQ(buffer_for_exp1, GetTopLevelAllocation(*assignment, exp2));
  EXPECT_EQ(buffer_for_exp1, GetTopLevelAllocation(*assignment, neg));
}

// The broadcast may reuse negate's (dead, same-sized) buffer even though
// negate is not an operand of the broadcast — but not slice's buffer,
// which is its operand.
TEST_F(BufferAssignmentTest, ReuseNonOperandBuffer) {
  auto builder = HloComputation::Builder(TestName());
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, f32vec100_, "param0"));
  auto negate = builder.AddInstruction(
      HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));
  auto slice = builder.AddInstruction(
      HloInstruction::CreateSlice(f32vec10_, negate, {0}, {10}, {1}));
  auto broadcast = builder.AddInstruction(
      HloInstruction::CreateBroadcast(f32a100x10_, slice, {1}));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  auto assignment = RunBufferAssignment(module.get());
  EXPECT_TRUE(assignment->HasTopLevelAllocation(broadcast));
  auto& buffer_for_bcast = GetTopLevelAllocation(*assignment, broadcast);
  EXPECT_EQ(buffer_for_bcast, GetTopLevelAllocation(*assignment, negate));
  EXPECT_NE(buffer_for_bcast, GetTopLevelAllocation(*assignment, slice));
}

// Same graph as ReuseNonOperandBuffer, but negate is kept live in the root
// tuple, so the broadcast may NOT reuse its buffer. Round-trips through the
// proto form.
TEST_F(BufferAssignmentTest, NoReuseLiveBuffer) {
  auto builder = HloComputation::Builder(TestName());
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, f32vec100_, "param0"));
  auto negate = builder.AddInstruction(
      HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));
  auto slice = builder.AddInstruction(
      HloInstruction::CreateSlice(f32vec10_, negate, {0}, {10}, {1}));
  auto broadcast = builder.AddInstruction(
      HloInstruction::CreateBroadcast(f32a100x10_, slice, {1}));
  // Root tuple keeps negate live until the end of the computation.
  builder.AddInstruction(HloInstruction::CreateTuple({negate, broadcast}));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  auto assignment_orig = RunBufferAssignment(module.get());
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<BufferAssignment> assignment,
      ConvertToProtoAndBack(assignment_orig.get(), module.get()));
  EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),
            GetTopLevelAllocation(*assignment, negate));
  EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),
            GetTopLevelAllocation(*assignment, slice));
  EXPECT_NE(GetTopLevelAllocation(*assignment, negate),
            GetTopLevelAllocation(*assignment, slice));
}

// As above, but negate stays live indirectly through a tuple alias: the
// tuple containing negate is part of the root, so no reuse is allowed.
// Round-trips through the proto form.
TEST_F(BufferAssignmentTest, NoReuseAliasedBuffer) {
  auto builder = HloComputation::Builder(TestName());
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, f32vec100_, "param0"));
  auto negate = builder.AddInstruction(
      HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));
  auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({negate}));
  auto tuple_element = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(f32vec100_, tuple, 0));
  auto slice = builder.AddInstruction(
      HloInstruction::CreateSlice(f32vec10_, tuple_element, {0}, {10}, {1}));
  auto broadcast = builder.AddInstruction(
      HloInstruction::CreateBroadcast(f32a100x10_, slice, {1}));
  // The root aliases negate through `tuple`, keeping it live.
  builder.AddInstruction(HloInstruction::CreateTuple({tuple, broadcast}));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  auto assignment_orig = RunBufferAssignment(module.get());
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<BufferAssignment> assignment,
      ConvertToProtoAndBack(assignment_orig.get(), module.get()));
  EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),
            GetTopLevelAllocation(*assignment, negate));
  EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),
            GetTopLevelAllocation(*assignment, slice));
  EXPECT_NE(GetTopLevelAllocation(*assignment, negate),
            GetTopLevelAllocation(*assignment, slice));
}

// The broadcast output f32[10,4] is smaller than negate's f32[100] buffer;
// size mismatch forbids reuse of either negate's or slice's buffer.
TEST_F(BufferAssignmentTest, DoNotReuseOversizedOutputBuffer) {
  auto builder = HloComputation::Builder(TestName());
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, f32vec100_, "param0"));
  // negate: f32[100] (400 bytes).
  auto negate = builder.AddInstruction(
      HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));
  // slice: f32[10].
  auto slice = builder.AddInstruction(
      HloInstruction::CreateSlice(f32vec10_, negate, {0}, {10}, {1}));
  // broadcast: f32[10,4] — not the same size as negate.
  auto broadcast = builder.AddInstruction(HloInstruction::CreateBroadcast(
      ShapeUtil::MakeShape(F32, {10, 4}), slice, {0}));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  auto assignment = RunBufferAssignment(module.get());
  EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),
            GetTopLevelAllocation(*assignment, negate));
  EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast),
            GetTopLevelAllocation(*assignment, slice));
}

// Same as above but the broadcast output f32[10,10] exactly matches
// negate's f32[100] byte size, so reuse of negate's buffer is allowed.
TEST_F(BufferAssignmentTest, ReuseOutputBufferIfExactlySized) {
  auto builder = HloComputation::Builder(TestName());
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, f32vec100_, "param0"));
  auto negate = builder.AddInstruction(
      HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param0));
  auto slice = builder.AddInstruction(
      HloInstruction::CreateSlice(f32vec10_, negate, {0}, {10}, {1}));
  // f32[10,10] == 100 elements, exactly negate's size.
  auto broadcast = builder.AddInstruction(HloInstruction::CreateBroadcast(
      ShapeUtil::MakeShape(F32, {10, 10}), slice, {0}));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  auto assignment = RunBufferAssignment(module.get());
  EXPECT_TRUE(assignment->HasTopLevelAllocation(broadcast));
  auto& buffer_for_bcast = GetTopLevelAllocation(*assignment, broadcast);
  EXPECT_EQ(buffer_for_bcast, GetTopLevelAllocation(*assignment, negate));
  EXPECT_NE(buffer_for_bcast, GetTopLevelAllocation(*assignment, slice));
}

// Oversized-output no-reuse check again, with the broadcast wrapped in the
// root tuple. (Body continues on the following lines.)
TEST_F(BufferAssignmentTest, DoNotReuseOversizedOutputBufferInTuple) {
  auto builder = HloComputation::Builder(TestName());
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, f32vec100_, "param0"));
  auto negate = builder.AddInstruction(
      HloInstruction::CreateUnary(f32vec100_,
HloOpcode::kNegate, param0)); auto slice = builder.AddInstruction( HloInstruction::CreateSlice(f32vec10_, negate, {0}, {10}, {1})); auto broadcast = builder.AddInstruction(HloInstruction::CreateBroadcast( ShapeUtil::MakeShape(F32, {10, 4}), slice, {0})); builder.AddInstruction(HloInstruction::CreateTuple({broadcast})); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(builder.Build()); auto assignment = RunBufferAssignment(module.get()); EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast), GetTopLevelAllocation(*assignment, negate)); EXPECT_NE(GetTopLevelAllocation(*assignment, broadcast), GetTopLevelAllocation(*assignment, slice)); } TEST_F(BufferAssignmentTest, EmbeddedComputationBuffers) { auto module = CreateNewVerifiedModule(); auto vec_shape = ShapeUtil::MakeShape(F32, {42}); auto scalar_shape = ShapeUtil::MakeShape(F32, {}); auto map_builder = HloComputation::Builder(TestName() + "_map"); auto map_param = map_builder.AddInstruction( HloInstruction::CreateParameter(0, scalar_shape, "map_param")); auto map_root = map_builder.AddInstruction( HloInstruction::CreateUnary(scalar_shape, HloOpcode::kNegate, map_param)); auto map_computation = module->AddEmbeddedComputation(map_builder.Build()); auto call_builder = HloComputation::Builder(TestName() + "_call"); auto call_param = call_builder.AddInstruction( HloInstruction::CreateParameter(0, vec_shape, "vec_param")); auto call_root = call_builder.AddInstruction( HloInstruction::CreateUnary(vec_shape, HloOpcode::kExp, call_param)); auto call_computation = module->AddEmbeddedComputation(call_builder.Build()); auto builder = HloComputation::Builder(TestName()); auto param = builder.AddInstruction( HloInstruction::CreateParameter(0, vec_shape, "param")); auto call = builder.AddInstruction( HloInstruction::CreateCall(vec_shape, {param}, call_computation)); auto map = builder.AddInstruction( HloInstruction::CreateMap(vec_shape, {call}, map_computation)); module->AddEntryComputation(builder.Build()); 
// Continuation of EmbeddedComputationBuffers: buffers inside the mapped
// computation are expected to be thread-local, while the called computation's
// parameter aliases the entry parameter and the map result may live out.
auto assignment = RunBufferAssignment(module.get()); auto& map_param_alloc = GetTopLevelAllocation(*assignment, map_param); EXPECT_FALSE(map_param_alloc.is_entry_computation_parameter()); EXPECT_FALSE(map_param_alloc.maybe_live_out()); EXPECT_TRUE(map_param_alloc.is_thread_local()); auto& map_root_alloc = GetTopLevelAllocation(*assignment, map_root); EXPECT_FALSE(map_root_alloc.is_entry_computation_parameter()); EXPECT_FALSE(map_root_alloc.maybe_live_out()); EXPECT_TRUE(map_root_alloc.is_thread_local()); auto& call_param_alloc = GetTopLevelAllocation(*assignment, call_param); EXPECT_TRUE(call_param_alloc.is_entry_computation_parameter()); EXPECT_FALSE(call_param_alloc.maybe_live_out()); EXPECT_FALSE(call_param_alloc.is_thread_local()); auto& call_root_alloc = GetTopLevelAllocation(*assignment, call_root); EXPECT_FALSE(call_root_alloc.is_entry_computation_parameter()); EXPECT_FALSE(call_root_alloc.is_thread_local()); auto& param_alloc = GetTopLevelAllocation(*assignment, param); EXPECT_TRUE(param_alloc.is_entry_computation_parameter()); EXPECT_FALSE(param_alloc.maybe_live_out()); EXPECT_FALSE(param_alloc.is_thread_local()); auto& map_alloc = GetTopLevelAllocation(*assignment, map); EXPECT_FALSE(map_alloc.is_entry_computation_parameter()); EXPECT_TRUE(map_alloc.maybe_live_out()); EXPECT_FALSE(map_alloc.is_thread_local()); }

// Checks that buffers of a computation attached to a custom call are
// thread-local and do not live out; assertions continue on the next line.
TEST_F(BufferAssignmentTest, CustomCallEmbeddedComputationBuffers) { auto module = CreateNewVerifiedModule(); auto scalar_shape = ShapeUtil::MakeShape(F32, {}); auto map_builder = HloComputation::Builder(TestName() + "_map"); auto map_param = map_builder.AddInstruction( HloInstruction::CreateParameter(0, scalar_shape, "map_param")); auto map_root = map_builder.AddInstruction( HloInstruction::CreateUnary(scalar_shape, HloOpcode::kNegate, map_param)); auto map_computation = module->AddEmbeddedComputation(map_builder.Build()); auto builder = HloComputation::Builder(TestName()); auto param = builder.AddInstruction( HloInstruction::CreateParameter(0,
// Continuation of CustomCallEmbeddedComputationBuffers: attach the map
// computation to a custom call and verify its buffers stay thread-local.
scalar_shape, "param")); builder.AddInstruction(HloInstruction::CreateCustomCall( scalar_shape, {param}, map_computation, "call_name")); module->AddEntryComputation(builder.Build()); auto assignment = RunBufferAssignment(module.get()); auto& map_param_alloc = GetTopLevelAllocation(*assignment, map_param); EXPECT_FALSE(map_param_alloc.is_entry_computation_parameter()); EXPECT_FALSE(map_param_alloc.maybe_live_out()); EXPECT_TRUE(map_param_alloc.is_thread_local()); auto& map_root_alloc = GetTopLevelAllocation(*assignment, map_root); EXPECT_FALSE(map_root_alloc.is_entry_computation_parameter()); EXPECT_FALSE(map_root_alloc.maybe_live_out()); EXPECT_TRUE(map_root_alloc.is_thread_local()); }

// A tuple parameter that is also the root: every subshape gets an allocation
// (4 total: top-level tuple + 3 leaves) marked as parameter 0 and live-out.
TEST_F(BufferAssignmentTest, TupleParameterAsOutput) { auto builder = HloComputation::Builder(TestName()); auto tuple_param = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(PRED, {1, 2, 3, 4}), ShapeUtil::MakeShape(F32, {}), ShapeUtil::MakeShape(S32, {42})}), "param0")); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(builder.Build()); auto assignment = RunBufferAssignment(module.get()); EXPECT_EQ(4, assignment->Allocations().size()); ShapeUtil::ForEachSubshape( tuple_param->shape(), [this, &assignment, tuple_param](const Shape& , const ShapeIndex& index) { auto allocation = GetAllocation(*assignment, tuple_param, index); EXPECT_TRUE(allocation.is_entry_computation_parameter()); EXPECT_EQ(0, allocation.parameter_number()); EXPECT_TRUE(allocation.maybe_live_out()); }); }

// A GTE of a nested tuple parameter is the root; allocation sharing and
// live-out assertions continue on the next physical line.
TEST_F(BufferAssignmentTest, ElementOfNestedTupleParameterAsOutput) { auto builder = HloComputation::Builder(TestName()); auto tuple_param = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeTupleShape( {ShapeUtil::MakeShape(PRED, {1, 2, 3, 4}), ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(S32, {42}), ShapeUtil::MakeShape(S32, {101})})}), "param0")); auto tuple_element =
// Continuation of ElementOfNestedTupleParameterAsOutput: the GTE'd subtree
// {1} (and its leaves) may live out and shares allocations with the GTE
// result; the untouched top-level tuple buffer does not live out.
builder.AddInstruction(HloInstruction::CreateGetTupleElement( ShapeUtil::GetSubshape(tuple_param->shape(), {1}), tuple_param, 1)); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(builder.Build()); auto assignment = RunBufferAssignment(module.get()); EXPECT_FALSE( GetAllocation(*assignment, tuple_param, {}).maybe_live_out()); EXPECT_TRUE( GetAllocation(*assignment, tuple_param, {1}).maybe_live_out()); EXPECT_TRUE(GetAllocation(*assignment, tuple_param, {1, 0}) .maybe_live_out()); EXPECT_TRUE(GetAllocation(*assignment, tuple_param, {1, 1}) .maybe_live_out()); EXPECT_TRUE( GetTopLevelAllocation(*assignment, tuple_element).maybe_live_out()); EXPECT_EQ(GetAllocation(*assignment, tuple_param, {1, 0}), GetAllocation(*assignment, tuple_element, {0})); EXPECT_EQ(GetAllocation(*assignment, tuple_param, {1, 1}), GetAllocation(*assignment, tuple_element, {1})); EXPECT_EQ(GetAllocation(*assignment, tuple_param, {1}), GetTopLevelAllocation(*assignment, tuple_element)); }

// A tuple constant as root gets 3 allocations (tuple + two int64 leaves).
TEST_F(BufferAssignmentTest, TupleConstantAsOutput) { auto builder = HloComputation::Builder(TestName()); Literal elements[] = {LiteralUtil::CreateR0<int64_t>(0), LiteralUtil::CreateR0<int64_t>(1)}; builder.AddInstruction(HloInstruction::CreateConstant( LiteralUtil::MakeTuple({&elements[0], &elements[1]}))); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(builder.Build()); auto assignment = RunBufferAssignment(module.get()); EXPECT_EQ(3, assignment->Allocations().size()); }

// A tuple-shaped custom call as root: expects 3 allocations; the live-out
// assertions for each subshape continue on the next physical line.
TEST_F(BufferAssignmentTest, TupleCustomCallAsOutput) { auto builder = HloComputation::Builder(TestName()); auto custom_call = builder.AddInstruction(HloInstruction::CreateCustomCall( ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(PRED, {1, 2, 3, 4}), ShapeUtil::MakeShape(S32, {101})}), {}, "foo_function")); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(builder.Build()); auto assignment = RunBufferAssignment(module.get()); EXPECT_EQ(3, assignment->Allocations().size());
// Continuation of TupleCustomCallAsOutput: the root custom call's top-level
// tuple buffer and both element buffers may live out.
EXPECT_TRUE( GetAllocation(*assignment, custom_call, {}).maybe_live_out()); EXPECT_TRUE( GetAllocation(*assignment, custom_call, {0}).maybe_live_out()); EXPECT_TRUE( GetAllocation(*assignment, custom_call, {1}).maybe_live_out()); }

// A custom call with output_to_operand_aliasing={{}: (0, {})} must share its
// top-level slice with operand 0 after buffer assignment.
TEST_F(BufferAssignmentTest, CustomCallAliasedBuffer) { const char* const kModuleString = R"( HloModule xla_computation_f ENTRY xla_computation_f { parameter.1 = f32[2,3,4,5] parameter(0) parameter.2 = f32[2,3,4,5] parameter(1) add = f32[2,3,4,5] add(parameter.1, parameter.2) ROOT custom-call = f32[2,3,4,5] custom-call(add, parameter.2), custom_call_target="dm_softmax", operand_layout_constraints={f32[2,3,4,5], f32[2,3,4,5]}, output_to_operand_aliasing={{}: (0, {})} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module, ParseAndReturnUnverifiedModule(kModuleString)); std::unique_ptr<BufferAssignment> assignment = RunBufferAssignment(module.get()); HloInstruction* custom_call = module->entry_computation()->root_instruction(); EXPECT_TRUE( assignment->SharesTopLevelSlice(custom_call, custom_call->operand(0))); }

// A call returning a tuple as root: the call result shares allocations with
// the callee's tuple/param buffers; assertions continue on the next line.
TEST_F(BufferAssignmentTest, TupleCallAsOutput) { auto module = CreateNewVerifiedModule(); auto elem_shape = f32vec4_; auto tuple_shape = ShapeUtil::MakeTupleShape({elem_shape}); auto sub_builder = HloComputation::Builder(TestName() + "_sub"); auto sub_param = sub_builder.AddInstruction( HloInstruction::CreateParameter(0, elem_shape, "sub_param")); auto sub_tuple = sub_builder.AddInstruction(HloInstruction::CreateTuple({sub_param})); auto sub_computation = module->AddEmbeddedComputation(sub_builder.Build()); auto builder = HloComputation::Builder(TestName()); auto param = builder.AddInstruction( HloInstruction::CreateParameter(0, elem_shape, "param")); auto call = builder.AddInstruction( HloInstruction::CreateCall(tuple_shape, {param}, sub_computation)); module->AddEntryComputation(builder.Build()); auto assignment = RunBufferAssignment(module.get()); EXPECT_EQ(2, assignment->Allocations().size());
// Continuation of TupleCallAsOutput: call output aliases the callee's tuple
// and parameter buffers, while the entry param aliases only the leaf element.
EXPECT_EQ(GetAllocation(*assignment, call, {}), GetAllocation(*assignment, sub_tuple, {})); EXPECT_EQ(GetAllocation(*assignment, call, {0}), GetAllocation(*assignment, sub_param, {})); EXPECT_NE(GetTopLevelAllocation(*assignment, param), GetTopLevelAllocation(*assignment, sub_tuple)); EXPECT_EQ(GetTopLevelAllocation(*assignment, param), GetTopLevelAllocation(*assignment, sub_param)); }

// Chains calls a -> b -> c -> d, each forwarding a one-element tuple, to
// verify allocation sharing through nested calls; assertions continue on the
// next physical line.
TEST_F(BufferAssignmentTest, TupleChainedCallAsOutput) { auto module = CreateNewVerifiedModule(); auto elem_shape = f32vec4_; auto tuple_shape = ShapeUtil::MakeTupleShape({elem_shape}); auto d_builder = HloComputation::Builder(TestName() + "_d"); auto d_param = d_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "d_param")); auto d_computation = d_builder.Build(); auto c_builder = HloComputation::Builder(TestName() + "_c"); auto c_param = c_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "c_param")); auto c_call = c_builder.AddInstruction( HloInstruction::CreateCall(tuple_shape, {c_param}, d_computation.get())); auto c_computation = c_builder.Build(); auto b_builder = HloComputation::Builder(TestName() + "_b"); auto b_param = b_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "b_param")); auto b_call = b_builder.AddInstruction( HloInstruction::CreateCall(tuple_shape, {b_param}, c_computation.get())); auto b_computation = b_builder.Build(); auto a_builder = HloComputation::Builder(TestName()); auto a_param = a_builder.AddInstruction( HloInstruction::CreateParameter(0, elem_shape, "param")); auto a_tuple = a_builder.AddInstruction(HloInstruction::CreateTuple({a_param})); auto a_call = a_builder.AddInstruction( HloInstruction::CreateCall(tuple_shape, {a_tuple}, b_computation.get())); auto a_computation = a_builder.Build(); module->AddEmbeddedComputation(std::move(d_computation)); module->AddEmbeddedComputation(std::move(c_computation)); module->AddEntryComputation(std::move(a_computation));
// Continuation of TupleChainedCallAsOutput: every call in the chain shares
// the same tuple/element allocations down to d's parameter, while the
// parameters of distinct computations keep distinct top-level buffers.
module->AddEmbeddedComputation(std::move(b_computation)); auto assignment = RunBufferAssignment(module.get()); EXPECT_EQ(GetAllocation(*assignment, a_call, {}), GetAllocation(*assignment, b_call, {})); EXPECT_EQ(GetAllocation(*assignment, b_call, {}), GetAllocation(*assignment, c_call, {})); EXPECT_EQ(GetAllocation(*assignment, c_call, {}), GetAllocation(*assignment, d_param, {})); EXPECT_EQ(GetAllocation(*assignment, a_call, {0}), GetAllocation(*assignment, b_call, {0})); EXPECT_EQ(GetAllocation(*assignment, b_call, {0}), GetAllocation(*assignment, c_call, {0})); EXPECT_EQ(GetAllocation(*assignment, c_call, {0}), GetAllocation(*assignment, d_param, {0})); EXPECT_TRUE(BuffersDistinct({a_param}, {b_param}, *assignment)); EXPECT_TRUE(BuffersDistinct({a_param}, {c_param}, *assignment)); EXPECT_TRUE(BuffersDistinct({a_param}, {d_param}, *assignment)); EXPECT_EQ(GetAllocation(*assignment, b_param, {0}), GetAllocation(*assignment, c_param, {0})); EXPECT_EQ(GetAllocation(*assignment, c_param, {0}), GetAllocation(*assignment, d_param, {0})); }

// A bitcast of the entry parameter as root: bitcasts introduce no new buffer,
// so there is a single allocation shared by param and bitcast.
TEST_F(BufferAssignmentTest, BitcastAsOutput) { auto builder = HloComputation::Builder(TestName()); auto param = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(F32, {42}), "param")); auto bitcast = builder.AddInstruction( HloInstruction::CreateBitcast(param->shape(), param)); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(builder.Build()); auto assignment = RunBufferAssignment(module.get()); EXPECT_EQ(1, assignment->Allocations().size()); EXPECT_EQ(GetTopLevelAllocation(*assignment, param), GetTopLevelAllocation(*assignment, bitcast)); }

// Verifies a tuple buffer is not reused for an unrelated copy; assertions
// continue on the next physical line (includes a proto round-trip).
TEST_F(BufferAssignmentTest, TupleBufferNotReused) { auto builder = HloComputation::Builder(TestName()); auto scalar_shape = ShapeUtil::MakeShape(F32, {}); auto param = builder.AddInstruction( HloInstruction::CreateParameter(0, scalar_shape, "param0")); auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({param})); auto tuple_element
// Continuation of TupleBufferNotReused: after converting the assignment to
// proto and back, the tuple's buffer must stay distinct from the copy's.
= builder.AddInstruction( HloInstruction::CreateGetTupleElement(scalar_shape, tuple, 0)); auto copy = builder.AddInstruction(HloInstruction::CreateUnary( scalar_shape, HloOpcode::kCopy, tuple_element)); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(builder.Build()); auto assignment_orig = RunBufferAssignment(module.get()); TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<BufferAssignment> assignment, ConvertToProtoAndBack(assignment_orig.get(), module.get())); EXPECT_EQ(3, assignment->Allocations().size()); EXPECT_NE(GetTopLevelAllocation(*assignment, tuple), GetTopLevelAllocation(*assignment, copy)); }

// Two dot products feeding a concatenate: both temporaries should be packed
// into one shared temp allocation. Size/offset assertions (at alignment 1 and
// 64) continue on the next physical line.
TEST_F(BufferAssignmentTest, OneTempAllocation) { auto builder = HloComputation::Builder(TestName()); Shape shape_2x3 = ShapeUtil::MakeShape(F32, {2, 3}); Shape shape_2x4 = ShapeUtil::MakeShape(F32, {2, 4}); Shape shape_3x4 = ShapeUtil::MakeShape(F32, {3, 4}); Shape shape_4x4 = ShapeUtil::MakeShape(F32, {4, 4}); Shape shape_5x4 = ShapeUtil::MakeShape(F32, {5, 4}); auto param_a = builder.AddInstruction( HloInstruction::CreateParameter(0, shape_2x3, "param_a")); auto param_b = builder.AddInstruction( HloInstruction::CreateParameter(1, shape_3x4, "param_b")); auto param_c = builder.AddInstruction( HloInstruction::CreateParameter(2, shape_4x4, "param_c")); DotDimensionNumbers dot_dnums; dot_dnums.add_lhs_contracting_dimensions(1); dot_dnums.add_rhs_contracting_dimensions(0); PrecisionConfig precision_config; precision_config.mutable_operand_precision()->Resize( 2, PrecisionConfig::DEFAULT); auto dot_ab = builder.AddInstruction(HloInstruction::CreateDot( shape_2x4, param_a, param_b, dot_dnums, precision_config)); auto dot_bc = builder.AddInstruction(HloInstruction::CreateDot( shape_3x4, param_b, param_c, dot_dnums, precision_config)); builder.AddInstruction( HloInstruction::CreateConcatenate(shape_5x4, {dot_ab, dot_bc}, 0)); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(builder.Build()); auto assignment =
// Continuation of OneTempAllocation: with alignment 1 the two dot slices pack
// into an 80-byte allocation (32 + 48); with alignment 64 the second slice is
// placed at offset 64, so the allocation grows to 64 + size-of-second-slice,
// whichever order the assigner chose.
RunBufferAssignment(module.get(), 1); EXPECT_EQ(5, assignment->Allocations().size()); BufferAllocation::Slice slice_ab = assignment->GetUniqueTopLevelSlice(dot_ab).value(); BufferAllocation::Slice slice_bc = assignment->GetUniqueTopLevelSlice(dot_bc).value(); EXPECT_EQ(slice_ab.allocation(), slice_bc.allocation()); EXPECT_NE(slice_ab, slice_bc); EXPECT_EQ(32, slice_ab.size()); EXPECT_EQ(48, slice_bc.size()); EXPECT_EQ(80, slice_ab.allocation()->size()); EXPECT_EQ(80, slice_bc.allocation()->size()); assignment = RunBufferAssignment(module.get(), 64); EXPECT_EQ(5, assignment->Allocations().size()); slice_ab = assignment->GetUniqueTopLevelSlice(dot_ab).value(); slice_bc = assignment->GetUniqueTopLevelSlice(dot_bc).value(); EXPECT_EQ(slice_ab.allocation(), slice_bc.allocation()); EXPECT_NE(slice_ab, slice_bc); EXPECT_EQ(32, slice_ab.size()); EXPECT_EQ(48, slice_bc.size()); if (slice_ab.offset() == 0) { EXPECT_EQ(64, slice_bc.offset()); EXPECT_EQ(64 + 48, slice_ab.allocation()->size()); EXPECT_EQ(64 + 48, slice_bc.allocation()->size()); } else { EXPECT_EQ(64, slice_ab.offset()); EXPECT_EQ(0, slice_bc.offset()); EXPECT_EQ(64 + 32, slice_ab.allocation()->size()); EXPECT_EQ(64 + 32, slice_bc.allocation()->size()); } }

// Simple elementwise chain (broadcast*p1 + p2 - p2): checks that the buffer
// holding `mul` reports exactly one peak logical buffer; assertions continue
// on the next physical line.
TEST_F(BufferAssignmentTest, TrivialPeakBuffers) { auto builder = HloComputation::Builder(TestName()); auto paramscalar = builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p")); auto broadcast = builder.AddInstruction( HloInstruction::CreateBroadcast(f32vec100_, paramscalar, {})); auto param0 = builder.AddInstruction( HloInstruction::CreateParameter(1, f32vec100_, "p1")); auto param1 = builder.AddInstruction( HloInstruction::CreateParameter(2, f32vec100_, "p2")); auto mul = builder.AddInstruction(HloInstruction::CreateBinary( f32vec100_, HloOpcode::kMultiply, broadcast, param0)); auto add = builder.AddInstruction( HloInstruction::CreateBinary(f32vec100_, HloOpcode::kAdd, mul, param1)); auto sub =
// Continuation of TrivialPeakBuffers: the single peak logical buffer in mul's
// allocation belongs to the final subtract.
builder.AddInstruction(HloInstruction::CreateBinary( f32vec100_, HloOpcode::kSubtract, add, param1)); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(builder.Build()); auto buffers = RunBufferAssignment(module.get()); const BufferAllocation& mul_buffer = GetTopLevelAllocation(*buffers, mul); const std::vector<const HloValue*>& peak_buffers = mul_buffer.PeakMemoryLogicalBuffers(); ASSERT_EQ(peak_buffers.size(), 1); EXPECT_EQ(peak_buffers[0]->instruction(), sub); }

// With an explicit instruction schedule, the preallocated temp holding the
// concatenate should report rev, neg and concat as its peak-memory logical
// buffers (checked at the start of the next physical line).
TEST_F(BufferAssignmentTest, PeakBuffers) { auto builder = HloComputation::Builder(TestName()); auto param = builder.AddInstruction( HloInstruction::CreateParameter(0, f32vec100_, "p")); auto log = builder.AddInstruction( HloInstruction::CreateUnary(f32vec100_, HloOpcode::kLog, param)); auto rev = builder.AddInstruction( HloInstruction::CreateReverse(f32vec100_, log, {0})); auto neg = builder.AddInstruction( HloInstruction::CreateUnary(f32vec100_, HloOpcode::kNegate, param)); const Shape concat_shape = ShapeUtil::MakeShape(F32, {200}); auto concat = builder.AddInstruction( HloInstruction::CreateConcatenate(concat_shape, {rev, neg}, 0)); auto root = builder.AddInstruction(HloInstruction::CreateSlice( ShapeUtil::MakeShape(F32, {1}), concat, {0}, {1}, {1})); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(builder.Build()); auto buffers = RunBufferAssignmentWithInstructionSequence( module.get(), {param, log, rev, neg, concat, root}); const BufferAllocation& buffer = GetTopLevelAllocation(*buffers, concat); EXPECT_FALSE(buffer.IsInputOrOutput()); EXPECT_TRUE(buffer.IsPreallocatedTempBuffer()); ASSERT_EQ(buffer.assigned_buffers().size(), 4); const std::vector<const HloValue*>& peak_buffers = buffer.PeakMemoryLogicalBuffers(); ASSERT_EQ(peak_buffers.size(), 3); std::vector<const HloInstruction*> peak_instructions; for (const HloValue* logical_buffer : peak_buffers) { peak_instructions.push_back(logical_buffer->instruction()); } EXPECT_THAT(peak_instructions,
// Tail of PeakBuffers: order-insensitive check of the peak instructions.
UnorderedElementsAre(rev, neg, concat)); }

// With a while loop whose input/output alias the `copy` value, only one
// array-shaped HloValue may appear among the peak buffers of copy's
// allocation — aliased values must not be double counted.
TEST_F(BufferAssignmentTest, AliasedBuffersShouldntCoexistInPeakBuffers) { std::string hlo_text = R"( HloModule test_module, is_scheduled=true cond { param = (s32[], s32[]) parameter(0) ROOT constant = pred[] constant(true) } body { param.0 = (s32[], s32[]) parameter(0) gte = s32[] get-tuple-element(param.0), index=0 add = s32[] add(gte, gte) ROOT tuple = (s32[], s32[]) tuple(add, add) } ENTRY test_module { param.3 = s32[] parameter(0) copy = s32[] copy(param.3) tuple = (s32[], s32[]) tuple(copy, copy) while = (s32[], s32[]) while(tuple), condition=cond, body=body gte = s32[] get-tuple-element(while), index=0 ROOT negate = s32[] negate(gte) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text)); auto assignment = RunBufferAssignmentWithSequentialOrdering(module.get()); const BufferAllocation& buffer = GetTopLevelAllocation(*assignment, FindInstruction(module.get(), "copy")); const std::vector<const HloValue*>& peak_buffers = buffer.PeakMemoryLogicalBuffers(); int num_peak_buffers = 0; for (const HloValue* peak_buffer : peak_buffers) { if (peak_buffer->shape().IsArray()) { ++num_peak_buffers; } } EXPECT_EQ(num_peak_buffers, 1); }

// Two chained dynamic-update-slices on a tuple element: both should be
// assigned in place, sharing the parameter's allocation (asserted on the
// next physical line). The HLO raw string continues across the line break.
TEST_F(BufferAssignmentTest, InPlaceBuffer) { const char* hlo_text = R"( HloModule Module ENTRY main { state = (s32[], f32[1280,1,128]{2,1,0}) parameter(0) constant.1 = f32[] constant(0) broadcast.6 = f32[128,1,128]{2,1,0} broadcast(constant.1), dimensions={} get-tuple-element.4 = f32[1280,1,128]{2,1,0} get-tuple-element(state), index=1 get-tuple-element.3 = s32[] get-tuple-element(state), index=0 constant.2 = s32[] constant(128) add.5 = s32[] add(get-tuple-element.3, constant.2) constant.3 = s32[] constant(0) dynamic-update-slice.5 = f32[1280,1,128]{2,1,0} dynamic-update-slice(get-tuple-element.4, broadcast.6, constant.3, constant.3, constant.3) dynamic-update-slice.9 = f32[1280,1,128]{2,1,0} dynamic-update-slice(dynamic-update-slice.5, broadcast.6, constant.3,
constant.3, constant.3) ROOT tuple.85 = (s32[], f32[1280,1,128]{2,1,0}) tuple(add.5, dynamic-update-slice.9) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text)); HloInstruction* parameter = m->entry_computation()->GetInstructionWithName("get-tuple-element.4"); HloInstruction* dus1 = m->entry_computation()->GetInstructionWithName("dynamic-update-slice.5"); HloInstruction* dus2 = m->entry_computation()->GetInstructionWithName("dynamic-update-slice.9"); auto buffers = RunBufferAssignment(m.get()); { const BufferAllocation& parameter_alloc = GetTopLevelAllocation(*buffers, parameter); const BufferAllocation& dus1_alloc = GetTopLevelAllocation(*buffers, dus1); EXPECT_EQ(parameter_alloc, dus1_alloc); const BufferAllocation& dus2_alloc = GetTopLevelAllocation(*buffers, dus2); EXPECT_EQ(parameter_alloc, dus2_alloc); } }

// Constant allocations must not be reused for other values: no kCopy or
// kConditional buffer may be co-assigned into either constant's allocation
// (the checks for the second constant finish on the next physical line).
TEST_F(BufferAssignmentTest, ConstantBuffersAreNotReused) { const char* hlo_text = R"( HloModule Module True { ROOT x.0.1 = f32[] parameter(0) } False { x.0.0 = f32[] parameter(0) ROOT copy.1 = f32[] copy(x.0.0) } ENTRY main { pred.1.0 = pred[] parameter(0) constant.1.1 = f32[] constant(56) copy.2 = f32[] copy(constant.1.1) constant.1.2 = f32[] constant(12) ROOT conditional.1.3 = f32[] conditional(pred.1.0, copy.2, constant.1.2), true_computation=True, false_computation=False } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text)); HloInstruction* constant_1 = m->entry_computation()->GetInstructionWithName("constant.1.1"); HloInstruction* constant_2 = m->entry_computation()->GetInstructionWithName("constant.1.2"); auto buffers = RunBufferAssignment(m.get()); { const BufferAllocation& allocation_for_const_1 = GetTopLevelAllocation(*buffers, constant_1); EXPECT_TRUE(allocation_for_const_1.is_constant()); for (const auto& buffer_offset_pair : allocation_for_const_1.assigned_buffers()) { EXPECT_NE(buffer_offset_pair.first->instruction()->opcode(), HloOpcode::kCopy);
// Tail of ConstantBuffersAreNotReused: same no-reuse checks for constant.1.2.
EXPECT_NE(buffer_offset_pair.first->instruction()->opcode(), HloOpcode::kConditional); } } { const BufferAllocation& allocation_for_const_2 = GetTopLevelAllocation(*buffers, constant_2); EXPECT_TRUE(allocation_for_const_2.is_constant()); for (const auto& buffer_offset_pair : allocation_for_const_2.assigned_buffers()) { EXPECT_NE(buffer_offset_pair.first->instruction()->opcode(), HloOpcode::kCopy); EXPECT_NE(buffer_offset_pair.first->instruction()->opcode(), HloOpcode::kConditional); } } }

// Fixture for while-loop buffer-assignment tests. Provides builders for a
// canonical condition (0 < 10) and a body that multiplies input by weights
// over a (data, weights, output) tuple loop state, plus a RunBufferAssignment
// helper using a sequential schedule; definition closes on the next line.
class WhileBufferAssignmentTest : public HloTestBase { protected: std::unique_ptr<HloComputation> BuildWhileConditionComputation( const std::string& name) { auto builder = HloComputation::Builder(name); builder.AddInstruction( HloInstruction::CreateParameter(0, loop_state_shape_, "loop_state")); auto zero = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(0))); auto ten = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(10))); builder.AddInstruction(HloInstruction::CreateCompare( ShapeUtil::MakeShape(PRED, {}), zero, ten, ComparisonDirection::kLt)); return builder.Build(); } std::unique_ptr<HloComputation> BuildWhileBodyComputation( const std::string& name) { auto builder = HloComputation::Builder(name); auto loop_state = builder.AddInstruction( HloInstruction::CreateParameter(0, loop_state_shape_, "loop_state")); auto input = builder.AddInstruction( HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 0)); auto weights = builder.AddInstruction( HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 1)); auto output = builder.AddInstruction(HloInstruction::CreateBinary( data_shape_, HloOpcode::kMultiply, input, weights)); builder.AddInstruction( HloInstruction::CreateTuple({input, weights, output})); return builder.Build(); } std::unique_ptr<BufferAssignment> RunBufferAssignment(HloModule* module, int64_t alignment = 1) { HloSchedule schedule = ScheduleModule(module, ByteSizeOf).value(); return
// Tail of WhileBufferAssignmentTest: run the assigner over a sequential
// ordering with a per-color alignment, plus the fixture's size function and
// loop-state shapes.
BufferAssigner::Run( module, std::make_unique<SequentialHloOrdering>(schedule), ByteSizeOf, [alignment](LogicalBuffer::Color) { return alignment; }, true) .value(); } static int64_t ByteSizeOf(const BufferValue& buffer) { return ShapeUtil::ByteSizeOf(buffer.shape(), sizeof(void*)); } Shape data_shape_ = ShapeUtil::MakeShape(F32, {4}); Shape loop_state_shape_ = ShapeUtil::MakeTupleShape({data_shape_, data_shape_, data_shape_}); };

// Test helper: run the copy-insertion pass and expect it to succeed.
static void RunCopyInsertion(HloModule* module) { CopyInsertion copy_insertion; EXPECT_IS_OK(copy_insertion.Run(module).status()); }

// Two sequential while loops where the second consumes element {2} of the
// first's result: checks input/weights/output slice sharing across the loop
// boundaries (assertions finish on the next physical line).
TEST_F(WhileBufferAssignmentTest, TwoForwardWhileLoops) { auto module = CreateNewVerifiedModule(); auto builder = HloComputation::Builder("entry"); auto input0 = builder.AddInstruction( HloInstruction::CreateParameter(0, data_shape_, "input0")); auto weights0 = builder.AddInstruction( HloInstruction::CreateParameter(1, data_shape_, "weights0")); auto weights1 = builder.AddInstruction( HloInstruction::CreateParameter(2, data_shape_, "weights1")); auto zero = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0))); auto output0 = builder.AddInstruction( HloInstruction::CreateBroadcast(data_shape_, zero, {})); auto output1 = builder.AddInstruction( HloInstruction::CreateBroadcast(data_shape_, zero, {})); auto cond0 = module->AddEmbeddedComputation(BuildWhileConditionComputation("cond")); auto body0 = module->AddEmbeddedComputation(BuildWhileBodyComputation("body")); auto tuple0 = builder.AddInstruction( HloInstruction::CreateTuple({input0, weights0, output0})); auto while0 = builder.AddInstruction( HloInstruction::CreateWhile(loop_state_shape_, cond0, body0, tuple0)); auto cond1 = module->AddEmbeddedComputation(BuildWhileConditionComputation("cond")); auto body1 = module->AddEmbeddedComputation(BuildWhileBodyComputation("body")); auto input1 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(data_shape_, while0, 2)); auto tuple1 = builder.AddInstruction(
// Tail of TwoForwardWhileLoops: entry params colocate with the matching while
// tuple elements, and while0's output {2} colocates with while1's input {0}.
HloInstruction::CreateTuple({input1, weights1, output1})); auto while1 = builder.AddInstruction( HloInstruction::CreateWhile(loop_state_shape_, cond1, body1, tuple1)); module->AddEntryComputation(builder.Build()); RunCopyInsertion(module.get()); auto assignment = RunBufferAssignment(module.get()); EXPECT_EQ(assignment->GetUniqueSlice(input0, {}).value(), assignment->GetUniqueSlice(while0, {0}).value()); EXPECT_EQ(assignment->GetUniqueSlice(weights0, {}).value(), assignment->GetUniqueSlice(while0, {1}).value()); EXPECT_EQ(assignment->GetUniqueSlice(while0, {2}).value(), assignment->GetUniqueSlice(while1, {0}).value()); EXPECT_EQ(assignment->GetUniqueSlice(weights1, {}).value(), assignment->GetUniqueSlice(while1, {1}).value()); }

// An entry parameter flowing into while.0: the parameter should colocate with
// while.0 but not with the downstream while.1 (assertions finish on the next
// physical line). Also asserts copy insertion adds no instructions here.
TEST_F(WhileBufferAssignmentTest, ColocatedBufferWithEntryParameter) { const Shape r0s32 = ShapeUtil::MakeShape(S32, {}); const char* module_str = R"( HloModule test_module %cond.v0 { %param = s32[] parameter(0) ROOT %constant = pred[] constant(true) } %cond.v1 { %param.0 = s32[] parameter(0) ROOT %constant.0 = pred[] constant(true) } %body.v0 { ROOT %param.1 = s32[] parameter(0) } %body.v1 { %param.2 = s32[] parameter(0) ROOT add = s32[] add(%param.2, %param.2) } ENTRY %test_module { %param.3 = s32[] parameter(0) %while.0 = s32[] while(%param.3), condition=%cond.v0, body=%body.v0 %mul = s32[] multiply(%while.0, %while.0) %while.1 = s32[] while(%mul), condition=%cond.v1, body=%body.v1 ROOT %bcast = s32[1024,1024]{1,0} broadcast(s32[] %while.1), dimensions={} })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); int64_t instruction_count = m->instruction_count(); CopyInsertion copy_insertion; ASSERT_IS_OK(copy_insertion.Run(m.get()).status()); ASSERT_EQ(instruction_count, m->instruction_count()); const HloInstruction* bcast = m->entry_computation()->root_instruction(); const HloInstruction* param = m->entry_computation()->parameter_instruction(0); ASSERT_EQ(bcast->opcode(), HloOpcode::kBroadcast); const
// Tail of ColocatedBufferWithEntryParameter: parameter shares while.0's slice
// but not while.1's.
HloInstruction* while1 = bcast->operand(0); ASSERT_EQ(while1->opcode(), HloOpcode::kWhile); const HloInstruction* while0 = while1->operand(0)->operand(0); ASSERT_EQ(while0->opcode(), HloOpcode::kWhile); auto assignment = RunBufferAssignment(m.get()); TF_ASSERT_OK_AND_ASSIGN(auto slice_param, assignment->GetUniqueSlice(param, {})); TF_ASSERT_OK_AND_ASSIGN(auto slice_while0, assignment->GetUniqueSlice(while0, {})); TF_ASSERT_OK_AND_ASSIGN(auto slice_while1, assignment->GetUniqueSlice(while1, {})); EXPECT_EQ(slice_param, slice_while0); EXPECT_NE(slice_param, slice_while1); }

// Same shape as the previous test but the while input is a constant: the
// constant should colocate with while.0 and not while.1 (assertions finish on
// the next physical line).
TEST_F(WhileBufferAssignmentTest, ColocatedBufferWithConstant) { const Shape r0s32 = ShapeUtil::MakeShape(S32, {}); const char* module_str = R"( HloModule test_module %cond.v0 { %param = s32[] parameter(0) ROOT %constant = pred[] constant(true) } %cond.v1 { %param.0 = s32[] parameter(0) ROOT %constant.0 = pred[] constant(true) } %body.v0 { ROOT %param.1 = s32[] parameter(0) } %body.v1 { %param.2 = s32[] parameter(0) ROOT add = s32[] add(%param.2, %param.2) } ENTRY %test_module { %constant.42 = s32[] constant(42) %while.0 = s32[] while(%constant.42), condition=%cond.v0, body=%body.v0 %mul = s32[] multiply(%while.0, %while.0) %while.1 = s32[] while(%mul), condition=%cond.v1, body=%body.v1 ROOT %bcast = s32[1024,1024]{1,0} broadcast(s32[] %while.1), dimensions={} })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); int64_t instruction_count = m->instruction_count(); CopyInsertion copy_insertion; ASSERT_IS_OK(copy_insertion.Run(m.get()).status()); ASSERT_EQ(instruction_count, m->instruction_count()); const HloInstruction* bcast = m->entry_computation()->root_instruction(); const HloInstruction* constant = m->entry_computation()->GetInstructionWithName("constant.42"); ASSERT_EQ(bcast->opcode(), HloOpcode::kBroadcast); const HloInstruction* while1 = bcast->operand(0); ASSERT_EQ(while1->opcode(), HloOpcode::kWhile); const HloInstruction* while0 = while1->operand(0)->operand(0);
// Tail of ColocatedBufferWithConstant: the constant shares while.0's slice
// but not while.1's.
ASSERT_EQ(while0->opcode(), HloOpcode::kWhile); auto assignment = RunBufferAssignment(m.get()); TF_ASSERT_OK_AND_ASSIGN(auto slice_constant, assignment->GetUniqueSlice(constant, {})); TF_ASSERT_OK_AND_ASSIGN(auto slice_while0, assignment->GetUniqueSlice(while0, {})); TF_ASSERT_OK_AND_ASSIGN(auto slice_while1, assignment->GetUniqueSlice(while1, {})); EXPECT_EQ(slice_constant, slice_while0); EXPECT_NE(slice_constant, slice_while1); }

// Three while loops (two chained from an infeed, one independent) feeding a
// tuple root: verifies colocation of chained whiles and distinctness of the
// independent one under an explicit schedule; the build and assertions
// continue on the next physical line.
TEST_F(WhileBufferAssignmentTest, ColocatedBuffers) { const Shape r0s32 = ShapeUtil::MakeShape(S32, {}); auto build_cond = [&]() { auto builder = HloComputation::Builder("cond"); auto const4 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(4))); auto param = builder.AddInstruction(HloInstruction::CreateParameter(0, r0s32, "x")); builder.AddInstruction( HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), param, const4, ComparisonDirection::kLt)); return builder.Build(); }; auto build_body = [&]() { auto builder = HloComputation::Builder("body"); auto const9 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(9))); auto param = builder.AddInstruction(HloInstruction::CreateParameter(0, r0s32, "x")); builder.AddInstruction( HloInstruction::CreateBinary(r0s32, HloOpcode::kAdd, param, const9)); return builder.Build(); }; auto module = CreateNewVerifiedModule(); auto builder = HloComputation::Builder("entry"); auto token = builder.AddInstruction(HloInstruction::CreateToken()); auto infeed = builder.AddInstruction(HloInstruction::CreateInfeed(r0s32, token, "")); auto infeed_data = builder.AddInstruction( HloInstruction::CreateGetTupleElement(r0s32, infeed, 0)); auto cond0 = module->AddEmbeddedComputation(build_cond()); auto body0 = module->AddEmbeddedComputation(build_body()); auto while0 = builder.AddInstruction( HloInstruction::CreateWhile(r0s32, cond0, body0, infeed_data)); auto cond1 = module->AddEmbeddedComputation(build_cond()); auto body1 =
// Tail of ColocatedBuffers: schedule the entry sequence explicitly, run the
// assigner with the backend's size function, and check that tuple element {1}
// colocates with the chained while0/while1 while element {0} colocates with
// the independent while2.
module->AddEmbeddedComputation(build_body()); auto while1 = builder.AddInstruction( HloInstruction::CreateWhile(r0s32, cond1, body1, while0)); auto zero = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0))); auto add = builder.AddInstruction( HloInstruction::CreateBinary(r0s32, HloOpcode::kAdd, zero, zero)); auto cond2 = module->AddEmbeddedComputation(build_cond()); auto body2 = module->AddEmbeddedComputation(build_body()); auto while2 = builder.AddInstruction( HloInstruction::CreateWhile(r0s32, cond2, body2, add)); auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({while2, while1})); module->AddEntryComputation(builder.Build()); int64_t instruction_count = module->instruction_count(); CopyInsertion copy_insertion; ASSERT_IS_OK(copy_insertion.Run(module.get()).status()); ASSERT_EQ(instruction_count, module->instruction_count()); TF_ASSERT_OK_AND_ASSIGN( HloSchedule schedule, ScheduleModule(module.get(), [](const BufferValue& buffer) { return ShapeUtil::ByteSizeOf(buffer.shape(), sizeof(void*)); })); schedule.set_sequence( module->entry_computation(), {token, infeed, infeed_data, while0, while1, zero, add, while2, tuple}); TF_ASSERT_OK(schedule.Verify()); TF_ASSERT_OK_AND_ASSIGN( auto assignment, BufferAssigner::Run( module.get(), std::make_unique<SequentialHloOrdering>(schedule), backend().compiler()->BufferSizeBytesFunction(), [](LogicalBuffer::Color) { return 1; }, true)); TF_ASSERT_OK_AND_ASSIGN(auto slice0, assignment->GetUniqueSlice(tuple, {0})); TF_ASSERT_OK_AND_ASSIGN(auto slice1, assignment->GetUniqueSlice(tuple, {1})); EXPECT_NE(slice0, slice1); TF_ASSERT_OK_AND_ASSIGN(auto slice_while0, assignment->GetUniqueSlice(while0, {})); TF_ASSERT_OK_AND_ASSIGN(auto slice_while1, assignment->GetUniqueSlice(while1, {})); EXPECT_EQ(slice1, slice_while0); EXPECT_EQ(slice1, slice_while1); TF_ASSERT_OK_AND_ASSIGN(auto slice_while2, assignment->GetUniqueSlice(while2, {})); EXPECT_EQ(slice0, slice_while2); }
TEST_F(WhileBufferAssignmentTest, OneForwardBackwardWhileLoopSet) { auto module = CreateNewVerifiedModule(); auto builder = HloComputation::Builder("entry"); auto input0 = builder.AddInstruction( HloInstruction::CreateParameter(0, data_shape_, "input0")); auto weights0 = builder.AddInstruction( HloInstruction::CreateParameter(1, data_shape_, "weights0")); auto zero = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0))); auto output0 = builder.AddInstruction( HloInstruction::CreateBroadcast(data_shape_, zero, {})); auto cond0 = module->AddEmbeddedComputation(BuildWhileConditionComputation("cond")); auto body0 = module->AddEmbeddedComputation(BuildWhileBodyComputation("body")); auto tuple0 = builder.AddInstruction( HloInstruction::CreateTuple({input0, weights0, output0})); auto while0 = builder.AddInstruction( HloInstruction::CreateWhile(loop_state_shape_, cond0, body0, tuple0)); auto cond1 = module->AddEmbeddedComputation(BuildWhileConditionComputation("cond")); auto body1 = module->AddEmbeddedComputation(BuildWhileBodyComputation("body")); auto while1 = builder.AddInstruction( HloInstruction::CreateWhile(loop_state_shape_, cond1, body1, while0)); module->AddEntryComputation(builder.Build()); RunCopyInsertion(module.get()); auto assignment = RunBufferAssignment(module.get()); EXPECT_EQ(assignment->GetUniqueSlice(while0, {0}).value(), assignment->GetUniqueSlice(while1, {0}).value()); EXPECT_EQ(assignment->GetUniqueSlice(while0, {1}).value(), assignment->GetUniqueSlice(while1, {1}).value()); EXPECT_EQ(assignment->GetUniqueSlice(while0, {2}).value(), assignment->GetUniqueSlice(while1, {2}).value()); } TEST_F(BufferAssignmentTest, TwoCalls) { auto module = CreateNewVerifiedModule(); Shape r0f32 = ShapeUtil::MakeShape(xla::F32, {}); HloComputation* sub_computation; { auto builder = HloComputation::Builder(TestName() + "_sub_comp"); auto param = builder.AddInstruction( HloInstruction::CreateParameter(0, r0f32, "param")); auto 
constant1 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto add = builder.AddInstruction( HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, param, constant1)); sub_computation = module->AddEmbeddedComputation(builder.Build(add)); } auto builder = HloComputation::Builder(TestName()); auto constant2 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); auto constant3 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0))); auto call1 = builder.AddInstruction( HloInstruction::CreateCall(r0f32, {constant2}, sub_computation)); auto call2 = builder.AddInstruction( HloInstruction::CreateCall(r0f32, {constant3}, sub_computation)); auto add1 = builder.AddInstruction( HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, call1, constant2)); auto add2 = builder.AddInstruction( HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, call2, add1)); module->AddEntryComputation(builder.Build(add2)); { FlattenCallGraph flatten; TF_ASSERT_OK_AND_ASSIGN(bool result, flatten.Run(module.get())); EXPECT_TRUE(result); std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module.get()); } RunCopyInsertion(module.get()); auto assignment = RunBufferAssignment(module.get()); EXPECT_TRUE(BuffersDistinct({call1}, {call2}, *assignment)); } TEST_F(BufferAssignmentTest, CallParamCoAllocation) { const char* hlo_text = R"( HloModule CallParamCoAllocation Callee { param0 = (f32[100],(f32[200],f32[300])) parameter(0) param1 = s32[20] parameter(1) ROOT constant = f32[] constant(1) } ENTRY Main { entry_param0 = f32[100] parameter(0) entry_param1 = s32[20] parameter(1) custom_call = (f32[200],f32[300]) custom-call(), custom_call_target="call-target" call_op0 = (f32[100],(f32[200],f32[300])) tuple(entry_param0, custom_call) ROOT call_result = f32[] call(call_op0, entry_param1), to_apply=Callee } )"; HloModuleConfig config; config.set_debug_options(GetDebugOptionsFromFlags()); 
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text, config)); auto buffers = RunBufferAssignment(m.get()); HloComputation* main = m->entry_computation(); HloComputation* callee = m->GetComputationWithName("Callee"); EXPECT_NE(callee, nullptr); HloInstruction* param0 = callee->parameter_instruction(0); HloInstruction* param1 = callee->parameter_instruction(1); HloInstruction* entry_param0 = main->parameter_instruction(0); HloInstruction* entry_param1 = main->parameter_instruction(1); HloInstruction* custom_call = main->GetInstructionWithName("custom_call"); EXPECT_EQ(GetAllocation(*buffers, entry_param0, {}), GetAllocation(*buffers, param0, {0})); EXPECT_EQ(GetAllocation(*buffers, entry_param1, {}), GetAllocation(*buffers, param1, {})); EXPECT_EQ(GetAllocation(*buffers, custom_call, {}), GetAllocation(*buffers, param0, {1})); EXPECT_EQ(GetAllocation(*buffers, custom_call, {0}), GetAllocation(*buffers, param0, {1, 0})); EXPECT_EQ(GetAllocation(*buffers, custom_call, {1}), GetAllocation(*buffers, param0, {1, 1})); } TEST_F(BufferAssignmentTest, AsyncCall) { const char* hlo_text = R"( HloModule AsyncCall, is_scheduled=true %called_computation (param_0: f32[4096], param_1: f32[4096]) -> f32[4096] { %param_0 = f32[4096]{0} parameter(0) %param_1 = f32[4096]{0} parameter(1) %negate_0 = f32[4096]{0} negate(f32[4096]{0} %param_0) %negate_1 = f32[4096]{0} negate(f32[4096]{0} %param_1) %negate_2 = f32[4096]{0} negate(f32[4096]{0} %negate_1) %negate_3 = f32[4096]{0} negate(f32[4096]{0} %negate_2) ROOT %result.1 = f32[4096]{0} add(f32[4096]{0} %negate_0, f32[4096]{0} %negate_3) } ENTRY %main (a: f32[4096], b: f32[4096]) -> f32[4096] { %a = f32[4096]{0} parameter(0) %b = f32[4096]{0} parameter(1) %async-start = ((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) call-start(f32[4096]{0} %a, f32[4096]{0} %b), to_apply=%called_computation %negate_4 = f32[4096]{0} negate(f32[4096]{0} %a) %negate_5 = f32[4096]{0} negate(f32[4096]{0} %b) %negate_6 = f32[4096]{0} 
negate(f32[4096]{0} %negate_5) %negate_7 = f32[4096]{0} negate(f32[4096]{0} %negate_6) %add_0 = f32[4096]{0} add(f32[4096]{0} %negate_4, f32[4096]{0} %negate_7) %async-done = f32[4096]{0} call-done(((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) %async-start) ROOT %add_1 = f32[4096]{0} add(f32[4096]{0} %add_0, f32[4096]{0} %async-done) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text)); auto buffers = RunBufferAssignmentWithSequentialOrdering(m.get()); LOG(INFO) << buffers->ToString(); auto get_slice = [&](std::string_view hlo_name, const ShapeIndex& index) { return buffers->GetUniqueSlice(FindInstruction(m.get(), hlo_name), index) .value(); }; EXPECT_EQ(get_slice("param_0", {}), get_slice("a", {})); EXPECT_EQ(get_slice("param_1", {}), get_slice("b", {})); EXPECT_EQ(get_slice("result.1", {}), get_slice("async-done", {})); for (const auto& hlo_name : {"negate_0", "negate_1", "negate_2", "negate_3"}) { EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_4", {})); EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_5", {})); EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_6", {})); EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_7", {})); EXPECT_NE(get_slice(hlo_name, {}), get_slice("add_0", {})); } } TEST_F(BufferAssignmentTest, AsyncCallPrivateStack) { const char* hlo_text = R"( HloModule AsyncCall, is_scheduled=true %called_computation (param_0: f32[4096], param_1: f32[4096]) -> f32[4096] { %param_0 = f32[4096]{0} parameter(0) %param_1 = f32[4096]{0} parameter(1) %negate_0 = f32[4096]{0} negate(f32[4096]{0} %param_0) %negate_1 = f32[4096]{0} negate(f32[4096]{0} %param_1) %negate_2 = f32[4096]{0} negate(f32[4096]{0} %negate_1) %negate_3 = f32[4096]{0} negate(f32[4096]{0} %negate_2) ROOT %result.1 = f32[4096]{0} add(f32[4096]{0} %negate_0, f32[4096]{0} %negate_3) }, execution_thread="foobar" ENTRY %main (a: f32[4096], b: f32[4096]) -> f32[4096] { %a = f32[4096]{0} parameter(0) %b = f32[4096]{0} parameter(1) %async-start = 
((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) call-start(f32[4096]{0} %a, f32[4096]{0} %b), async_execution_thread="foobar", to_apply=%called_computation %negate_4 = f32[4096]{0} negate(f32[4096]{0} %a) %negate_5 = f32[4096]{0} negate(f32[4096]{0} %b) %negate_6 = f32[4096]{0} negate(f32[4096]{0} %negate_5) %negate_7 = f32[4096]{0} negate(f32[4096]{0} %negate_6) %add_0 = f32[4096]{0} add(f32[4096]{0} %negate_4, f32[4096]{0} %negate_7) %async-done = f32[4096]{0} call-done(((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) %async-start) ROOT %add_1 = f32[4096]{0} add(f32[4096]{0} %add_0, f32[4096]{0} %async-done) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text)); auto colorer = [](HloAliasAnalysis* alias_analysis, const HloOrdering&) { for (const HloBuffer& buffer : alias_analysis->buffers()) { int color = 1; for (const HloValue* value : buffer.values()) { if (absl::c_any_of( value->positions(), [](const HloPosition& position) { return position.instruction->parent()->execution_thread() != "foobar"; }) || absl::c_any_of(value->GetUses(), [](const HloUse& use) { return use.instruction->parent()->execution_thread() != "foobar"; })) { color = 0; } } for (const HloValue* value : buffer.values()) { const HloPosition& defining_position = value->defining_position(); if (defining_position.shape().has_layout()) { const int memory_space = defining_position.shape().layout().memory_space(); if (memory_space != 0) { color = memory_space; } } alias_analysis->dataflow_analysis() .GetValue(value->id()) .set_color(BufferValue::Color(color)); } } return absl::OkStatus(); }; BufferAssigner::PrivateStacks private_stacks; private_stacks[1] = {FindComputation(m.get(), "called_computation")}; auto buffers = RunBufferAssignmentWithSequentialOrdering( m.get(), 1, colorer, private_stacks); LOG(INFO) << buffers->ToString(); auto get_slice = [&](std::string_view hlo_name, const ShapeIndex& index) { return buffers->GetUniqueSlice(FindInstruction(m.get(), 
hlo_name), index) .value(); }; EXPECT_EQ(get_slice("param_0", {}), get_slice("a", {})); EXPECT_EQ(get_slice("param_1", {}), get_slice("b", {})); EXPECT_EQ(get_slice("result.1", {}), get_slice("async-done", {})); for (const auto& hlo_name : {"negate_0", "negate_1", "negate_2", "negate_3"}) { EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_4", {})); EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_5", {})); EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_6", {})); EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_7", {})); EXPECT_NE(get_slice(hlo_name, {}), get_slice("add_0", {})); } EXPECT_NE(get_slice("negate_0", {}), get_slice("negate_1", {})); EXPECT_EQ(get_slice("negate_1", {}), get_slice("negate_2", {})); EXPECT_EQ(get_slice("negate_1", {}), get_slice("negate_3", {})); } TEST_F(BufferAssignmentTest, MultipleAsyncCallPrivateStack) { const char* hlo_text = R"( HloModule AsyncCall, is_scheduled=true %called_computation1 { %param_0 = f32[4096]{0} parameter(0) %param_1 = f32[4096]{0} parameter(1) %negate_0 = f32[4096]{0} negate(f32[4096]{0} %param_0) %negate_1 = f32[4096]{0} negate(f32[4096]{0} %param_1) %negate_2 = f32[4096]{0} negate(f32[4096]{0} %negate_1) %negate_3 = f32[4096]{0} negate(f32[4096]{0} %negate_2) ROOT %result.1 = f32[4096]{0} add(f32[4096]{0} %negate_0, f32[4096]{0} %negate_3) }, execution_thread="foobar" %called_computation2 { %param_2 = f32[4096]{0} parameter(0) %param_3 = f32[4096]{0} parameter(1) %negate_4 = f32[4096]{0} negate(f32[4096]{0} %param_2) %negate_5 = f32[4096]{0} negate(f32[4096]{0} %param_3) ROOT %result.2 = f32[4096]{0} add(f32[4096]{0} %negate_4, f32[4096]{0} %negate_5) }, execution_thread="foobar" ENTRY %main (a: f32[4096], b: f32[4096]) -> f32[4096] { %a = f32[4096]{0} parameter(0) %b = f32[4096]{0} parameter(1) %async-start.1 = ((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) call-start(f32[4096]{0} %a, f32[4096]{0} %b), async_execution_thread="foobar", to_apply=%called_computation1 %async-start.2 = 
((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) call-start(f32[4096]{0} %b, f32[4096]{0} %a), async_execution_thread="foobar", to_apply=%called_computation2 %negate_6 = f32[4096]{0} negate(f32[4096]{0} %a) %negate_7 = f32[4096]{0} negate(f32[4096]{0} %b) %negate_8 = f32[4096]{0} negate(f32[4096]{0} %negate_7) %negate_9 = f32[4096]{0} negate(f32[4096]{0} %negate_8) %add_0 = f32[4096]{0} add(f32[4096]{0} %negate_6, f32[4096]{0} %negate_9) %async-done.1 = f32[4096]{0} call-done(((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) %async-start.1) %async-done.2 = f32[4096]{0} call-done(((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) %async-start.2) %add_1 = f32[4096]{0} add(f32[4096]{0} %add_0, f32[4096]{0} %async-done.1) ROOT %add_2 = f32[4096]{0} add(f32[4096]{0} %add_1, f32[4096]{0} %async-done.2) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text)); auto colorer = [](HloAliasAnalysis* alias_analysis, const HloOrdering&) { for (const HloBuffer& buffer : alias_analysis->buffers()) { int color = 1; for (const HloValue* value : buffer.values()) { if (absl::c_any_of( value->positions(), [](const HloPosition& position) { return position.instruction->parent()->execution_thread() != "foobar"; }) || absl::c_any_of(value->GetUses(), [](const HloUse& use) { return use.instruction->parent()->execution_thread() != "foobar"; })) { color = 0; } } for (const HloValue* value : buffer.values()) { const HloPosition& defining_position = value->defining_position(); if (defining_position.shape().has_layout()) { const int memory_space = defining_position.shape().layout().memory_space(); if (memory_space != 0) { color = memory_space; } } alias_analysis->dataflow_analysis() .GetValue(value->id()) .set_color(BufferValue::Color(color)); } } return absl::OkStatus(); }; BufferAssigner::PrivateStacks private_stacks; private_stacks[1] = {FindComputation(m.get(), "called_computation1"), FindComputation(m.get(), "called_computation2")}; auto buffers = 
RunBufferAssignmentWithSequentialOrdering( m.get(), 1, colorer, private_stacks); LOG(INFO) << buffers->ToString(); auto get_slice = [&](std::string_view hlo_name, const ShapeIndex& index) { return buffers->GetUniqueSlice(FindInstruction(m.get(), hlo_name), index) .value(); }; EXPECT_EQ(get_slice("param_0", {}), get_slice("a", {})); EXPECT_EQ(get_slice("param_3", {}), get_slice("a", {})); EXPECT_EQ(get_slice("param_1", {}), get_slice("b", {})); EXPECT_EQ(get_slice("param_2", {}), get_slice("b", {})); EXPECT_EQ(get_slice("result.1", {}), get_slice("async-done.1", {})); EXPECT_EQ(get_slice("result.2", {}), get_slice("async-done.2", {})); for (const auto& hlo_name : {"negate_0", "negate_1", "negate_2", "negate_3", "negate_4", "negate_5"}) { EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_6", {})); EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_7", {})); EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_8", {})); EXPECT_NE(get_slice(hlo_name, {}), get_slice("negate_9", {})); EXPECT_NE(get_slice(hlo_name, {}), get_slice("add_0", {})); } EXPECT_NE(get_slice("negate_0", {}), get_slice("negate_1", {})); EXPECT_EQ(get_slice("negate_1", {}), get_slice("negate_2", {})); EXPECT_EQ(get_slice("negate_1", {}), get_slice("negate_3", {})); EXPECT_TRUE(get_slice("negate_4", {}) == get_slice("negate_0", {}) || get_slice("negate_4", {}) == get_slice("negate_1", {})); EXPECT_TRUE(get_slice("negate_5", {}) == get_slice("negate_0", {}) || get_slice("negate_5", {}) == get_slice("negate_1", {})); } TEST_F(BufferAssignmentTest, AsyncCallImplicitSharding) { std::string hlo_string = R"( HloModule module, is_scheduled=true called_computation { param0 = f32[4] parameter(0) constant = f32[1] constant(1) dynamic-update-slice = f32[4] dynamic-update-slice(param0, constant, constant) ROOT negate = f32[4] negate(dynamic-update-slice) } ENTRY entry { p0 = f32[8] parameter(0) call-start = ((f32[8]), f32[8], s32[]) call-start(p0), async_execution_thread="foo", to_apply=called_computation 
ROOT call-done = f32[8] call-done(call-start) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo_string)); auto buffers = RunBufferAssignmentWithSequentialOrdering(module.get()); LOG(INFO) << buffers->ToString(); auto get_slice = [&](std::string_view hlo_name, const ShapeIndex& index) { return buffers ->GetUniqueSlice(FindInstruction(module.get(), hlo_name), index) .value(); }; EXPECT_EQ(get_slice("p0", {}).size(), 32); EXPECT_EQ(get_slice("dynamic-update-slice", {}).size(), 32); } TEST_F(BufferAssignmentTest, AsyncCustomCall) { const char* hlo_text = R"( HloModule AsyncCustomCall, is_scheduled=true ENTRY %main (a: f32[4096]) -> f32[4096] { %a = f32[4096]{0} parameter(0) %neg_0 = f32[4096]{0} negate(f32[4096]{0} %a) %async-start = ((f32[4096]{0}), f32[4096]{0}, u32[]) custom-call-start(f32[4096]{0} %neg_0), custom_call_target="Foo" %async-done = f32[4096]{0} custom-call-done(((f32[4096]{0}), f32[4096]{0}, u32[]) %async-start) ROOT %neg_1 = f32[4096]{0} negate(f32[4096]{0} %async-done) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_text)); auto buffers = RunBufferAssignmentWithSequentialOrdering(m.get()); HloInstruction* neg_0 = FindInstruction(m.get(), "neg_0"); HloInstruction* async_done = FindInstruction(m.get(), "async-done"); EXPECT_FALSE(buffers->SharesTopLevelSlice(neg_0, async_done)); } TEST_F(BufferAssignmentTest, AsyncCustomCallWithAliasing) { const char* hlo_text = R"( HloModule AsyncCustomCall, is_scheduled=true ENTRY %main (a: f32[4096]) -> f32[4096] { %a = f32[4096]{0} parameter(0) %neg_0 = f32[4096]{0} negate(f32[4096]{0} %a) %async-start = ((f32[4096]{0}), f32[4096]{0}, u32[]) custom-call-start(f32[4096]{0} %neg_0), custom_call_target="Foo", output_to_operand_aliasing={{}: (0, {})} %async-done = f32[4096]{0} custom-call-done(((f32[4096]{0}), f32[4096]{0}, u32[]) %async-start) ROOT %neg_1 = f32[4096]{0} negate(f32[4096]{0} %async-done) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, 
ParseAndReturnVerifiedModule(hlo_text)); auto buffers = RunBufferAssignmentWithSequentialOrdering(m.get()); HloInstruction* neg_0 = FindInstruction(m.get(), "neg_0"); HloInstruction* async_done = FindInstruction(m.get(), "async-done"); EXPECT_TRUE(buffers->SharesTopLevelSlice(neg_0, async_done)); } TEST_F(BufferAssignmentTest, BufferIsolation) { absl::string_view module_str = R"( HloModule test_module, is_scheduled=true ENTRY %test_module { param.0 = s32[1024]{0} parameter(0) param.1 = s32[1024]{0} parameter(1) mul1 = s32[1024]{0} multiply(param.0, param.1) bcast1 = s32[4,1024]{1,0} broadcast(mul1), dimensions={1} bcast2 = s32[4,1024]{1,0} broadcast(param.0), dimensions={1} mul2 = s32[1024]{0} multiply(mul1, param.0) add1 = s32[1024]{0} add(mul1, mul2) sub2 = s32[1024]{0} subtract(mul1, mul2) mul3 = s32[1024]{0} multiply(mul2, add1) mul4 = s32[1024]{0} multiply(mul3, sub2) bcast3 = s32[4,1024]{1,0} broadcast(mul4), dimensions={1} add2 = s32[4,1024]{1,0} add(bcast3, bcast2) ROOT add3 = s32[4,1024]{1,0} add(add2, bcast1) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); std::unique_ptr<BufferAssignment> nonisolation_assignment = RunBufferAssignmentWithIsolationOptions(m.get()); auto nonisolation_allocation = absl::c_find_if(nonisolation_assignment->Allocations(), [](const BufferAllocation& allocation) { return allocation.IsPreallocatedTempBuffer(); }); ASSERT_NE(nonisolation_allocation, nonisolation_assignment->Allocations().end()); LOG(INFO) << "Non-isolation buffers"; for (const auto& [value, offset_size] : nonisolation_allocation->assigned_buffers()) { LOG(INFO) << value->ToShortString() << ": off: " << offset_size.offset << ", size: " << offset_size.size; } BufferAssignment::BufferIsolationOptions isolation_options; isolation_options.hlo_value_compare = [](const HloValue* a, const HloValue* b) { return a->id() < b->id(); }; isolation_options.config.add_isolation_colors(0); isolation_options.config.set_isolation_order_salt(10); 
isolation_options.config.set_isolation_fuel(5); isolation_options.config.set_isolation_padding_bytes(1024); isolation_options.config.set_base_offset_bytes(12288); std::unique_ptr<BufferAssignment> isolation_assignment = RunBufferAssignmentWithIsolationOptions(m.get(), isolation_options); auto isolation_allocation = absl::c_find_if(isolation_assignment->Allocations(), [](const BufferAllocation& allocation) { return allocation.IsPreallocatedTempBuffer(); }); ASSERT_NE(isolation_allocation, isolation_assignment->Allocations().end()); std::vector<const HloValue*> ordered_values; for (const auto& [value, _] : isolation_allocation->assigned_buffers()) { ordered_values.push_back(value); } absl::c_sort(ordered_values, isolation_options.hlo_value_compare); int i; int64_t expected_offset = nonisolation_allocation->size() + isolation_options.config.base_offset_bytes() + isolation_options.config.isolation_padding_bytes(); ASSERT_GT(ordered_values.size(), isolation_options.config.isolation_fuel()); LOG(INFO) << "Isolation buffers"; for (i = 0; i < isolation_options.config.isolation_fuel(); ++i) { const HloValue* value = ordered_values[i]; auto offset_size = isolation_allocation->assigned_buffers().at(value); LOG(INFO) << value->ToShortString() << ": off: " << offset_size.offset << ", size: " << offset_size.size; EXPECT_EQ(offset_size.offset, expected_offset); expected_offset += offset_size.size + isolation_options.config.isolation_padding_bytes(); } for (; i < ordered_values.size(); ++i) { const HloValue* value = ordered_values[i]; auto offset_size = isolation_allocation->assigned_buffers().at(value); auto nonisolation_offset_size = absl::c_find_if( nonisolation_allocation->assigned_buffers(), [&](const auto& pair) { return pair.first->defining_position() == value->defining_position(); }); ASSERT_NE(nonisolation_offset_size, nonisolation_allocation->assigned_buffers().end()); LOG(INFO) << value->ToShortString() << ": off: " << offset_size.offset << ", size: " << 
offset_size.size; EXPECT_EQ(offset_size.offset, nonisolation_offset_size->second.offset + isolation_options.config.base_offset_bytes()); } } TEST_F(BufferAssignmentTest, BufferInfoStringTest) { absl::string_view module_str = R"( HloModule test_module ENTRY %test_module { %param.0 = s32[1024]{0} parameter(0) %param.1 = s32[1024]{0} parameter(1) %mul = s32[1024]{0} multiply(%param.0, %param.1) %add = s32[1024]{0} add(%mul, %param.0) ROOT %bcast = s32[1024,1024]{1,0} broadcast(s32[1024] %add), dimensions={0} })"; absl::string_view reference_str = R"(buffer_id,buffer_name,offset,size,definition_time,end_time,num_uses,use_times,use_names 0,"<0 param.0 @0>",0,4096,0,5,2,"2;3","mul, operand 0;add, operand 1" 1,"<1 param.1 @0>",0,4096,1,5,1,"2","mul, operand 1" 2,"<2 mul @0>",0,4096,2,3,1,"3","add, operand 0" 3,"<3 add @0>",0,4096,3,4,1,"4","bcast, operand 0" 4,"<4 bcast @0>",0,4194304,4,5,0,"","" )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); HloInstruction* const param0 = FindInstruction(m.get(), "param.0"); HloInstruction* const param1 = FindInstruction(m.get(), "param.1"); HloInstruction* const mul = FindInstruction(m.get(), "mul"); HloInstruction* const add = FindInstruction(m.get(), "add"); HloInstruction* const bcast = FindInstruction(m.get(), "bcast"); auto assignment = RunBufferAssignmentWithInstructionSequence( m.get(), {param0, param1, mul, add, bcast}); const std::string buffer_info_str = assignment->BufferInfoString(); EXPECT_EQ(buffer_info_str, reference_str); } TEST_F(WhileBufferAssignmentTest, WhileLoopsInterferingResultRange) { auto module = CreateNewVerifiedModule(); auto builder = HloComputation::Builder(TestName()); auto zero = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0))); auto one = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto input0 = builder.AddInstruction( HloInstruction::CreateParameter(0, data_shape_, "input0")); auto 
weights0 = builder.AddInstruction( HloInstruction::CreateParameter(1, data_shape_, "weights0")); auto output0 = builder.AddInstruction( HloInstruction::CreateBroadcast(data_shape_, zero, {})); auto input1 = builder.AddInstruction( HloInstruction::CreateParameter(2, data_shape_, "input1")); auto weights1 = builder.AddInstruction( HloInstruction::CreateParameter(3, data_shape_, "weights1")); auto output1 = builder.AddInstruction( HloInstruction::CreateBroadcast(data_shape_, one, {})); auto cond = module->AddEmbeddedComputation(BuildWhileConditionComputation("cond")); auto body = module->AddEmbeddedComputation(BuildWhileBodyComputation("body")); auto tuple0 = builder.AddInstruction( HloInstruction::CreateTuple({input0, weights0, output0})); auto tuple1 = builder.AddInstruction( HloInstruction::CreateTuple({input1, weights1, output1})); auto while0 = builder.AddInstruction( HloInstruction::CreateWhile(loop_state_shape_, cond, body, tuple0)); auto while1 = builder.AddInstruction( HloInstruction::CreateWhile(loop_state_shape_, cond, body, tuple1)); auto gte0 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(data_shape_, while0, 0)); auto gte1 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(data_shape_, while1, 1)); auto root_add = builder.AddInstruction( HloInstruction::CreateBinary(data_shape_, HloOpcode::kAdd, gte0, gte1)); module->AddEntryComputation(builder.Build()); { FlattenCallGraph flatten; TF_ASSERT_OK_AND_ASSIGN(bool result, flatten.Run(module.get())); EXPECT_TRUE(result); } RunCopyInsertion(module.get()); HloSchedule schedule = ScheduleModule(module.get(), ByteSizeOf).value(); schedule.set_sequence( module->entry_computation(), {input1, weights1, one, output1, while1->mutable_operand(0), while1, input0, weights0, zero, output0, while0->mutable_operand(0), while0, gte0, gte1, root_add}); TF_ASSERT_OK(schedule.Verify()); auto assignment = BufferAssigner::Run( module.get(), std::make_unique<SequentialHloOrdering>(schedule), 
ByteSizeOf, [](LogicalBuffer::Color) { return 1; }, true) .value(); EXPECT_TRUE(BuffersDistinct({while0}, {while1}, *assignment)); } TEST_F(WhileBufferAssignmentTest, WhilesDontShareEntryParamIfLiveOut) { auto module = CreateNewVerifiedModule(); auto builder = HloComputation::Builder("entry"); auto input0 = builder.AddInstruction( HloInstruction::CreateParameter(0, data_shape_, "input0")); auto weights0 = builder.AddInstruction( HloInstruction::CreateParameter(1, data_shape_, "weights0")); auto zero = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0))); auto output0 = builder.AddInstruction( HloInstruction::CreateBroadcast(data_shape_, zero, {})); auto output1 = builder.AddInstruction( HloInstruction::CreateBroadcast(data_shape_, zero, {})); auto cond0 = module->AddEmbeddedComputation(BuildWhileConditionComputation("cond")); auto body0 = module->AddEmbeddedComputation(BuildWhileBodyComputation("body")); auto tuple0 = builder.AddInstruction( HloInstruction::CreateTuple({input0, weights0, output0})); auto while0 = builder.AddInstruction( HloInstruction::CreateWhile(loop_state_shape_, cond0, body0, tuple0)); auto while0_out = builder.AddInstruction( HloInstruction::CreateGetTupleElement(data_shape_, while0, 2)); auto cond1 = module->AddEmbeddedComputation(BuildWhileConditionComputation("cond")); auto body1 = module->AddEmbeddedComputation(BuildWhileBodyComputation("body")); auto tuple1 = builder.AddInstruction( HloInstruction::CreateTuple({while0_out, weights0, output1})); auto while1 = builder.AddInstruction( HloInstruction::CreateWhile(loop_state_shape_, cond1, body1, tuple1)); auto while1_out = builder.AddInstruction( HloInstruction::CreateGetTupleElement(data_shape_, while1, 2)); module->AddEntryComputation(builder.Build()); RunCopyInsertion(module.get()); auto assignment = RunBufferAssignment(module.get()); auto* root_alloc = assignment->GetUniqueTopLevelSlice(while1_out).value().allocation(); 
EXPECT_TRUE(root_alloc->maybe_live_out()); EXPECT_FALSE(root_alloc->is_entry_computation_parameter()); } TEST_F(WhileBufferAssignmentTest, WhileWithDynamicUpdateSliceShare) { const char* const hlo_string = R"( HloModule test while_body { state = (s32[], f32[1280,1,128]{2,1,0}) parameter(0) constant.1 = f32[] constant(0) broadcast.6 = f32[128,1,128]{2,1,0} broadcast(constant.1), dimensions={} get-tuple-element.4 = f32[1280,1,128]{2,1,0} get-tuple-element(state), index=1 get-tuple-element.3 = s32[] get-tuple-element(state), index=0 constant.2 = s32[] constant(128) add.5 = s32[] add(get-tuple-element.3, constant.2) constant.3 = s32[] constant(0) dynamic-update-slice.5 = f32[1280,1,128]{2,1,0} dynamic-update-slice(get-tuple-element.4, broadcast.6, constant.3, constant.3, constant.3) dynamic-update-slice.9 = f32[1280,1,128]{2,1,0} dynamic-update-slice(dynamic-update-slice.5, broadcast.6, constant.3, constant.3, constant.3) ROOT tuple.85 = (s32[], f32[1280,1,128]{2,1,0}) tuple(add.5, dynamic-update-slice.9) } while_condition { state = (s32[], f32[1280,1,128]{2,1,0}) parameter(0) get-tuple-element = s32[] get-tuple-element(state), index=0 get-tuple-element.1 = s32[] constant(3) ROOT less-than.339.338 = pred[] compare(get-tuple-element, get-tuple-element.1), direction=LT } ENTRY entry_computation { constant.7 = s32[] constant(0) copy.1 = s32[] copy(constant.7) constant.6 = f32[] constant(0) broadcast.6 = f32[1280,1,128]{2,1,0} broadcast(constant.6), dimensions={} tuple.1 = (s32[], f32[1280,1,128]{2,1,0}) tuple(copy.1, broadcast.6) while.0 = (s32[], f32[1280,1,128]{2,1,0}) while(tuple.1), condition=while_condition, body=while_body ROOT get-tuple-element.2 = s32[] get-tuple-element(while.0), index=0 } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); RunCopyInsertion(module.get()); auto assignment = RunBufferAssignment(module.get()); auto dus9 = FindInstruction(module.get(), "dynamic-update-slice.9"); auto dus9_alloc_slice = 
assignment->GetUniqueTopLevelSlice(dus9).value(); auto dus5 = FindInstruction(module.get(), "dynamic-update-slice.5"); auto dus5_alloc_slice = assignment->GetUniqueTopLevelSlice(dus5).value(); EXPECT_EQ(dus9_alloc_slice.allocation(), dus5_alloc_slice.allocation()); EXPECT_EQ(dus9_alloc_slice, dus5_alloc_slice); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/buffer_assignment.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/buffer_assignment_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
464b7a60-847b-4ac6-bf92-ce88e1c0c36b
cpp
tensorflow/tensorflow
conditional_canonicalizer
third_party/xla/xla/service/conditional_canonicalizer.cc
third_party/xla/xla/service/conditional_canonicalizer_test.cc
#include "xla/service/conditional_canonicalizer.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/status_macros.h" namespace xla { namespace { absl::Status CanonicalizeNonTupleConditional(HloInstruction* conditional) { TF_RET_CHECK(conditional->opcode() == HloOpcode::kConditional); for (auto* branch : conditional->called_computations()) { HloInstruction* root = branch->root_instruction(); TF_RET_CHECK(!root->shape().IsTuple()); HloInstruction* tuple = branch->AddInstruction(HloInstruction::CreateTuple({root})); branch->set_root_instruction(tuple, true); } auto parent = conditional->parent(); const Shape& root_shape = conditional->shape(); auto new_shape = ShapeUtil::MakeTupleShape(absl::MakeSpan(&root_shape, 1)); auto new_conditional = parent->AddInstruction(conditional->CloneWithNewShape(new_shape)); auto gte = parent->AddInstruction( HloInstruction::CreateGetTupleElement(root_shape, new_conditional, 0)); TF_RETURN_IF_ERROR(parent->ReplaceInstruction(conditional, gte)); return absl::OkStatus(); } } absl::StatusOr<bool> ConditionalCanonicalizer::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { XLA_VLOG_LINES( 2, "ConditionalCanonicalizer::Run(), before:\n" + module->ToString()); bool changed = false; for (auto* comp : module->MakeNonfusionComputations(execution_threads)) { for (auto* inst : comp->MakeInstructionPostOrder()) { if (inst->opcode() == HloOpcode::kConditional && !inst->shape().IsTuple()) { TF_RETURN_IF_ERROR(CanonicalizeNonTupleConditional(inst)); changed = true; } } } XLA_VLOG_LINES( 2, "ConditionalCanonicalizer::Run(), after:\n" + module->ToString()); return changed; } }
#include "xla/service/conditional_canonicalizer.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/service/hlo_parser.h" #include "xla/shape_util.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/literal_test_util.h" #include "xla/tests/test_utils.h" #include "xla/tsl/lib/core/status_test_util.h" #include "xla/types.h" #include "xla/util.h" namespace xla { namespace { namespace op = xla::testing::opcode_matchers; class ConditionalCanonicalizerTest : public HloTestBase { protected: ConditionalCanonicalizerTest() {} }; TEST_F(ConditionalCanonicalizerTest, DenseArrayConditionalRewrite) { auto module = ParseAndReturnVerifiedModule(R"( HloModule _ true_branch { true_param = (s32[3,2]) parameter(0) ROOT root = s32[] constant(0) } false_branch { false_param = (s32[3,2]) parameter(0) ROOT root = s32[] constant(1) } ENTRY entry { param0 = s32[3,2] parameter(0) branch = pred[] constant(false) param_tuple = (s32[3 ,2]) tuple(param0) ROOT conditional = s32[] conditional(branch, param_tuple, param_tuple), true_computation=true_branch, false_computation=false_branch } )") .value(); ConditionalCanonicalizer pass; EXPECT_TRUE(pass.Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), op::GetTupleElement(op::Conditional())); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_canonicalizer.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_canonicalizer_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
62bf0644-5132-4e87-bff3-4b0982acdd07
cpp
tensorflow/tensorflow
infeed_token_propagation
third_party/xla/xla/service/infeed_token_propagation.cc
third_party/xla/xla/service/infeed_token_propagation_test.cc
#include "xla/service/infeed_token_propagation.h" #include <cstdint> #include <string_view> #include <vector> #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/call_graph.h" #include "xla/service/hlo_dce.h" #include "xla/service/tuple_simplifier.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace { bool IsDanglingInfeed(HloInstruction* infeed) { CHECK(infeed->opcode() == HloOpcode::kInfeed); if (infeed->has_sharding()) { return false; } if (const HloInstruction* after_all = infeed->operand(0); after_all->opcode() != HloOpcode::kAfterAll || after_all->operand_count() != 0) { return false; } for (const HloInstruction* user : infeed->users()) { if (user->opcode() == HloOpcode::kGetTupleElement && user->tuple_index() == 1) { return false; } } return true; } bool IsDanglingOutfeed(HloInstruction* outfeed) { CHECK(outfeed->opcode() == HloOpcode::kOutfeed); if (outfeed->has_sharding()) { return false; } if (const HloInstruction* after_all = outfeed->operand(1); after_all->opcode() != HloOpcode::kAfterAll || after_all->operand_count() != 0) { return false; } if (outfeed->user_count() != 0) { return false; } return true; } HloInstruction* ReconstructTuple(HloInstruction* tuple) { CHECK(tuple->shape().IsTuple()); HloComputation* computation = tuple->parent(); std::vector<HloInstruction*> gtes; gtes.resize(tuple->shape().tuple_shapes_size()); for (int64_t idx = 0; idx < gtes.size(); ++idx) { gtes[idx] = computation->AddInstruction( HloInstruction::CreateGetTupleElement(tuple, idx)); } return 
computation->AddInstruction(HloInstruction::CreateTuple(gtes)); } absl::StatusOr<HloInstruction*> InsertTokenIntoTuple(HloInstruction* tuple, bool add_token_operand) { CHECK(tuple->shape().IsTuple()); HloComputation* computation = tuple->parent(); std::vector<HloInstruction*> original_users = tuple->users(); HloInstruction* original_tuple = ReconstructTuple(tuple); for (HloInstruction* original_user : original_users) { for (int64_t idx : original_user->operand_indices(tuple)) { TF_RETURN_IF_ERROR( original_user->ReplaceOperandWith(idx, original_tuple)); } } *tuple->mutable_shape()->add_tuple_shapes() = ShapeUtil::MakeTokenShape(); if (add_token_operand) { tuple->AppendOperand( computation->AddInstruction(HloInstruction::CreateToken())); } HloInstruction* input_token_gte = computation->AddInstruction(HloInstruction::CreateGetTupleElement( tuple, tuple->shape().tuple_shapes_size() - 1)); return input_token_gte; } } absl::Status CanonicalizeConditionalInstruction(HloInstruction* conditional) { CHECK_EQ(conditional->opcode(), HloOpcode::kConditional); for (HloComputation* branch : conditional->branch_computations()) { HloInstruction* parameter = branch->parameter_instruction(0); if (!parameter->shape().IsTuple()) { *parameter->mutable_shape() = ShapeUtil::MakeTupleShape({parameter->shape()}); HloInstruction* original = branch->AddInstruction( HloInstruction::CreateGetTupleElement(parameter, 0)); TF_RETURN_IF_ERROR(parameter->ReplaceAllUsesWithDifferentShape(original)); } int64_t branch_operand_idx = conditional->branch_index(branch) + 1; HloInstruction* branch_tuple = conditional->mutable_operand(branch_operand_idx); if (!branch_tuple->shape().IsTuple()) { branch_tuple = conditional->parent()->AddInstruction( HloInstruction::CreateTuple({branch_tuple})); TF_RETURN_IF_ERROR(conditional->ReplaceOperandWithDifferentShape( branch_operand_idx, branch_tuple)); } if (branch_tuple->opcode() == HloOpcode::kParameter) { branch_tuple = ReconstructTuple(branch_tuple); 
TF_RETURN_IF_ERROR( conditional->ReplaceOperandWith(branch_operand_idx, branch_tuple)); } HloInstruction* root = branch->root_instruction(); if (root->opcode() != HloOpcode::kTuple) { root = ReconstructTuple(root); branch->set_root_instruction(root); } } CHECK(conditional->shape().IsTuple()); if (conditional->IsRoot()) { HloInstruction* new_root = ReconstructTuple(conditional); conditional->parent()->set_root_instruction(new_root); } return absl::OkStatus(); } absl::Status CanonicalizeWhileInstruction(HloInstruction* loop) { CHECK_EQ(loop->opcode(), HloOpcode::kWhile); HloComputation* body = loop->while_body(); HloComputation* cond = loop->while_condition(); HloInstruction* body_parameter = body->parameter_instruction(0); if (!body_parameter->shape().IsTuple()) { *body_parameter->mutable_shape() = ShapeUtil::MakeTupleShape({body_parameter->shape()}); HloInstruction* original = body->AddInstruction( HloInstruction::CreateGetTupleElement(body_parameter, 0)); TF_RETURN_IF_ERROR( body_parameter->ReplaceAllUsesWithDifferentShape(original)); } HloInstruction* root = body->root_instruction(); if (!root->shape().IsTuple()) { root = body->AddInstruction(HloInstruction::CreateTuple({root})); body->set_root_instruction(root, true); } HloInstruction* cond_parameter = cond->parameter_instruction(0); if (!cond_parameter->shape().IsTuple()) { *cond_parameter->mutable_shape() = ShapeUtil::MakeTupleShape({cond_parameter->shape()}); HloInstruction* original = cond->AddInstruction( HloInstruction::CreateGetTupleElement(cond_parameter, 0)); TF_RETURN_IF_ERROR( cond_parameter->ReplaceAllUsesWithDifferentShape(original)); } if (!loop->shape().IsTuple()) { *loop->mutable_shape() = ShapeUtil::MakeTupleShape({loop->shape()}); HloInstruction* original = loop->parent()->AddInstruction( HloInstruction::CreateGetTupleElement(loop, 0)); TF_RETURN_IF_ERROR(loop->ReplaceAllUsesWithDifferentShape(original)); } HloInstruction* loop_tuple = loop->mutable_operand(0); if 
(!loop_tuple->shape().IsTuple()) { loop_tuple = loop->parent()->AddInstruction( HloInstruction::CreateTuple({loop_tuple})); TF_RETURN_IF_ERROR(loop->ReplaceOperandWithDifferentShape(0, loop_tuple)); } if (loop_tuple->opcode() == HloOpcode::kParameter) { loop_tuple = ReconstructTuple(loop_tuple); TF_RETURN_IF_ERROR(loop->ReplaceOperandWith(0, loop_tuple)); } if (root->opcode() != HloOpcode::kTuple) { root = ReconstructTuple(root); body->set_root_instruction(root); } if (loop->IsRoot()) { HloInstruction* new_root = ReconstructTuple(loop); loop->parent()->set_root_instruction(new_root); } return absl::OkStatus(); } absl::Status InfeedTokenPropagation::PropagateTokenThroughConditionalBranch() { HloComputation* comp = dangling_instruction_->parent(); dangling_instruction_ = call_graph_->GetComputationCallers(comp)[0]; CHECK_EQ(dangling_instruction_->opcode(), HloOpcode::kConditional); for (HloComputation* branch : dangling_instruction_->branch_computations()) { HloInstruction* root = branch->root_instruction(); if (branch == comp) { TF_RETURN_IF_ERROR( InsertTokenIntoTuple(root, false).status()); root->AppendOperand(output_token_); } else { TF_RETURN_IF_ERROR( InsertTokenIntoTuple(root, true).status()); } } HloInstruction* parameter = comp->parameter_instruction(0); TF_ASSIGN_OR_RETURN( HloInstruction * input_token_gte, InsertTokenIntoTuple(parameter, false)); TF_RETURN_IF_ERROR(input_token_->ReplaceAllUsesWith(input_token_gte)); int64_t branch_operand_idx = dangling_instruction_->branch_index(comp) + 1; HloInstruction* branch_tuple = dangling_instruction_->mutable_operand(branch_operand_idx); TF_ASSIGN_OR_RETURN( HloInstruction * next_input_token_gte, InsertTokenIntoTuple(branch_tuple, true)); TF_RETURN_IF_ERROR(dangling_instruction_->ReplaceOperandWithDifferentShape( branch_operand_idx, branch_tuple)); input_token_ = branch_tuple->mutable_operand(next_input_token_gte->tuple_index()); TF_ASSIGN_OR_RETURN( output_token_, InsertTokenIntoTuple(dangling_instruction_, 
false)); return absl::OkStatus(); } absl::Status InfeedTokenPropagation::PropagateTokenThroughWhileBody() { HloComputation* comp = dangling_instruction_->parent(); dangling_instruction_ = call_graph_->GetComputationCallers(comp)[0]; CHECK_EQ(dangling_instruction_->opcode(), HloOpcode::kWhile); HloInstruction* root = comp->root_instruction(); TF_RETURN_IF_ERROR( InsertTokenIntoTuple(root, false).status()); root->AppendOperand(output_token_); HloInstruction* body_parameter = comp->parameter_instruction(0); TF_ASSIGN_OR_RETURN( HloInstruction * input_token_gte, InsertTokenIntoTuple(body_parameter, false)); TF_RETURN_IF_ERROR(input_token_->ReplaceAllUsesWith(input_token_gte)); HloComputation* cond = dangling_instruction_->while_condition(); HloInstruction* cond_parameter = cond->parameter_instruction(0); TF_RETURN_IF_ERROR( InsertTokenIntoTuple(cond_parameter, false) .status()); HloInstruction* while_tuple = dangling_instruction_->mutable_operand(0); TF_ASSIGN_OR_RETURN( input_token_, InsertTokenIntoTuple(while_tuple, true)); TF_RETURN_IF_ERROR( dangling_instruction_->ReplaceOperandWithDifferentShape(0, while_tuple)); TF_ASSIGN_OR_RETURN( output_token_, InsertTokenIntoTuple(dangling_instruction_, false)); return absl::OkStatus(); } absl::Status InfeedTokenPropagation::PropagateToken() { HloComputation* comp = dangling_instruction_->parent(); if (comp->IsEntryComputation()) { return absl::OkStatus(); } VLOG(2) << "Propagating tokens for: " << dangling_instruction_->name(); HloInstruction* caller = call_graph_->GetComputationCallers(comp)[0]; if (caller->has_sharding()) { return absl::OkStatus(); } if (caller->opcode() == HloOpcode::kConditional) { TF_RETURN_IF_ERROR(CanonicalizeConditionalInstruction(caller)); TF_RETURN_IF_ERROR(PropagateTokenThroughConditionalBranch()); } else if (caller->opcode() == HloOpcode::kWhile && comp == caller->while_body()) { TF_RETURN_IF_ERROR(CanonicalizeWhileInstruction(caller)); TF_RETURN_IF_ERROR(PropagateTokenThroughWhileBody()); } else 
{ VLOG(2) << "Unhandled computation: " << comp->name(); return absl::OkStatus(); } return PropagateToken(); } absl::StatusOr<bool> InfeedTokenPropagation::Run( HloModule* module, const absl::flat_hash_set<std::string_view>& execution_threads) { VLOG(5) << "Before InfeedTokenPropagation:"; XLA_VLOG_LINES(5, module->ToString()); std::vector<HloInstruction*> dangling_infeeds; std::vector<HloInstruction*> dangling_outfeeds; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { if (!computation->IsEntryComputation()) { for (HloInstruction* instruction : computation->instructions()) { if (instruction->opcode() == HloOpcode::kInfeed && IsDanglingInfeed(instruction)) { VLOG(1) << "Found dangling infeed: " << instruction->ToString(); dangling_infeeds.push_back(instruction); } else if (instruction->opcode() == HloOpcode::kOutfeed && IsDanglingOutfeed(instruction)) { VLOG(1) << "Found dangling outfeed: " << instruction->ToString(); dangling_outfeeds.push_back(instruction); } } } } bool changed = !dangling_infeeds.empty() || !dangling_outfeeds.empty(); if (changed) { call_graph_ = CallGraph::Build(module); if (!call_graph_->IsFlattened()) { return FailedPrecondition( "Call graph must be flattened before infeed token propagation."); } } for (HloInstruction* dangling_infeed : dangling_infeeds) { dangling_instruction_ = dangling_infeed; input_token_ = dangling_infeed->mutable_operand(0); output_token_ = dangling_infeed->AddInstruction( HloInstruction::CreateGetTupleElement(dangling_infeed, 1)); TF_RETURN_IF_ERROR(PropagateToken()); } for (HloInstruction* dangling_outfeed : dangling_outfeeds) { dangling_instruction_ = dangling_outfeed; input_token_ = dangling_outfeed->mutable_operand(1); output_token_ = dangling_outfeed; TF_RETURN_IF_ERROR(PropagateToken()); } if (changed) { TF_RETURN_IF_ERROR( TupleSimplifier().Run(module, execution_threads).status()); TF_RETURN_IF_ERROR(HloDCE().Run(module, execution_threads).status()); } VLOG(5) << "After 
InfeedTokenPropagation:"; XLA_VLOG_LINES(5, module->ToString()); return changed; } }
#include "xla/service/infeed_token_propagation.h" #include <string_view> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" namespace op = xla::testing::opcode_matchers; namespace xla { namespace { class InfeedTokenPropagationTest : public HloTestBase { protected: InfeedTokenPropagationTest() = default; }; TEST_F(InfeedTokenPropagationTest, EntryComputationInfeed) { constexpr std::string_view hlo = R"( HloModule main ENTRY main { token.0 = after-all() infeed.0 = (s32[], token[]) infeed(token.0) ROOT gte.0 = get-tuple-element(infeed.0), index=0 } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_FALSE(changed); } TEST_F(InfeedTokenPropagationTest, EntryComputationOutfeed) { constexpr std::string_view hlo = R"( HloModule main ENTRY main { arg.0 = s32[] parameter(0) tuple.0 = tuple(arg.0) token.0 = after-all() outfeed.0 = token[] outfeed(tuple.0, token.0), outfeed_shape=(s32[]) ROOT tuple.1 = tuple() } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_FALSE(changed); } TEST_F(InfeedTokenPropagationTest, ConditionalInfeed) { constexpr std::string_view hlo = R"( HloModule main true_comp { arg.0 = () parameter(0) token.0 = after-all() infeed.0 = (s32[], token[]) infeed(token.0) ROOT tuple.0 = tuple() } false_comp { arg.0 = () parameter(0) ROOT tuple.0 = tuple() } ENTRY main { pred.0 = pred[] constant(true) true_tuple.0 = tuple() false_tuple.0 = tuple() ROOT cond.0 = () conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); 
InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* cond = FindInstruction(module.get(), "cond.0"); EXPECT_EQ(cond->shape().tuple_shapes_size(), 1); EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken()); HloInstruction* true_tuple = FindInstruction(module.get(), "true_tuple.0"); EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 1); EXPECT_TRUE(true_tuple->shape().tuple_shapes()[0].IsToken()); HloInstruction* false_tuple = FindInstruction(module.get(), "false_tuple.0"); EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0); HloComputation* true_comp = FindComputation(module.get(), "true_comp"); EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::GetTupleElement(op::Infeed(), 1))); HloComputation* false_comp = FindComputation(module.get(), "false_comp"); EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll())); } TEST_F(InfeedTokenPropagationTest, ConditionalOutfeed) { constexpr std::string_view hlo = R"( HloModule main true_comp { arg.0 = (s32[]) parameter(0) token.0 = after-all() outfeed.0 = token[] outfeed(arg.0, token.0), outfeed_shape=(s32[]) ROOT tuple.0 = tuple() } false_comp { arg.0 = () parameter(0) ROOT tuple.0 = tuple() } ENTRY main { arg.0 = s32[] parameter(0) pred.0 = pred[] constant(true) true_tuple.0 = tuple(arg.0) false_tuple.0 = tuple() ROOT cond.0 = () conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* cond = FindInstruction(module.get(), "cond.0"); EXPECT_EQ(cond->shape().tuple_shapes_size(), 1); EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken()); HloInstruction* true_tuple = FindInstruction(module.get(), "true_tuple.0"); EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 2); 
EXPECT_TRUE(true_tuple->shape().tuple_shapes()[1].IsToken()); HloInstruction* false_tuple = FindInstruction(module.get(), "false_tuple.0"); EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0); HloComputation* true_comp = FindComputation(module.get(), "true_comp"); EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed())); HloComputation* false_comp = FindComputation(module.get(), "false_comp"); EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll())); } TEST_F(InfeedTokenPropagationTest, ConditionalDuplicateOperand) { constexpr std::string_view hlo = R"( HloModule main true_comp { arg.0 = () parameter(0) token.0 = after-all() infeed.0 = (s32[], token[]) infeed(token.0) ROOT tuple.0 = tuple() } false_comp { arg.0 = () parameter(0) ROOT tuple.0 = tuple() } ENTRY main { pred.0 = pred[] constant(true) tuple.0 = tuple() ROOT cond.0 = () conditional(pred.0, tuple.0, tuple.0), true_computation=true_comp, false_computation=false_comp } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* cond = FindInstruction(module.get(), "cond.0"); EXPECT_EQ(cond->shape().tuple_shapes_size(), 1); EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken()); const HloInstruction* true_tuple = cond->operand(1); EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 1); EXPECT_TRUE(true_tuple->shape().tuple_shapes()[0].IsToken()); const HloInstruction* false_tuple = cond->operand(2); EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0); HloComputation* true_comp = FindComputation(module.get(), "true_comp"); EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::GetTupleElement(op::Infeed(), 1))); HloComputation* false_comp = FindComputation(module.get(), "false_comp"); EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll())); } TEST_F(InfeedTokenPropagationTest, NonTupleConditional) { constexpr 
std::string_view hlo = R"( HloModule main true_comp { arg.0 = s32[] parameter(0) outfeed_tuple.0 = tuple(arg.0) token.0 = after-all() outfeed.0 = token[] outfeed(outfeed_tuple.0, token.0), outfeed_shape=(s32[]) ROOT tuple.0 = tuple() } false_comp { arg.0 = () parameter(0) ROOT tuple.0 = tuple() } ENTRY main { arg.0 = s32[] parameter(0) pred.0 = pred[] constant(true) false_tuple.0 = tuple() ROOT cond.0 = () conditional(pred.0, arg.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* cond = FindInstruction(module.get(), "cond.0"); EXPECT_EQ(cond->shape().tuple_shapes_size(), 1); EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken()); HloInstruction* true_tuple = cond->mutable_operand(1); EXPECT_TRUE(true_tuple->shape().IsTuple()); EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 2); EXPECT_TRUE(true_tuple->shape().tuple_shapes()[1].IsToken()); HloInstruction* false_tuple = FindInstruction(module.get(), "false_tuple.0"); EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0); HloComputation* true_comp = FindComputation(module.get(), "true_comp"); EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed())); HloComputation* false_comp = FindComputation(module.get(), "false_comp"); EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll())); } TEST_F(InfeedTokenPropagationTest, DisjointConditionalOutfeed) { constexpr std::string_view hlo = R"( HloModule main true_comp { ROOT arg.0 = () parameter(0) one.0 = s32[] constant(1) outfeed_tuple.0 = tuple(one.0) token.0 = after-all() outfeed.0 = token[] outfeed(outfeed_tuple.0, token.0), outfeed_shape=(s32[]) } false_comp { arg.0 = () parameter(0) ROOT tuple.0 = tuple() } ENTRY main { pred.0 = pred[] constant(true) true_tuple.0 = tuple() false_tuple.0 = tuple() ROOT cond.0 = () 
conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* cond = FindInstruction(module.get(), "cond.0"); EXPECT_EQ(cond->shape().tuple_shapes_size(), 1); EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken()); HloInstruction* true_tuple = FindInstruction(module.get(), "true_tuple.0"); EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 1); EXPECT_TRUE(true_tuple->shape().tuple_shapes()[0].IsToken()); HloInstruction* false_tuple = FindInstruction(module.get(), "false_tuple.0"); EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0); HloComputation* true_comp = FindComputation(module.get(), "true_comp"); EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed())); HloComputation* false_comp = FindComputation(module.get(), "false_comp"); EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll())); } TEST_F(InfeedTokenPropagationTest, WhileInfeed) { constexpr std::string_view hlo = R"( HloModule main comp { arg.0 = () parameter(0) token.0 = after-all() infeed.0 = (s32[], token[]) infeed(token.0) ROOT tuple.0 = tuple() } cond { arg.0 = () parameter(0) ROOT true.0 = pred[] constant(true) } ENTRY main { while_tuple.0 = tuple() ROOT while.0 = () while(while_tuple.0), condition=cond, body=comp } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* loop = FindInstruction(module.get(), "while.0"); EXPECT_EQ(loop->shape().tuple_shapes_size(), 1); EXPECT_TRUE(loop->shape().tuple_shapes()[0].IsToken()); HloInstruction* loop_tuple = FindInstruction(module.get(), "while_tuple.0"); EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 1); 
EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[0].IsToken()); HloComputation* body_comp = FindComputation(module.get(), "comp"); EXPECT_THAT(body_comp->root_instruction(), op::Tuple(op::GetTupleElement(op::Infeed(), 1))); HloInstruction* body_param = body_comp->parameter_instruction(0); EXPECT_EQ(body_param->shape().tuple_shapes_size(), 1); EXPECT_TRUE(body_param->shape().tuple_shapes()[0].IsToken()); HloComputation* cond_comp = FindComputation(module.get(), "cond"); HloInstruction* cond_param = cond_comp->parameter_instruction(0); EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 1); EXPECT_TRUE(cond_param->shape().tuple_shapes()[0].IsToken()); } TEST_F(InfeedTokenPropagationTest, WhileOutfeed) { constexpr std::string_view hlo = R"( HloModule main comp { arg.0 = (s32[]) parameter(0) token.0 = after-all() outfeed.0 = token[] outfeed(arg.0, token.0), outfeed_shape=(s32[]) gte.0 = get-tuple-element(arg.0), index=0 ROOT tuple.0 = tuple(gte.0) } cond { arg.0 = (s32[]) parameter(0) ROOT true.0 = pred[] constant(true) } ENTRY main { arg.0 = s32[] parameter(0) while_tuple.0 = tuple(arg.0) ROOT while.0 = (s32[]) while(while_tuple.0), condition=cond, body=comp } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* loop = FindInstruction(module.get(), "while.0"); EXPECT_EQ(loop->shape().tuple_shapes_size(), 2); EXPECT_TRUE(loop->shape().tuple_shapes()[1].IsToken()); HloInstruction* loop_tuple = FindInstruction(module.get(), "while_tuple.0"); EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 2); EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[1].IsToken()); HloComputation* body_comp = FindComputation(module.get(), "comp"); EXPECT_THAT(body_comp->root_instruction(), op::Tuple(op::GetTupleElement(), op::Outfeed())); HloInstruction* body_param = body_comp->parameter_instruction(0); 
EXPECT_EQ(body_param->shape().tuple_shapes_size(), 2); EXPECT_TRUE(body_param->shape().tuple_shapes()[1].IsToken()); HloComputation* cond_comp = FindComputation(module.get(), "cond"); HloInstruction* cond_param = cond_comp->parameter_instruction(0); EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 2); EXPECT_TRUE(cond_param->shape().tuple_shapes()[1].IsToken()); } TEST_F(InfeedTokenPropagationTest, DisjointWhileOutfeed) { constexpr std::string_view hlo = R"( HloModule main comp { ROOT arg.0 = () parameter(0) one.0 = s32[] constant(1) outfeed_tuple.0 = tuple(one.0) token.0 = after-all() outfeed.0 = token[] outfeed(outfeed_tuple.0, token.0), outfeed_shape=(s32[]) } cond { arg.0 = () parameter(0) ROOT true.0 = pred[] constant(true) } ENTRY main { while_tuple.0 = tuple() ROOT while.0 = () while(while_tuple.0), condition=cond, body=comp } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* loop = FindInstruction(module.get(), "while.0"); EXPECT_EQ(loop->shape().tuple_shapes_size(), 1); EXPECT_TRUE(loop->shape().tuple_shapes()[0].IsToken()); HloInstruction* loop_tuple = FindInstruction(module.get(), "while_tuple.0"); EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 1); EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[0].IsToken()); HloComputation* body_comp = FindComputation(module.get(), "comp"); EXPECT_THAT(body_comp->root_instruction(), op::Tuple(op::Outfeed())); HloInstruction* body_param = body_comp->parameter_instruction(0); EXPECT_EQ(body_param->shape().tuple_shapes_size(), 1); EXPECT_TRUE(body_param->shape().tuple_shapes()[0].IsToken()); HloComputation* cond_comp = FindComputation(module.get(), "cond"); HloInstruction* cond_param = cond_comp->parameter_instruction(0); EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 1); EXPECT_TRUE(cond_param->shape().tuple_shapes()[0].IsToken()); } 
TEST_F(InfeedTokenPropagationTest, NonTupleWhile) { constexpr std::string_view hlo = R"( HloModule main comp { ROOT arg.0 = s32[] parameter(0) tuple.0 = tuple(arg.0) token.0 = after-all() outfeed.0 = token[] outfeed(tuple.0, token.0), outfeed_shape=(s32[]) } cond { arg.0 = s32[] parameter(0) ROOT true.0 = pred[] constant(true) } ENTRY main { arg.0 = s32[] parameter(0) ROOT while.0 = s32[] while(arg.0), condition=cond, body=comp } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* loop = FindInstruction(module.get(), "while.0"); EXPECT_TRUE(loop->shape().IsTuple()); EXPECT_EQ(loop->shape().tuple_shapes_size(), 2); EXPECT_TRUE(loop->shape().tuple_shapes()[1].IsToken()); EXPECT_THAT(loop->operand(0), op::Tuple(op::Parameter(), op::AfterAll())); HloComputation* body_comp = FindComputation(module.get(), "comp"); EXPECT_THAT(body_comp->root_instruction(), op::Tuple(op::GetTupleElement(), op::Outfeed())); HloInstruction* body_param = body_comp->parameter_instruction(0); EXPECT_EQ(body_param->shape().tuple_shapes_size(), 2); EXPECT_TRUE(body_param->shape().tuple_shapes()[1].IsToken()); HloComputation* cond_comp = FindComputation(module.get(), "cond"); HloInstruction* cond_param = cond_comp->parameter_instruction(0); EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 2); EXPECT_TRUE(cond_param->shape().tuple_shapes()[1].IsToken()); } TEST_F(InfeedTokenPropagationTest, NestedInfeedOutfeed) { constexpr std::string_view hlo = R"( HloModule main true_comp { arg.0 = (s32[]) parameter(0) token.0 = after-all() outfeed.0 = token[] outfeed(arg.0, token.0), outfeed_shape=(s32[]) ROOT tuple.0 = tuple() } false_comp { arg.0 = () parameter(0) ROOT tuple.0 = tuple() } comp { arg.0 = () parameter(0) token.0 = after-all() infeed.0 = (s32[], token[]) infeed(token.0) gte.0 = get-tuple-element(infeed.0), index=0 pred.0 = pred[] 
constant(true) true_tuple.0 = tuple(gte.0) false_tuple.0 = tuple() ROOT cond.0 = () conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp } cond { arg.0 = () parameter(0) ROOT true.0 = pred[] constant(true) } ENTRY main { while_tuple.0 = tuple() ROOT while.0 = () while(while_tuple.0), condition=cond, body=comp } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* loop = FindInstruction(module.get(), "while.0"); EXPECT_EQ(loop->shape().tuple_shapes_size(), 2); EXPECT_TRUE(loop->shape().tuple_shapes()[0].IsToken()); EXPECT_TRUE(loop->shape().tuple_shapes()[1].IsToken()); HloInstruction* loop_tuple = FindInstruction(module.get(), "while_tuple.0"); EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 2); EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[0].IsToken()); EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[1].IsToken()); HloComputation* body_comp = FindComputation(module.get(), "comp"); EXPECT_THAT(body_comp->root_instruction(), op::Tuple(op::GetTupleElement(op::Infeed(), 1), op::GetTupleElement(op::Conditional(), 0))); HloInstruction* cond = FindInstruction(module.get(), "cond.0"); EXPECT_EQ(cond->shape().tuple_shapes_size(), 1); EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken()); HloInstruction* true_tuple = FindInstruction(module.get(), "true_tuple.0"); EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 2); EXPECT_TRUE(true_tuple->shape().tuple_shapes()[1].IsToken()); HloInstruction* false_tuple = FindInstruction(module.get(), "false_tuple.0"); EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0); HloComputation* true_comp = FindComputation(module.get(), "true_comp"); EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed())); HloComputation* false_comp = FindComputation(module.get(), "false_comp"); EXPECT_THAT(false_comp->root_instruction(), 
op::Tuple(op::AfterAll())); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/infeed_token_propagation.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/infeed_token_propagation_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
a9a26e45-6caf-4a8e-84d6-4f99d3d89f0a
cpp
tensorflow/tensorflow
while_loop_constant_sinking
third_party/xla/xla/service/while_loop_constant_sinking.cc
third_party/xla/xla/service/while_loop_constant_sinking_test.cc
#include "xla/service/while_loop_constant_sinking.h" #include "absl/algorithm/container.h" #include "absl/container/inlined_vector.h" #include "xla/service/while_util.h" #include "xla/shape_util.h" #include "xla/util.h" namespace xla { namespace { absl::Status ReplaceUsesWhileKeepingLoopInvariance( HloInstruction* old_instr, HloInstruction* new_instr, HloInstruction* while_body_root, int64_t tuple_index) { CHECK_EQ(while_body_root->opcode(), HloOpcode::kTuple); std::vector<HloInstruction*> users; users.reserve(old_instr->user_count()); absl::c_copy(old_instr->users(), std::back_inserter(users)); for (auto* user : users) { for (int64_t i = 0, e = user->operand_count(); i < e; i++) { if (user->operand(i) == old_instr && !(user == while_body_root && i == tuple_index)) { TF_RETURN_IF_ERROR(user->ReplaceOperandWith(i, new_instr)); } } } return absl::OkStatus(); } HloInstruction* CloneHelper(const HloInstruction* instruction, HloComputation* computation) { if (instruction->opcode() == HloOpcode::kConstant) { return computation->AddInstruction(instruction->Clone(".sunk")); } if (instruction->opcode() == HloOpcode::kBroadcast) { return computation->AddInstruction(instruction->CloneWithNewOperands( instruction->shape(), {CloneHelper(instruction->operand(0), computation)})); } LOG(FATAL) << "Unexpected instruction."; } } absl::StatusOr<bool> WhileLoopConstantSinking::TrySinkingConstantsIntoWhileLoop( HloInstruction* while_instr) { HloComputation* while_cond = while_instr->while_condition(); HloComputation* while_body = while_instr->while_body(); const HloInstruction& init_value = *while_instr->operand(0); if (init_value.opcode() != HloOpcode::kTuple) { return false; } bool changed = false; absl::flat_hash_map<int64_t, absl::InlinedVector<HloInstruction*, 1>> conditional_gte_index_to_insts = WhileUtil::GetGTEsMapForWhileConditional(*while_cond); std::vector<HloInstruction*> invariant_body_gtes = WhileUtil::GetInvariantGTEsForWhileBody(*while_body); for (HloInstruction* 
invariant_body_gte : invariant_body_gtes) { int64_t index = invariant_body_gte->tuple_index(); const HloInstruction& invariant_value = *init_value.operand(index); if (invariant_value.opcode() != HloOpcode::kConstant && (!sink_broadcast_of_constants_ || invariant_value.opcode() != HloOpcode::kBroadcast || invariant_value.operand(0)->opcode() != HloOpcode::kConstant)) { continue; } if (sink_only_scalar_constants_) { if (!ShapeUtil::IsScalar(init_value.operand(index)->shape())) { continue; } } if (invariant_body_gte->user_count() > 1) { HloInstruction* constant_instr = CloneHelper(&invariant_value, while_body); TF_RETURN_IF_ERROR(ReplaceUsesWhileKeepingLoopInvariance( invariant_body_gte, constant_instr, while_body->root_instruction(), index)); changed = true; } auto it = conditional_gte_index_to_insts.find(index); if (it == conditional_gte_index_to_insts.end()) { continue; } for (HloInstruction* invariant_cond_gte : it->second) { if (invariant_cond_gte->user_count() > 0) { HloInstruction* constant_instr = CloneHelper(&invariant_value, while_cond); TF_RETURN_IF_ERROR( invariant_cond_gte->ReplaceAllUsesWith(constant_instr)); changed = true; } } } return changed; } absl::StatusOr<bool> WhileLoopConstantSinking::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { VLOG(2) << "HLO module before WhileLoopConstantSinking:"; XLA_VLOG_LINES(2, module->ToString()); bool changed = false; std::vector<HloInstruction*> while_instrs; for (auto* comp : module->MakeNonfusionComputations(execution_threads)) { absl::c_copy_if(comp->instructions(), std::back_inserter(while_instrs), HloPredicateIsOp<HloOpcode::kWhile>); } for (HloInstruction* while_instr : while_instrs) { TF_ASSIGN_OR_RETURN(bool result, TrySinkingConstantsIntoWhileLoop(while_instr)); changed |= result; } if (changed) { VLOG(2) << "HLO module after WhileLoopConstantSinking:"; XLA_VLOG_LINES(2, module->ToString()); } else { VLOG(2) << "HLO module unchanged after 
WhileLoopConstantSinking"; } return changed; } }
#include "xla/service/while_loop_constant_sinking.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/test.h" #include "xla/tests/hlo_test_base.h" namespace xla { namespace { namespace op = xla::testing::opcode_matchers; using ::testing::_; using WhileLoopConstantSinkingTest = HloTestBase; TEST_F(WhileLoopConstantSinkingTest, SinkOneConstant) { const char* const hlo_string = R"( HloModule ModuleWithWhile body { p_body = (f32[2],f32[2]) parameter(0) p_body.0 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=0 p_body.1 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=1 add.0 = f32[2] add(p_body.0, p_body.1) ROOT root = (f32[2],f32[2]) tuple(add.0, p_body.1) } condition { p_cond = (f32[2],f32[2]) parameter(0) ROOT result = pred[] constant(true) } ENTRY entry { const_0 = f32[2] constant({1, 2}) const_1 = f32[2] constant({2, 1}) while_init = (f32[2],f32[2]) tuple(const_0, const_1) ROOT while = (f32[2],f32[2]) while(while_init), condition=condition, body=body } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, WhileLoopConstantSinking(false, true) .Run(module.get())); ASSERT_FALSE(changed); TF_ASSERT_OK_AND_ASSIGN( changed, WhileLoopConstantSinking(false, false) .Run(module.get())); ASSERT_TRUE(changed); auto* while_body = module->GetComputationWithName("body"); EXPECT_THAT(while_body->root_instruction(), op::Tuple(op::Add(_, op::Constant()), _)); } TEST_F(WhileLoopConstantSinkingTest, SinkBroadcastOfConstant) { const char* const hlo_string = R"( HloModule ModuleWithWhile body { p_body = (f32[16],f32[16]) parameter(0) p_body.0 = get-tuple-element(p_body), index=0 p_body.1 = get-tuple-element(p_body), index=1 add.0 = add(p_body.0, p_body.1) ROOT root = tuple(add.0, p_body.1) } condition { p_cond = (f32[16],f32[16]) parameter(0) ROOT result = pred[] constant(true) } ENTRY entry { const_0 = f32[] constant(1) const_1 = f32[] constant(2) broadcast_0 = f32[16] broadcast(const_0), 
dimensions={} broadcast_1 = f32[16] broadcast(const_1), dimensions={} while_init = tuple(broadcast_0, broadcast_1) ROOT while = while(while_init), condition=condition, body=body } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, WhileLoopConstantSinking(false) .Run(module.get())); ASSERT_FALSE(changed); TF_ASSERT_OK_AND_ASSIGN( changed, WhileLoopConstantSinking(true) .Run(module.get())); ASSERT_TRUE(changed); auto* while_body = module->GetComputationWithName("body"); EXPECT_THAT(while_body->root_instruction(), op::Tuple(op::Add(_, op::Broadcast(op::Constant())), _)); } TEST_F(WhileLoopConstantSinkingTest, KeepConstantsLoopInvariant) { const char* const hlo_string = R"( HloModule ModuleWithWhile body { p_body = (f32[2],f32[2],f32[2]) parameter(0) p_body.0 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_body), index=0 p_body.1 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_body), index=1 p_body.2 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_body), index=2 add.0 = f32[2] add(p_body.1, p_body.2) ROOT root = (f32[2],f32[2],f32[2]) tuple(add.0, p_body.1, p_body.2) } condition { p_cond = (f32[2],f32[2],f32[2]) parameter(0) ROOT result = pred[] constant(true) } ENTRY entry { const_0 = f32[2] constant({1, 2}) const_1 = f32[2] constant({2, 1}) const_2 = f32[2] constant({3, 1}) while_init = (f32[2],f32[2],f32[2]) tuple(const_0, const_1, const_2) ROOT while = (f32[2],f32[2],f32[2]) while(while_init), condition=condition, body=body } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, WhileLoopConstantSinking{}.Run(module.get())); ASSERT_TRUE(changed); auto* while_body = module->GetComputationWithName("body"); EXPECT_THAT(while_body->root_instruction(), op::Tuple(op::Add(op::Constant(), op::Constant()), op::GetTupleElement(op::Parameter(0)), op::GetTupleElement(op::Parameter(0)))); } TEST_F(WhileLoopConstantSinkingTest, 
TupleShapedConstants) { const char* const hlo_string = R"( HloModule ModuleWithWhile body { p_b = (f32[2],(f32[2],f32[2])) parameter(0) p_b.0 = f32[2] get-tuple-element((f32[2],(f32[2],f32[2])) p_b), index=0 p_b.1 = (f32[2],f32[2]) get-tuple-element((f32[2],(f32[2],f32[2])) p_b), index=1 p_b.1.1 = f32[2] get-tuple-element(p_b.1), index=0 ROOT root = (f32[2],(f32[2],f32[2])) tuple(p_b.1.1, p_b.1) } condition { p_cond = (f32[2],(f32[2],f32[2])) parameter(0) ROOT result = pred[] constant(true) } ENTRY entry { const_0 = f32[2] constant({1, 2}) const_1 = (f32[2], f32[2]) constant(({2, 1},{3,1})) while_init = (f32[2],(f32[2],f32[2])) tuple(const_0, const_1) ROOT while = (f32[2],(f32[2],f32[2])) while(while_init), condition=condition, body=body } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, WhileLoopConstantSinking{}.Run(module.get())); ASSERT_TRUE(changed); auto* while_body = module->GetComputationWithName("body"); EXPECT_THAT(while_body->root_instruction(), op::Tuple(op::GetTupleElement(op::Constant(), 0), op::GetTupleElement(op::Parameter(0)))); } TEST_F(WhileLoopConstantSinkingTest, DuplicateGTEs) { const char* const hlo_string = R"( HloModule ModuleWithWhile body { p_b = (f32[2],f32[2],f32[2]) parameter(0) p_b.1 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_b), index=1 p_b.2 = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_b), index=2 p_b.2.dup = f32[2] get-tuple-element((f32[2],f32[2],f32[2]) p_b), index=2 add.0 = f32[2] add(p_b.1, p_b.2.dup) ROOT root = (f32[2],f32[2],f32[2]) tuple(add.0, p_b.1, p_b.2) } condition { p_cond = (f32[2],f32[2],f32[2]) parameter(0) ROOT result = pred[] constant(true) } ENTRY entry { const_0 = f32[2] constant({1, 2}) const_1 = f32[2] constant({2, 1}) const_2 = f32[2] constant({3, 1}) while_init = (f32[2],f32[2],f32[2]) tuple(const_0, const_1, const_2) ROOT while = (f32[2],f32[2],f32[2]) while(while_init), condition=condition, body=body } )"; 
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, WhileLoopConstantSinking{}.Run(module.get())); ASSERT_TRUE(changed); auto* while_body = module->GetComputationWithName("body"); EXPECT_THAT(while_body->root_instruction(), op::Tuple(op::Add(op::Constant(), ::testing::Not(op::Constant())), op::GetTupleElement(op::Parameter(0)), op::GetTupleElement(op::Parameter(0)))); } TEST_F(WhileLoopConstantSinkingTest, DontCreateDeadConstant) { const char* const hlo_string = R"( HloModule ModuleWithWhile body { p_body = (f32[2],f32[2]) parameter(0) p_body.0 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=0 p_body.1 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=1 token0 = token[] after-all() outfeed = token[] outfeed(p_body.0, token0) ROOT root = (f32[2],f32[2],f32[2]) tuple(p_body.0, p_body.1, p_body.1) } condition { p_cond = (f32[2],f32[2]) parameter(0) ROOT result = pred[] constant(true) } ENTRY entry { const_0 = f32[2] constant({1, 2}) const_1 = f32[2] constant({2, 1}) while_init = (f32[2],f32[2]) tuple(const_0, const_1) ROOT while = (f32[2],f32[2],f32[2]) while(while_init), condition=condition, body=body } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, WhileLoopConstantSinking{}.Run(module.get())); ASSERT_TRUE(changed); auto* while_body = module->GetComputationWithName("body"); EXPECT_THAT(while_body->root_instruction(), op::Tuple(op::GetTupleElement(), op::GetTupleElement(), op::GetTupleElement())); for (const HloInstruction* inst : while_body->instructions()) { if (inst->opcode() == HloOpcode::kConstant) { EXPECT_GT(inst->user_count(), 0); } } } TEST_F(WhileLoopConstantSinkingTest, ConditionalSinkConstant) { const char* const hlo_string = R"( HloModule ModuleWithWhile body { p_body = (f32[],f32[]) parameter(0) p_body.0 = f32[] get-tuple-element((f32[],f32[]) p_body), index=0 const = f32[] constant(1) add = f32[] 
add(p_body.0, const) p_body.1 = f32[] get-tuple-element((f32[],f32[]) p_body), index=1 ROOT root = (f32[],f32[]) tuple(add, p_body.1) } condition { p_cond = (f32[],f32[]) parameter(0) p_cond.0 = f32[] get-tuple-element((f32[],f32[]) p_cond), index=0 p_cond.1 = f32[] get-tuple-element((f32[],f32[]) p_cond), index=1 ROOT result = pred[] compare(p_cond.0, p_cond.1), direction=LT } ENTRY entry { const_0 = f32[] constant(0) const_1 = f32[] constant(10) while_init = (f32[],f32[]) tuple(const_0, const_1) ROOT while = (f32[],f32[]) while(while_init), condition=condition, body=body } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, WhileLoopConstantSinking{}.Run(module.get())); ASSERT_TRUE(changed); auto* while_condition = module->GetComputationWithName("condition"); EXPECT_THAT(while_condition->root_instruction(), op::Lt(_, op::Constant())); } TEST_F(WhileLoopConstantSinkingTest, ConditionalTupleShapedConstants) { const char* const hlo_string = R"( HloModule ModuleWithWhile body { p_b = (f32[],(f32[],f32[])) parameter(0) p_b.0 = f32[] get-tuple-element((f32[],(f32[],f32[])) p_b), index=0 p_b.1 = (f32[],f32[]) get-tuple-element((f32[],(f32[],f32[])) p_b), index=1 p_b.1.0 = f32[] get-tuple-element((f32[],f32[]) p_b.1), index=0 add = f32[] add(p_b.0, p_b.1.0) ROOT root = (f32[],(f32[],f32[])) tuple(add, p_b.1) } condition { p_c = (f32[],(f32[],f32[])) parameter(0) p_c.0 = f32[] get-tuple-element((f32[],(f32[],f32[])) p_c), index=0 p_c.1 = (f32[],f32[]) get-tuple-element((f32[],(f32[],f32[])) p_c), index=1 p_c.1.1 = f32[] get-tuple-element((f32[],f32[]) p_c.1), index=1 ROOT result = pred[] compare(p_c.0, p_c.1.1), direction=LT } ENTRY entry { const_0 = f32[] constant(0) const_1 = (f32[], f32[]) constant((1, 10)) while_init = (f32[],(f32[],f32[])) tuple(const_0, const_1) ROOT while = (f32[],(f32[],f32[])) while(while_init), condition=condition, body=body } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, 
ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, WhileLoopConstantSinking{}.Run(module.get())); ASSERT_TRUE(changed); auto* while_condition = module->GetComputationWithName("condition"); EXPECT_THAT(while_condition->root_instruction(), op::Lt(_, op::GetTupleElement(op::Constant()))); } TEST_F(WhileLoopConstantSinkingTest, ConditionalDontCreateDeadConstant) { const char* const hlo_string = R"( HloModule ModuleWithWhile body { p_body = (f32[],f32[],f32[]) parameter(0) p_body.0 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=0 const = f32[] constant(1) add = f32[] add(p_body.0, const) p_body.1 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=1 p_body.2 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=2 ROOT root = (f32[],f32[],f32[]) tuple(add, p_body.1, p_body.2) } condition { p_cond = (f32[],f32[],f32[]) parameter(0) p_cond.0 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=0 p_cond.1 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=1 p_cond.2 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=2 ROOT result = pred[] compare(p_cond.0, p_cond.1), direction=LT } ENTRY entry { const_0 = f32[] constant(0) const_1 = f32[] constant(10) const_2 = f32[] constant(12) while_init = (f32[],f32[],f32[]) tuple(const_0, const_1, const_2) ROOT while = (f32[],f32[],f32[]) while(while_init), condition=condition, body=body } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, WhileLoopConstantSinking{}.Run(module.get())); ASSERT_TRUE(changed); auto* while_condition = module->GetComputationWithName("condition"); EXPECT_THAT(while_condition->root_instruction(), op::Lt(_, op::Constant())); for (const HloInstruction* inst : while_condition->instructions()) { if (inst->opcode() == HloOpcode::kConstant) { EXPECT_GT(inst->user_count(), 0); } } } TEST_F(WhileLoopConstantSinkingTest, ConditionalMultipleSameIndexGTEs) { const 
char* const hlo_string = R"( HloModule ModuleWithWhile body { p_body = (f32[],f32[],f32[]) parameter(0) p_body.0 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=0 const = f32[] constant(1) add.0 = f32[] add(p_body.0, const) p_body.1 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=1 add.1 = f32[] add(p_body.1, const) p_body.2 = f32[] get-tuple-element((f32[],f32[],f32[]) p_body), index=2 ROOT root = (f32[],f32[],f32[]) tuple(add.0, add.1, p_body.2) } condition { p_cond = (f32[],f32[],f32[]) parameter(0) p_cond.0 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=0 p_cond.2 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=2 lt.0 = pred[] compare(p_cond.0, p_cond.2), direction=LT p_cond.1 = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=1 p_cond.2.c = f32[] get-tuple-element((f32[],f32[],f32[]) p_cond), index=2 lt.1 = pred[] compare(p_cond.1, p_cond.2.c), direction=LT ROOT result = pred[] and(lt.0, lt.1) } ENTRY entry { const_0 = f32[] constant(0) const_1 = f32[] constant(0) const_2 = f32[] constant(12) while_init = (f32[],f32[],f32[]) tuple(const_0, const_1, const_2) ROOT while = (f32[],f32[],f32[]) while(while_init), condition=condition, body=body } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, WhileLoopConstantSinking{}.Run(module.get())); ASSERT_TRUE(changed); auto* while_condition = module->GetComputationWithName("condition"); EXPECT_THAT(while_condition->root_instruction(), op::And(op::Lt(_, op::Constant()), op::Lt(_, op::Constant()))); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_constant_sinking.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_constant_sinking_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
9131b767-6a50-48e5-8e87-4d8d59db6611
cpp
tensorflow/tensorflow
rendezvous
tensorflow/core/framework/rendezvous.cc
tensorflow/core/framework/rendezvous_test.cc
#include "tensorflow/core/framework/rendezvous.h" #include <deque> #include <functional> #include <utility> #include <vector> #include "tensorflow/core/framework/local_rendezvous.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/notification.h" #include "tensorflow/core/lib/gtl/flatmap.h" #include "tensorflow/core/lib/gtl/manual_constructor.h" #include "tensorflow/core/lib/hash/hash.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/thread_annotations.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { Rendezvous::ParsedKey& Rendezvous::ParsedKey::operator=(const ParsedKey& b) { const char* b_base = b.buf_.data(); buf_ = b.buf_; src_device = StringPiece(buf_.data() + (b.src_device.data() - b_base), b.src_device.size()); src = b.src; src_incarnation = b.src_incarnation; dst_device = StringPiece(buf_.data() + (b.dst_device.data() - b_base), b.dst_device.size()); dst = b.dst; edge_name = StringPiece(buf_.data() + (b.edge_name.data() - b_base), b.edge_name.size()); return *this; } string Rendezvous::CreateKey(const string& src_device, uint64 src_incarnation, const string& dst_device, const string& name, const FrameAndIter& frame_iter) { char buf[strings::kFastToBufferSize]; return strings::StrCat( src_device, ";", strings::Uint64ToHexString(src_incarnation, buf), ";", dst_device, ";", name, ";", frame_iter.frame_id, ":", frame_iter.iter_id); } static StringPiece ConsumeNextPart(StringPiece* s, char delim) { for (size_t offset = 0; offset < s->size(); offset++) { if ((*s)[offset] == delim) { StringPiece result(s->data(), offset); s->remove_prefix(offset + 1); return result; } } StringPiece result(s->data(), s->size()); s->remove_prefix(s->size()); return result; } Status Rendezvous::ParseKey(StringPiece key, ParsedKey* out) { if (key.data() == 
out->buf_.data()) { DCHECK_EQ(key.size(), out->buf_.size()); } else { out->buf_.assign(key.data(), key.size()); } StringPiece s(out->buf_); StringPiece parts[5]; for (int i = 0; i < 5; i++) { parts[i] = ConsumeNextPart(&s, ';'); } if (s.empty() && !parts[4].empty() && DeviceNameUtils::ParseFullName(parts[0], &out->src) && strings::HexStringToUint64(parts[1], &out->src_incarnation) && DeviceNameUtils::ParseFullName(parts[2], &out->dst) && !parts[3].empty()) { out->src_device = StringPiece(parts[0].data(), parts[0].size()); out->dst_device = StringPiece(parts[2].data(), parts[2].size()); out->edge_name = StringPiece(parts[3].data(), parts[3].size()); return absl::OkStatus(); } return errors::InvalidArgument("Invalid rendezvous key: ", key); } RendezvousInterface::~RendezvousInterface() {} Status RendezvousInterface::Recv(const ParsedKey& key, const Args& recv_args, Tensor* val, bool* is_dead, int64_t timeout_ms) { Status ret; Notification n; RecvAsync(key, recv_args, [&ret, &n, val, is_dead](const Status& s, const Args& send_args, const Args& recv_args, const Tensor& v, const bool dead) { ret = s; *val = v; *is_dead = dead; n.Notify(); }); if (timeout_ms > 0) { int64_t timeout_us = timeout_ms * 1000; bool notified = WaitForNotificationWithTimeout(&n, timeout_us); if (!notified) { return Status(absl::StatusCode::kDeadlineExceeded, "Timed out waiting for notification"); } } else { n.WaitForNotification(); } return ret; } Status RendezvousInterface::Recv(const ParsedKey& key, const Args& args, Tensor* val, bool* is_dead) { const int64_t no_timeout = 0; return Recv(key, args, val, is_dead, no_timeout); } namespace { class LocalRendezvousWrapper : public Rendezvous { public: LocalRendezvousWrapper(int num_shards) : impl_(this, num_shards) {} Status Send(const ParsedKey& key, const Args& send_args, const Tensor& val, const bool is_dead) override { return impl_.Send(key, send_args, val, is_dead); } void RecvAsync(const ParsedKey& key, const Args& recv_args, DoneCallback 
done) override { impl_.RecvAsync(key, recv_args, std::move(done)); } void StartAbort(const Status& status) override { impl_.StartAbort(status); } private: LocalRendezvous impl_; LocalRendezvousWrapper(const LocalRendezvousWrapper&) = delete; void operator=(const LocalRendezvousWrapper&) = delete; }; } Rendezvous* NewLocalRendezvous(int num_shards) { return new LocalRendezvousWrapper(num_shards); } }
#include "tensorflow/core/framework/rendezvous.h" #include "absl/status/status.h" #include "unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/cancellation.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/notification.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/core/threadpool.h" #include "tensorflow/core/lib/random/simple_philox.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/notification.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace { TEST(RendezvousTest, Key) { const string key = Rendezvous::CreateKey( "/job:mnist/replica:1/task:2/CPU:0", 7890, "/job:mnist/replica:1/task:2/device:GPU:0", "var0", FrameAndIter(0, 0)); EXPECT_EQ(key, "/job:mnist/replica:1/task:2/CPU:0;" "0000000000001ed2;" "/job:mnist/replica:1/task:2/device:GPU:0;" "var0;" "0:0"); Rendezvous::ParsedKey parsed; TF_EXPECT_OK(Rendezvous::ParseKey(key, &parsed)); EXPECT_EQ(parsed.src_device, "/job:mnist/replica:1/task:2/CPU:0"); EXPECT_EQ(parsed.src_incarnation, 7890); EXPECT_EQ(parsed.src.type, "CPU"); EXPECT_EQ(parsed.dst_device, "/job:mnist/replica:1/task:2/device:GPU:0"); EXPECT_EQ(parsed.dst.type, "GPU"); EXPECT_FALSE(Rendezvous::ParseKey("foo;bar;baz", &parsed).ok()); EXPECT_FALSE(Rendezvous::ParseKey("/job:mnist/replica:1/task:2/CPU:0;" "/job:mnist/replica:1/task:2/device:GPU:0;", &parsed) .ok()); EXPECT_FALSE( Rendezvous::ParseKey(strings::StrCat(key, ";", key), &parsed).ok()); } class LocalRendezvousTest : 
public ::testing::Test { public: LocalRendezvousTest() : threads_(Env::Default(), "test", 16) { rendez_ = NewLocalRendezvous(); } ~LocalRendezvousTest() override { rendez_->Unref(); } void SchedClosure(std::function<void()> fn) { threads_.Schedule(std::move(fn)); } Rendezvous* rendez_; private: thread::ThreadPool threads_; }; Tensor V(const string& content) { Tensor tensor(DT_STRING, TensorShape({})); tensor.scalar<tstring>()() = content; return tensor; } string V(const Tensor& tensor) { CHECK_EQ(tensor.dtype(), DT_STRING); CHECK(TensorShapeUtils::IsScalar(tensor.shape())); return tensor.scalar<tstring>()(); } Rendezvous::ParsedKey MakeKey(const string& name) { string s = Rendezvous::CreateKey("/job:mnist/replica:1/task:2/CPU:0", 7890, "/job:mnist/replica:1/task:2/device:GPU:0", name, FrameAndIter(0, 0)); Rendezvous::ParsedKey k; TF_EXPECT_OK(Rendezvous::ParseKey(s, &k)); return k; } const Rendezvous::ParsedKey& KeyFoo() { static auto* key = new Rendezvous::ParsedKey(MakeKey("foo")); return *key; } const Rendezvous::ParsedKey& KeyBar() { static auto* key = new Rendezvous::ParsedKey(MakeKey("bar")); return *key; } TEST_F(LocalRendezvousTest, SendRecv) { Rendezvous::Args args; TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V("hello"), false)); Tensor val(DT_STRING); bool is_dead = false; TF_ASSERT_OK(rendez_->Recv(KeyFoo(), args, &val, &is_dead)); EXPECT_EQ("hello", V(val)); } TEST_F(LocalRendezvousTest, RecvSend) { SchedClosure([this]() { Env::Default()->SleepForMicroseconds(10000); Rendezvous::Args args; TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V("hello"), false)); }); Tensor val(DT_STRING); bool is_dead = false; Rendezvous::Args args; TF_ASSERT_OK(rendez_->Recv(KeyFoo(), args, &val, &is_dead)); EXPECT_EQ("hello", V(val)); } TEST_F(LocalRendezvousTest, PingPong) { SchedClosure([this]() { Tensor t(DT_STRING); bool is_dead = false; Rendezvous::Args args; TF_ASSERT_OK(rendez_->Recv(KeyFoo(), args, &t, &is_dead)); TF_ASSERT_OK(rendez_->Send(KeyBar(), args, t, is_dead)); 
}); Env::Default()->SleepForMicroseconds(1000000); Tensor val(DT_STRING); bool val_dead = false; Rendezvous::Args args; TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V("secret msg"), val_dead)); TF_ASSERT_OK(rendez_->Recv(KeyBar(), args, &val, &val_dead)); EXPECT_EQ("secret msg", V(val)); } TEST_F(LocalRendezvousTest, CancelBeforeRecv) { auto* cm = new CancellationManager(); Tensor val(DT_STRING); bool is_dead = false; Rendezvous::Args args; args.cancellation_manager = cm; cm->StartCancel(); auto s = rendez_->Recv(KeyFoo(), args, &val, &is_dead); EXPECT_FALSE(s.ok()); EXPECT_TRUE(absl::IsCancelled(s)); EXPECT_EQ("RecvAsync is cancelled.", s.message()); delete cm; } TEST_F(LocalRendezvousTest, CancelAfterRecv) { auto* cm = new CancellationManager(); Notification n; SchedClosure([cm, &n]() { Env::Default()->SleepForMicroseconds(10000); cm->StartCancel(); n.Notify(); }); Tensor val(DT_STRING); bool is_dead = false; Rendezvous::Args args; args.cancellation_manager = cm; auto s = rendez_->Recv(KeyFoo(), args, &val, &is_dead); EXPECT_FALSE(s.ok()); EXPECT_TRUE(absl::IsCancelled(s)); EXPECT_EQ("RecvAsync is cancelled.", s.message()); n.WaitForNotification(); delete cm; } TEST_F(LocalRendezvousTest, CancelEmptyQueue) { auto* cm = new CancellationManager(); Notification n; SchedClosure([this, cm, &n]() { Env::Default()->SleepForMicroseconds(10000); Rendezvous::Args args; TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V("hello"), false)); cm->StartCancel(); n.Notify(); }); Tensor val(DT_STRING); bool is_dead = false; Rendezvous::Args args; args.cancellation_manager = cm; TF_ASSERT_OK(rendez_->Recv(KeyFoo(), args, &val, &is_dead)); EXPECT_EQ("hello", V(val)); n.WaitForNotification(); delete cm; } TEST_F(LocalRendezvousTest, CancelMultiple) { auto* cm = new CancellationManager(); SchedClosure([this, cm]() { Env::Default()->SleepForMicroseconds(10000); Rendezvous::Args args; cm->StartCancel(); TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V("hello"), false)); 
TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V("hello"), false)); }); Tensor val(DT_STRING); Rendezvous::Args args; Rendezvous::Args args_with_cancellation; args_with_cancellation.cancellation_manager = cm; Notification n0; Notification n1; Notification n2; Notification n3; Status s0; Status s1; Status s2; Status s3; rendez_->RecvAsync( KeyFoo(), args, [&n0, &s0](const Status& s, const Rendezvous::Args& send_args, const Rendezvous::Args& recv_args, const Tensor& v, const bool dead) { s0.Update(s); n0.Notify(); }); rendez_->RecvAsync( KeyFoo(), args_with_cancellation, [&n1, &s1](const Status& s, const Rendezvous::Args& send_args, const Rendezvous::Args& recv_args, const Tensor& v, const bool dead) { s1.Update(s); n1.Notify(); }); rendez_->RecvAsync( KeyFoo(), args, [&n2, &s2](const Status& s, const Rendezvous::Args& send_args, const Rendezvous::Args& recv_args, const Tensor& v, const bool dead) { s2.Update(s); n2.Notify(); }); rendez_->RecvAsync( KeyFoo(), args_with_cancellation, [&n3, &s3](const Status& s, const Rendezvous::Args& send_args, const Rendezvous::Args& recv_args, const Tensor& v, const bool dead) { s3.Update(s); n3.Notify(); }); n0.WaitForNotification(); n1.WaitForNotification(); n2.WaitForNotification(); n3.WaitForNotification(); TF_ASSERT_OK(s0); TF_ASSERT_OK(s2); EXPECT_FALSE(s1.ok()); EXPECT_FALSE(s3.ok()); delete cm; } struct BlockingState { mutex lock; int counter = 0; Notification done; }; TEST_F(LocalRendezvousTest, RandomSendRecv) { static const int N = 100; random::PhiloxRandom philox(testing::RandomSeed(), 17); random::SimplePhilox rnd(&philox); BlockingState state; state.counter = N; for (int i = 0; i < N; ++i) { int micros = 100 + rnd.Uniform(1000); SchedClosure([this, i, micros]() { Env::Default()->SleepForMicroseconds(micros); Rendezvous::Args args; TF_ASSERT_OK(rendez_->Send(MakeKey(strings::StrCat(i)), args, V(strings::StrCat(i)), false)); }); auto recv_done = [this, &state, i](const Status& status, const Rendezvous::Args& sender_args, 
const Rendezvous::Args& recver_args, const Tensor& val, const bool val_dead) { EXPECT_EQ(strings::StrCat(i), V(val)); bool done = false; { mutex_lock l(state.lock); state.counter--; if (state.counter == 0) { done = true; } } if (done) { state.done.Notify(); } }; micros = 100 + rnd.Uniform(1000); SchedClosure([this, i, micros, recv_done]() { Env::Default()->SleepForMicroseconds(micros); rendez_->RecvAsync(MakeKey(strings::StrCat(i)), Rendezvous::Args(), recv_done); }); } state.done.WaitForNotification(); } void RandomSleep() { if (std::rand() % 10 == 0) { Env::Default()->SleepForMicroseconds(1000); } } TEST_F(LocalRendezvousTest, MultiSends) { static const int N = 100; const auto& key_foo = KeyFoo(); Rendezvous::Args args; SchedClosure([=]() { for (int i = 0; i < N; ++i) { TF_ASSERT_OK(rendez_->Send(key_foo, args, V(strings::StrCat(i)), false)); RandomSleep(); } }); Tensor val; bool val_dead; for (int i = 0; i < N; ++i) { TF_ASSERT_OK(rendez_->Recv(key_foo, args, &val, &val_dead)); RandomSleep(); } } TEST_F(LocalRendezvousTest, RecvAbort) { rendez_->Ref(); SchedClosure([this]() { rendez_->StartAbort(errors::Aborted("")); rendez_->Unref(); }); Tensor val(DT_STRING); bool val_dead = false; Rendezvous::Args args; Status status = rendez_->Recv(KeyFoo(), args, &val, &val_dead); EXPECT_TRUE(absl::IsAborted(status)); } TEST_F(LocalRendezvousTest, RecvSleepAbort) { rendez_->Ref(); SchedClosure([this]() { Env::Default()->SleepForMicroseconds(1000000); rendez_->StartAbort(errors::Aborted("")); rendez_->Unref(); }); Tensor val(DT_STRING); bool val_dead = false; Rendezvous::Args args; Status status = rendez_->Recv(KeyFoo(), args, &val, &val_dead); EXPECT_TRUE(absl::IsAborted(status)); } TEST_F(LocalRendezvousTest, AbortThenRecvOrSend) { rendez_->StartAbort(errors::Aborted("")); Tensor val(DT_STRING); bool val_dead = false; Rendezvous::Args args; EXPECT_TRUE(absl::IsAborted(rendez_->Send(KeyFoo(), args, val, val_dead))); EXPECT_TRUE(absl::IsAborted(rendez_->Recv(KeyFoo(), args, 
&val, &val_dead))); } class DummyDeviceContext : public DeviceContext { public: explicit DummyDeviceContext(int stream_id) : stream_id_(stream_id) {} ~DummyDeviceContext() override {} int stream_id() const { return stream_id_; } void CopyTensorInSameDevice(const Tensor* input_tensor, Device* device, Tensor* output_tensor, StatusCallback done) const override { done(absl::OkStatus()); } private: const int stream_id_; }; TEST_F(LocalRendezvousTest, TransferDummyDeviceContext) { Rendezvous::Args args; args.device_context = new DummyDeviceContext(123); TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V("hello"), false)); Notification n; Rendezvous::Args args1; args1.device_context = new DummyDeviceContext(1); rendez_->RecvAsync( KeyFoo(), args1, [&n](const Status& s, const Rendezvous::Args& send_args, const Rendezvous::Args& recv_args, const Tensor& val, bool is_dead) { CHECK_EQ(123, dynamic_cast<const DummyDeviceContext*>( send_args.device_context) ->stream_id()); n.Notify(); }); n.WaitForNotification(); args.device_context->Unref(); args1.device_context->Unref(); } void BM_SendRecv(::testing::benchmark::State& state) { Rendezvous* rendez = NewLocalRendezvous(); Tensor orig = V("val"); Tensor val(DT_STRING, TensorShape({})); bool is_dead = false; Rendezvous::Args args; for (auto s : state) { TF_CHECK_OK(rendez->Send(KeyFoo(), args, orig, is_dead)); TF_CHECK_OK(rendez->Recv(KeyFoo(), args, &val, &is_dead)); } CHECK_EQ(V(val), V(orig)); rendez->Unref(); } BENCHMARK(BM_SendRecv); void BM_RecvSend(::testing::benchmark::State& state) { Rendezvous* rendez = NewLocalRendezvous(); Tensor orig = V("val"); Tensor val(DT_STRING, TensorShape({})); bool is_dead = false; Rendezvous::Args args; for (auto s : state) { bool received = false; rendez->RecvAsync( KeyFoo(), args, [&val, &received](const Status& , const Rendezvous::Args& , const Rendezvous::Args& , const Tensor& tensor, bool ) { val = tensor; received = true; }); TF_CHECK_OK(rendez->Send(KeyFoo(), args, orig, is_dead)); 
CHECK(received); } CHECK_EQ(V(val), V(orig)); rendez->Unref(); } BENCHMARK(BM_RecvSend); void BM_PingPong(::testing::benchmark::State& state) { const int messages_count = state.range(0); auto* cm = new CancellationManager(); thread::ThreadPool* pool = new thread::ThreadPool(Env::Default(), "test", 1); for (auto s : state) { Rendezvous* rendez = NewLocalRendezvous(); pool->Schedule([rendez, messages_count]() { Tensor bar = V("bar"); Tensor foo(DT_STRING, TensorShape({})); bool is_dead = false; Rendezvous::Args args; for (int i = 0; i < messages_count; ++i) { TF_CHECK_OK(rendez->Recv(KeyFoo(), args, &foo, &is_dead)); TF_CHECK_OK(rendez->Send(KeyBar(), args, bar, is_dead)); } CHECK_EQ("foo", V(foo)); }); Tensor foo = V("foo"); Tensor bar(DT_STRING, TensorShape({})); bool is_dead = false; Rendezvous::Args args; args.cancellation_manager = cm; for (int i = 0; i < messages_count; ++i) { TF_CHECK_OK(rendez->Send(KeyFoo(), args, foo, is_dead)); TF_CHECK_OK(rendez->Recv(KeyBar(), args, &bar, &is_dead)); } CHECK_EQ("bar", V(bar)); rendez->Unref(); } state.SetItemsProcessed(messages_count * state.iterations()); delete pool; delete cm; } BENCHMARK(BM_PingPong)->Arg(100)->Arg(200)->Arg(300); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/rendezvous.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/rendezvous_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ecfc7b9e-9c2a-42a9-b642-6e26f02538cb
cpp
tensorflow/tensorflow
operand_upcaster
third_party/xla/xla/service/operand_upcaster.cc
third_party/xla/xla/service/operand_upcaster_test.cc
#include "xla/service/operand_upcaster.h"

#include <optional>

#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace {

// Runs shape inference for the opcodes this pass understands (dot and
// convolution). Any other opcode yields nullopt (not an error).
absl::StatusOr<std::optional<Shape>> MaybeInferShape(
    const HloInstruction* instruction) {
  switch (instruction->opcode()) {
    case HloOpcode::kDot:
      return ShapeInference::InferDotOpShape(
          instruction->operand(0)->shape(), instruction->operand(1)->shape(),
          instruction->dot_dimension_numbers(),
          /*preferred_element_type=*/std::nullopt,
          Cast<HloDotInstruction>(instruction)->sparsity());
    case HloOpcode::kConvolution:
      return ShapeInference::InferConvolveShape(
          instruction->operand(0)->shape(), instruction->operand(1)->shape(),
          instruction->feature_group_count(), instruction->batch_group_count(),
          instruction->window(), instruction->convolution_dimension_numbers(),
          /*preferred_element_type=*/std::nullopt);
    default:
      return std::optional<Shape>(std::nullopt);
  }
}

}  // namespace

bool OperandUpcaster::InstructionMatchesPattern(HloInstruction* instruction) {
  auto status_or_inferred_shape = MaybeInferShape(instruction);
  if (!status_or_inferred_shape.ok() ||
      !status_or_inferred_shape->has_value()) {
    return false;
  }

  // Packed-nibble dots/convs are always expanded, whatever the element types.
  if (absl::c_count(instruction->precision_config().operand_precision(),
                    PrecisionConfig::PACKED_NIBBLE) == 2) {
    return true;
  }

  // Nothing to do when the operands and result already agree with the
  // inferred element type.
  PrimitiveType inferred_type = (*status_or_inferred_shape)->element_type();
  if (instruction->shape().element_type() == inferred_type &&
      instruction->operand(0)->shape().element_type() == inferred_type &&
      instruction->operand(1)->shape().element_type() == inferred_type) {
    return false;
  }
  return ShapeUtil::ElementCanUpcast(**status_or_inferred_shape,
                                     instruction->shape());
}

absl::StatusOr<HloInstruction*> OperandUpcaster::ExpandInstruction(
    HloInstruction* instruction) {
  const bool packed_nibble =
      absl::c_count(instruction->precision_config().operand_precision(),
                    PrecisionConfig::PACKED_NIBBLE) == 2;
  auto type = instruction->shape().element_type();

  if (packed_nibble) {
    // Each s8/u8 operand packs two 4-bit values; unpack both nibbles of each
    // operand via shifts, run two ordinary dots/convs, and add the results.
    HloInstruction *lhs_n0 = instruction->mutable_operand(0), *lhs_n1 = lhs_n0,
                   *rhs_n0 = instruction->mutable_operand(1), *rhs_n1 = rhs_n0;
    // Low nibble of the LHS: shift left then (sign-preserving) right by 4.
    TF_ASSIGN_OR_RETURN(lhs_n0, MakeBinaryHlo(HloOpcode::kShiftLeft, lhs_n0,
                                              MakeScalarLike(lhs_n0, 4)));
    HloOpcode lhs_shift = ShapeUtil::ElementIsSigned(lhs_n0->shape())
                              ? HloOpcode::kShiftRightArithmetic
                              : HloOpcode::kShiftRightLogical;
    TF_ASSIGN_OR_RETURN(
        lhs_n0, MakeBinaryHlo(lhs_shift, lhs_n0, MakeScalarLike(lhs_n0, 4)));
    lhs_n0 = MakeConvertToHlo(lhs_n0, type);
    // High nibble of the LHS: one right shift by 4.
    TF_ASSIGN_OR_RETURN(
        lhs_n1, MakeBinaryHlo(lhs_shift, lhs_n1, MakeScalarLike(lhs_n1, 4)));
    lhs_n1 = MakeConvertToHlo(lhs_n1, type);
    // Same unpacking for the RHS.
    TF_ASSIGN_OR_RETURN(rhs_n0, MakeBinaryHlo(HloOpcode::kShiftLeft, rhs_n0,
                                              MakeScalarLike(rhs_n0, 4)));
    HloOpcode rhs_shift = ShapeUtil::ElementIsSigned(rhs_n0->shape())
                              ? HloOpcode::kShiftRightArithmetic
                              : HloOpcode::kShiftRightLogical;
    TF_ASSIGN_OR_RETURN(
        rhs_n0, MakeBinaryHlo(rhs_shift, rhs_n0, MakeScalarLike(rhs_n0, 4)));
    rhs_n0 = MakeConvertToHlo(rhs_n0, type);
    TF_ASSIGN_OR_RETURN(
        rhs_n1, MakeBinaryHlo(rhs_shift, rhs_n1, MakeScalarLike(rhs_n1, 4)));
    rhs_n1 = MakeConvertToHlo(rhs_n1, type);

    // Two clones of the original op over the unpacked operands, with the
    // PACKED_NIBBLE precision reset to DEFAULT.
    HloInstruction* linear_n0 =
        instruction->parent()->AddInstruction(instruction->CloneWithNewOperands(
            instruction->shape(), {lhs_n0, rhs_n0}));
    linear_n0->mutable_precision_config()->mutable_operand_precision()->Set(
        0, PrecisionConfig::DEFAULT);
    linear_n0->mutable_precision_config()->mutable_operand_precision()->Set(
        1, PrecisionConfig::DEFAULT);
    HloInstruction* linear_n1 =
        instruction->parent()->AddInstruction(linear_n0->CloneWithNewOperands(
            instruction->shape(), {lhs_n1, rhs_n1}));
    return MakeBinaryHlo(HloOpcode::kAdd, linear_n0, linear_n1);
  }

  // Ordinary path: wrap each mismatched operand in a convert to the result
  // element type, in place.
  for (int i = 0; i < HloDotInstruction::kOperands; ++i) {
    auto* operand = instruction->mutable_operand(i);
    if (operand->shape().element_type() == type) {
      continue;
    }
    auto upcast_shape = operand->shape();
    upcast_shape.set_element_type(type);
    auto* convert_inst = instruction->AddInstruction(
        HloInstruction::CreateConvert(upcast_shape, operand));
    TF_RETURN_IF_ERROR(
        instruction->ReplaceOperandWithDifferentShape(i, convert_inst));
  }
  // nullptr tells the expander the instruction was modified in place.
  return nullptr;
}

}  // namespace xla
#include "xla/service/operand_upcaster.h"

#include <memory>
#include <tuple>

#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/primitive_util.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace {

namespace op = ::xla::testing::opcode_matchers;

// Parameterized over (lhs type, rhs type, result type).
class OperandUpcasterTest
    : public HloTestBase,
      public ::testing::WithParamInterface<
          std::tuple<PrimitiveType, PrimitiveType, PrimitiveType>> {};

// An operand needs a convert exactly when its type differs from the result
// type and the result type is the higher-precision of the two.
bool ShouldUpcast(PrimitiveType operand_type, PrimitiveType result_type) {
  return operand_type != result_type &&
         primitive_util::HigherPrecisionType(operand_type, result_type) ==
             result_type;
}

TEST_P(OperandUpcasterTest, ConvertInserted) {
  PrimitiveType lhs_type, rhs_type, result_type;
  std::tie(lhs_type, rhs_type, result_type) = GetParam();
  absl::string_view module_tmpl = R"(
HloModule module

ENTRY main {
  p0 = $0[2,3]{1,0} parameter(0)
  p1 = $1[3,2]{1,0} parameter(1)
  ROOT dot = $2[2,2]{1,0} dot(p0, p1), lhs_contracting_dims={1},
                                       rhs_contracting_dims={0}
})";
  auto module_string = absl::Substitute(
      module_tmpl, primitive_util::LowercasePrimitiveTypeName(lhs_type),
      primitive_util::LowercasePrimitiveTypeName(rhs_type),
      primitive_util::LowercasePrimitiveTypeName(result_type));
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  TF_ASSERT_OK_AND_ASSIGN(bool upcasted, OperandUpcaster().Run(module.get()));
  EXPECT_EQ(upcasted, ShouldUpcast(lhs_type, result_type) ||
                          ShouldUpcast(rhs_type, result_type));
  auto original_lhs = op::Parameter(0);
  auto original_rhs = op::Parameter(1);
  auto upcasted_lhs =
      ShouldUpcast(lhs_type, result_type)
          ? AllOf(op::Convert(original_lhs),
                  op::Shape(absl::Substitute(
                      "$0[2,3]{1,0}",
                      primitive_util::LowercasePrimitiveTypeName(result_type))))
          : original_lhs;
  auto upcasted_rhs =
      ShouldUpcast(rhs_type, result_type)
          ? AllOf(op::Convert(original_rhs),
                  op::Shape(absl::Substitute(
                      "$0[3,2]{1,0}",
                      primitive_util::LowercasePrimitiveTypeName(result_type))))
          : original_rhs;
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      AllOf(op::Dot(upcasted_lhs, upcasted_rhs),
            op::Shape(absl::Substitute(
                "$0[2,2]{1,0}",
                primitive_util::LowercasePrimitiveTypeName(result_type)))));
}

INSTANTIATE_TEST_SUITE_P(S16U16, OperandUpcasterTest,
                         ::testing::Values(std::make_tuple(S8, S8, S16),
                                           std::make_tuple(U8, U8, U16)));

INSTANTIATE_TEST_SUITE_P(S32, OperandUpcasterTest,
                         ::testing::Combine(::testing::Values(S8, U8, S16),
                                            ::testing::Values(S8, U8, S16),
                                            ::testing::Values(S32)));

INSTANTIATE_TEST_SUITE_P(U32, OperandUpcasterTest,
                         ::testing::Combine(::testing::Values(U8, U16),
                                            ::testing::Values(U8, U16),
                                            ::testing::Values(U32)));

INSTANTIATE_TEST_SUITE_P(BF16, OperandUpcasterTest,
                         ::testing::Combine(::testing::Values(BF16, S8, U8),
                                            ::testing::Values(BF16, S8, U8),
                                            ::testing::Values(BF16)));

INSTANTIATE_TEST_SUITE_P(F32, OperandUpcasterTest,
                         ::testing::Combine(::testing::Values(BF16, F16),
                                            ::testing::Values(BF16, F16),
                                            ::testing::Values(F32)));

INSTANTIATE_TEST_SUITE_P(NoUpcast, OperandUpcasterTest,
                         ::testing::Values(std::make_tuple(F32, F32, BF16),
                                           std::make_tuple(S32, S32, U32)));

// Sparse dots keep their metadata operand; only lhs/rhs get converts.
TEST_F(OperandUpcasterTest, SparseDot) {
  absl::string_view kHlo = R"(
HloModule module

ENTRY main {
  p0 = bf16[2,16]{1,0} parameter(0)
  p1 = bf16[32,2]{1,0} parameter(1)
  meta = u16[2,2]{1,0} parameter(2)
  ROOT dot = f32[2,2]{1,0} dot(p0, p1, meta),
      lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHlo));
  TF_ASSERT_OK_AND_ASSIGN(bool upcasted, OperandUpcaster().Run(module.get()));
  EXPECT_TRUE(upcasted);
  auto upcasted_lhs =
      AllOf(op::Convert(op::Parameter(0)), op::Shape("f32[2,16]{1,0}"));
  auto upcasted_rhs =
      AllOf(op::Convert(op::Parameter(1)), op::Shape("f32[32,2]{1,0}"));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              AllOf(::testing::MakeMatcher(new ::xla::testing::HloMatcher(
                        HloOpcode::kDot,
                        {upcasted_lhs, upcasted_rhs, op::Parameter(2)})),
                    op::Shape("f32[2,2]{1,0}")));
}

}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/operand_upcaster.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/operand_upcaster_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
b969274a-0550-41b7-9010-91181907524b
cpp
tensorflow/tensorflow
dot_merger
third_party/xla/xla/service/dot_merger.cc
third_party/xla/xla/service/dot_merger_test.cc
#include "xla/service/dot_merger.h"

#include <cstdint>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/protobuf_util.h"
#include "xla/service/graphcycles/graphcycles.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace {

// Tries to merge dots `a` and `b` that share one operand by concatenating the
// non-shared operands, issuing one larger dot, and slicing the result back
// apart. Returns the merged dot, or nullptr when the pair is not mergeable
// (which is not an error).
absl::StatusOr<HloInstruction*> TryMergeSameOperand(HloInstruction* a,
                                                    HloInstruction* b) {
  if (a->shape().layout() != b->shape().layout()) {
    VLOG(3) << "Can't merge dots because they have a different layout:\n"
            << "\t" << a->ToString() << "\n"
            << "\t" << b->ToString();
    return nullptr;
  }

  if (a->operand(0) != b->operand(0) && a->operand(1) != b->operand(1)) {
    VLOG(4) << "Can't merge dots because they don't share an operand.\n"
            << "\t" << a->ToString() << "\n"
            << "\t" << b->ToString();
    return nullptr;
  }

  if (a->operand(0)->shape().element_type() !=
          b->operand(0)->shape().element_type() ||
      a->operand(1)->shape().element_type() !=
          b->operand(1)->shape().element_type() ||
      a->shape().element_type() != b->shape().element_type()) {
    VLOG(3)
        << "Can't merge dots because their lhs/rhs/return-types don't match.\n"
        << "\t" << a->ToString() << "\n"
        << "\t" << b->ToString();
    return nullptr;
  }

  const DotDimensionNumbers& dnums_a = a->dot_dimension_numbers();
  const DotDimensionNumbers& dnums_b = b->dot_dimension_numbers();
  if (!absl::c_equal(dnums_a.lhs_batch_dimensions(),
                     dnums_b.lhs_batch_dimensions()) ||
      !absl::c_equal(dnums_a.rhs_batch_dimensions(),
                     dnums_b.rhs_batch_dimensions()) ||
      !absl::c_equal(dnums_a.lhs_contracting_dimensions(),
                     dnums_b.lhs_contracting_dimensions()) ||
      !absl::c_equal(dnums_a.rhs_contracting_dimensions(),
                     dnums_b.rhs_contracting_dimensions())) {
    VLOG(3) << "Can't merge dots because they have mismatching dnums.\n"
            << "\t" << a->ToString() << "\n"
            << "\t" << b->ToString() << "\n"
            << absl::c_equal(dnums_a.lhs_batch_dimensions(),
                             dnums_b.lhs_batch_dimensions())
            << ", "
            << absl::c_equal(dnums_a.rhs_batch_dimensions(),
                             dnums_b.rhs_batch_dimensions())
            << ", "
            << absl::c_equal(dnums_a.lhs_contracting_dimensions(),
                             dnums_b.lhs_contracting_dimensions())
            << ", "
            << absl::c_equal(dnums_a.rhs_contracting_dimensions(),
                             dnums_b.rhs_contracting_dimensions());
    return nullptr;
  }

  if (!absl::c_equal(a->precision_config().operand_precision(),
                     b->precision_config().operand_precision())) {
    VLOG(3) << "Can't merge dots because they have mismatching operand "
               "precisions:\n"
            << "\t" << a->ToString() << "\n"
            << "\t" << b->ToString();
    return nullptr;
  }

  HloDotInstruction* dot_a = Cast<HloDotInstruction>(a);
  HloDotInstruction* dot_b = Cast<HloDotInstruction>(b);
  if (!absl::c_equal(dot_a->sparsity(), dot_b->sparsity(),
                     protobuf_util::ProtobufEquals)) {
    VLOG(3) << "Can't merge dots because they have mismatching sparsity "
               "descriptors:\n"
            << "\t" << a->ToString() << "\n"
            << "\t" << b->ToString();
    return nullptr;
  }

  VLOG(2) << "Merging dots sharing an operand:\n"
          << "\t" << a->ToString() << "\n"
          << "\t" << b->ToString();

  const DotDimensionNumbers& dnums = a->dot_dimension_numbers();
  bool lhs_same = a->operand(0) == b->operand(0);
  HloInstruction* shared_op = a->mutable_operand(lhs_same ? 0 : 1);
  HloInstruction* diff_op_a = a->mutable_operand(lhs_same ? 1 : 0);
  HloInstruction* diff_op_b = b->mutable_operand(lhs_same ? 1 : 0);
  if (diff_op_a->shape().layout() != diff_op_b->shape().layout()) {
    VLOG(3) << "Can't merge dots because the different operands have a "
               "different layout:\n"
            << "\t" << diff_op_a->ToString() << "\n"
            << "\t" << diff_op_b->ToString();
    return nullptr;
  }

  CHECK_EQ(dnums.lhs_batch_dimensions_size(),
           dnums.rhs_batch_dimensions_size());

  // Dimensions of the non-shared operand that are consumed as batch or
  // contracting dims; the remaining one is where we concatenate.
  std::set<int64_t> used_dims;
  int64_t shared_op_num_non_contracting_dims =
      shared_op->shape().rank() - dnums.lhs_batch_dimensions_size();
  if (lhs_same) {
    shared_op_num_non_contracting_dims -=
        dnums.lhs_contracting_dimensions_size();
    used_dims.insert(dnums.rhs_contracting_dimensions().begin(),
                     dnums.rhs_contracting_dimensions().end());
    used_dims.insert(dnums.rhs_batch_dimensions().begin(),
                     dnums.rhs_batch_dimensions().end());
  } else {
    shared_op_num_non_contracting_dims -=
        dnums.rhs_contracting_dimensions_size();
    used_dims.insert(dnums.lhs_contracting_dimensions().begin(),
                     dnums.lhs_contracting_dimensions().end());
    used_dims.insert(dnums.lhs_batch_dimensions().begin(),
                     dnums.lhs_batch_dimensions().end());
  }

  if (used_dims.size() + 1 != diff_op_a->shape().rank()) {
    VLOG(3)
        << "Can't merge dots because the different operands don't have exactly "
           "one non-contracting dimension:\n"
        << "\t" << a->ToString() << "\n"
        << "\t" << b->ToString();
    return nullptr;
  }

  // The concat dimension is the first one not occupied by batch/contracting.
  int64_t outer_dim = 0;
  for (auto used_dim : used_dims) {
    if (used_dim != outer_dim) {
      break;
    }
    ++outer_dim;
  }

  std::vector<SparsityDescriptor> sparsity(dot_a->sparsity().begin(),
                                           dot_a->sparsity().end());
  std::vector<HloInstruction*> sparse_meta(sparsity.size());
  for (int i = 0; i < sparsity.size(); ++i) {
    HloInstruction* meta = a->mutable_operand(HloDotInstruction::kOperands + i);
    HloInstruction* other_meta =
        b->mutable_operand(HloDotInstruction::kOperands + i);
    if (sparsity[i].index() == (lhs_same ? 1 : 0)) {
      // Sparsity sits on the concatenated side: concatenate metadata too.
      TF_ASSIGN_OR_RETURN(
          Shape meta_concat_shape,
          ShapeInference::InferConcatOpShape(
              {&meta->shape(), &other_meta->shape()}, outer_dim));
      meta = meta->AddInstruction(HloInstruction::CreateConcatenate(
          meta_concat_shape, {meta, other_meta}, outer_dim));
    } else {
      // Sparsity sits on the shared side: both dots must use the same
      // metadata operand.
      if (other_meta != meta) {
        VLOG(3)
            << "Can't merge dots because the sparsity metadata is different:\n"
            << "\t" << a->ToString() << "\n"
            << "\t" << b->ToString();
        return nullptr;
      }
    }
    sparse_meta[i] = meta;
  }

  TF_ASSIGN_OR_RETURN(
      Shape concat_shape,
      ShapeInference::InferConcatOpShape(
          {&diff_op_a->shape(), &diff_op_b->shape()}, outer_dim));
  *concat_shape.mutable_layout() = diff_op_a->shape().layout();
  HloInstruction* concat_op =
      diff_op_a->AddInstruction(HloInstruction::CreateConcatenate(
          concat_shape, {diff_op_a, diff_op_b}, outer_dim));

  HloInstruction* dot_lhs = lhs_same ? shared_op : concat_op;
  HloInstruction* dot_rhs = lhs_same ? concat_op : shared_op;
  TF_ASSIGN_OR_RETURN(
      Shape new_dot_shape,
      ShapeInference::InferDotOpShape(dot_lhs->shape(), dot_rhs->shape(), dnums,
                                      a->shape().element_type(), sparsity));
  *new_dot_shape.mutable_layout() = a->shape().layout();

  HloInstruction* new_dot = a->AddInstruction(
      HloInstruction::CreateDot(new_dot_shape, dot_lhs, dot_rhs, dnums,
                                a->precision_config(), sparsity, sparse_meta));

  // Preserve op metadata from whichever original dot carries some.
  if (!a->metadata().op_name().empty()) {
    new_dot->set_metadata(a->metadata());
  } else if (!b->metadata().op_name().empty()) {
    new_dot->set_metadata(b->metadata());
  }

  // Slice the combined result so each original consumer sees its own piece.
  DimensionVector start_indices(new_dot_shape.dimensions_size(), 0);
  DimensionVector limit_indices(new_dot_shape.dimensions().begin(),
                                new_dot_shape.dimensions().end());
  DimensionVector strides(new_dot_shape.dimensions_size(), 1);

  int64_t slice_dim = new_dot_shape.dimensions_size() -
                      (lhs_same ? 1 : 1 + shared_op_num_non_contracting_dims);
  limit_indices[slice_dim] = a->shape().dimensions(slice_dim);
  HloInstruction* new_a = a->AddInstruction(HloInstruction::CreateSlice(
      a->shape(), new_dot, start_indices, limit_indices, strides));
  TF_RETURN_IF_ERROR(a->ReplaceAllUsesWith(new_a));

  start_indices[slice_dim] = limit_indices[slice_dim];
  limit_indices[slice_dim] = new_dot_shape.dimensions(slice_dim);
  HloInstruction* new_b = b->AddInstruction(HloInstruction::CreateSlice(
      b->shape(), new_dot, start_indices, limit_indices, strides));
  TF_RETURN_IF_ERROR(b->ReplaceAllUsesWith(new_b));

  return new_dot;
}

// Greedily merges mergeable dot pairs inside one computation. Returns true if
// anything changed.
absl::StatusOr<bool> MergeDots(HloComputation* comp,
                               int64_t max_size_to_merge) {
  // A dot qualifies for merging when its operands plus result are small
  // enough; only one dot of a merged pair needs to qualify.
  auto is_merge_candidate = [&](HloInstruction* instr) {
    int64_t bytes = ShapeUtil::ByteSizeOfElements(instr->shape());
    for (const HloInstruction* operand : instr->operands()) {
      bytes += ShapeUtil::ByteSizeOfElements(operand->shape());
    }
    return bytes <= max_size_to_merge;
  };

  // Group dots by operand: only dots sharing an operand can merge. Dots with
  // control deps are excluded up front.
  absl::flat_hash_map<HloInstruction*, absl::flat_hash_set<HloInstruction*>>
      equivalence_classes;
  for (HloInstruction* instr : comp->instructions()) {
    if (instr->opcode() != HloOpcode::kDot ||
        !instr->control_predecessors().empty() ||
        !instr->control_successors().empty()) {
      continue;
    }
    for (HloInstruction* operand : instr->operands()) {
      equivalence_classes[operand].insert(instr);
    }
  }

  // Drop classes that cannot produce a merge.
  absl::erase_if(
      equivalence_classes,
      [&](const std::pair<const HloInstruction*,
                          absl::flat_hash_set<HloInstruction*>>& kv) {
        const auto& v = kv.second;
        return v.size() < 2 || absl::c_none_of(v, is_merge_candidate);
      });

  if (equivalence_classes.empty()) {
    return false;
  }

  // Dependency graph used to refuse merges that would create a cycle (one
  // dot transitively feeding the other).
  GraphCycles graph;
  absl::flat_hash_map<HloInstruction*, int32_t> graph_ids_map;
  auto graph_id = [&](HloInstruction* instr) {
    auto it_and_inserted = graph_ids_map.emplace(instr, -1);
    auto it = it_and_inserted.first;
    auto inserted = it_and_inserted.second;
    if (inserted) {
      it->second = graph.NewNode();
    }
    return it->second;
  };
  for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
    int32_t id = graph_id(instr);
    for (HloInstruction* operand : instr->operands()) {
      CHECK(graph.InsertEdge(graph_id(operand), id));
    }
    for (HloInstruction* control_pred : instr->control_predecessors()) {
      CHECK(graph.InsertEdge(graph_id(control_pred), id));
    }
  }

  absl::flat_hash_set<HloInstruction*> dead_instrs;
  std::vector<HloInstruction*> keys;
  keys.reserve(equivalence_classes.size());
  for (auto& kv : equivalence_classes) {
    keys.push_back(kv.first);
  }
  // Iterate in unique-id order for determinism.
  absl::c_sort(keys, [](const HloInstruction* a, const HloInstruction* b) {
    return a->unique_id() < b->unique_id();
  });
  for (auto key : keys) {
    const auto& values = equivalence_classes[key];
    absl::InlinedVector<HloInstruction*, 16> dots(values.begin(), values.end());
    absl::c_sort(dots, [](const HloInstruction* a, const HloInstruction* b) {
      return a->unique_id() < b->unique_id();
    });
    for (int64_t i = 0; i < dots.size(); i++) {
      HloInstruction*& a = dots[i];
      if (a == nullptr) {
        continue;
      }
      for (int64_t j = i + 1; j < dots.size(); j++) {
        HloInstruction* b = dots[j];
        if (b == nullptr) {
          continue;
        }
        int32_t a_id = graph_id(a);
        int32_t b_id = graph_id(b);
        if (dead_instrs.contains(a) || dead_instrs.contains(b) ||
            (!is_merge_candidate(a) && !is_merge_candidate(b)) ||
            graph.IsReachableNonConst(a_id, b_id) ||
            graph.IsReachableNonConst(b_id, a_id)) {
          continue;
        }
        TF_ASSIGN_OR_RETURN(HloInstruction * merged, TryMergeSameOperand(a, b));
        if (merged != nullptr) {
          // The merged dot inherits the graph edges of both originals so
          // later reachability checks stay sound.
          int32_t merged_id = graph_id(merged);
          graph.InsertEdge(a_id, merged_id);
          graph.InsertEdge(b_id, merged_id);
          for (int32_t succ : graph.SuccessorsCopy(a_id)) {
            graph.InsertEdge(merged_id, succ);
          }
          for (int32_t succ : graph.SuccessorsCopy(b_id)) {
            graph.InsertEdge(merged_id, succ);
          }
          dead_instrs.insert(a);
          dead_instrs.insert(b);
          dots[i] = merged;
          dots[j] = nullptr;
        }
      }
    }
  }
  for (HloInstruction* instr : dead_instrs) {
    TF_RETURN_IF_ERROR(comp->RemoveInstruction(instr));
  }
  return !dead_instrs.empty();
}

}  // namespace

absl::StatusOr<bool> DotMerger::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool changed = false;
  for (HloComputation* comp :
       module->MakeNonfusionComputations(execution_threads)) {
    TF_ASSIGN_OR_RETURN(bool changed_computation,
                        MergeDots(comp, max_size_to_merge_));
    changed |= changed_computation;
  }
  return changed;
}

}  // namespace xla
#include "xla/service/dot_merger.h" #include <cstdint> #include <limits> #include <memory> #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/algebraic_simplifier.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/statusor.h" namespace xla { namespace { namespace m = ::xla::match; class DotMergerTest : public HloTestBase { public: DotMergerTest() : HloTestBase(false, false) {} }; TEST_F(DotMergerTest, MergeRHS) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs = f32[200,100] parameter(0) rhs0 = f32[100, 10] parameter(1) rhs1 = f32[100, 50] parameter(2) dot0 = f32[200, 10] dot(lhs, rhs0), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot1 = f32[200, 50] dot(lhs, rhs1), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT tuple = (f32[200,10], f32[200,50]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); const HloInstruction* dot0 = nullptr; const HloInstruction* dot1 = nullptr; EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Tuple(m::Slice(m::Op(&dot0)), m::Slice(m::Op(&dot1))))); EXPECT_EQ(dot0, dot1); EXPECT_THAT(dot0, GmockMatch(m::Dot(m::Parameter(0), m::Concatenate().WithBinaryOperandsAnyOrder( m::Parameter(1), m::Parameter(2))))); } TEST_F(DotMergerTest, MergeRHSWithLayouts) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs = f32[200,100] parameter(0) rhs0 = f32[100, 10]{0,1} parameter(1) rhs1 = f32[100, 50]{0,1} parameter(2) dot0 = 
f32[200, 10] dot(lhs, rhs0), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot1 = f32[200, 50] dot(lhs, rhs1), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT tuple = (f32[200,10], f32[200,50]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); const HloInstruction* dot0 = nullptr; const HloInstruction* dot1 = nullptr; EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Tuple(m::Slice(m::Op(&dot0)), m::Slice(m::Op(&dot1))))); EXPECT_EQ(dot0, dot1); Shape expected_concat_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {100, 60}, {0, 1}); EXPECT_THAT( dot0, GmockMatch(m::Dot(m::Parameter(0), m::Concatenate() .WithBinaryOperandsAnyOrder(m::Parameter(1), m::Parameter(2)) .WithShapeEqualTo(&expected_concat_shape)))); } TEST_F(DotMergerTest, NoMergeDifferentLayoutRHS) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs = f32[200,100] parameter(0) rhs0 = f32[100, 10]{0,1} parameter(1) rhs1 = f32[100, 50]{1,0} parameter(2) dot0 = f32[200, 10] dot(lhs, rhs0), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot1 = f32[200, 50] dot(lhs, rhs1), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT tuple = (f32[200,10], f32[200,50]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_FALSE(changed); } TEST_F(DotMergerTest, MergeLHS) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs0 = f32[100,200] parameter(0) lhs1 = f32[300,200] parameter(1) rhs = f32[200, 50] parameter(2) dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, 
rhs_contracting_dims={0} dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT tuple = (f32[100,50], f32[300,50]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Tuple(m::Slice(), m::Slice()))); } TEST_F(DotMergerTest, MergeLHSDotsWithNonDefaultLayout) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs0 = f32[100,200] parameter(0) lhs1 = f32[300,200] parameter(1) rhs = f32[200, 50] parameter(2) dot0 = f32[100, 50]{0,1} dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot1 = f32[300, 50]{0,1} dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT tuple = (f32[100,50]{0,1}, f32[300,50]{0,1}) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); Shape expected_dot_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {400, 50}, {0, 1}); const HloInstruction* dot0 = nullptr; const HloInstruction* dot1 = nullptr; EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Tuple(m::Slice(m::Dot(&dot0, m::Op(), m::Op()) .WithShapeEqualTo(&expected_dot_shape)), m::Slice(m::Dot(&dot1, m::Op(), m::Op()))))); EXPECT_EQ(dot0, dot1); } TEST_F(DotMergerTest, NoMergeDifferentLayoutLHS) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs0 = f32[100,200]{1,0} parameter(0) lhs1 = f32[300,200]{0,1} parameter(1) rhs = f32[200, 50] parameter(2) dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot1 
= f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT tuple = (f32[100,50], f32[300,50]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_FALSE(changed); } TEST_F(DotMergerTest, NoMergeDifferentDotLayout) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs0 = f32[100,200] parameter(0) lhs1 = f32[300,200] parameter(1) rhs = f32[200, 50] parameter(2) dot0 = f32[100, 50]{0,1} dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot1 = f32[300, 50]{1,0} dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT tuple = (f32[100,50]{0,1}, f32[300,50]{1,0}) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_FALSE(changed); } TEST_F(DotMergerTest, MergeThree) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs0 = f32[100,200] parameter(0) lhs1 = f32[300,200] parameter(1) lhs2 = f32[500,200] parameter(2) rhs = f32[200, 50] parameter(3) dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot2 = f32[500, 50] dot(lhs2, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT tuple = (f32[100,50], f32[300,50], f32[500,50]) tuple(dot0, dot1, dot2) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); AlgebraicSimplifier 
algsimp{AlgebraicSimplifierOptions{}}; TF_ASSERT_OK(this->RunHloPass(&algsimp, module.get()).status()); const HloInstruction* s0 = nullptr; const HloInstruction* s1 = nullptr; const HloInstruction* s2 = nullptr; SCOPED_TRACE(module->ToString()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::Slice(m::Dot( &s0, m::Concatenate(m::Parameter(0), m::Parameter(1), m::Parameter(2)), m::Parameter(3))), m::Slice(m::Op(&s1)), m::Slice(m::Op(&s2))))); EXPECT_EQ(s0, s1); EXPECT_EQ(s1, s2); } TEST_F(DotMergerTest, NoMergeThreeDueToCycle) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs0 = f32[100,200] parameter(0) lhs1 = f32[300,200] parameter(1) rhs = f32[200, 50] parameter(2) dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} zero = f32[] constant(0) lhs2 = f32[500,200] pad(dot0, zero), padding=400_0x150_0 dot2 = f32[500, 50] dot(lhs2, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT tuple = (f32[100,50], f32[300,50], f32[500,50]) tuple(dot0, dot1, dot2) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); AlgebraicSimplifier algsimp{AlgebraicSimplifierOptions{}}; TF_ASSERT_OK(this->RunHloPass(&algsimp, module.get()).status()); const HloInstruction* s0 = nullptr; const HloInstruction* s1 = nullptr; const HloInstruction* s2 = nullptr; SCOPED_TRACE(module->ToString()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::Slice(m::Dot(&s0, m::Concatenate(m::Parameter(0), m::Parameter(1)), m::Parameter(2))), m::Slice(m::Op(&s1)), m::Dot(&s2, m::Op(), m::Parameter(2))))); EXPECT_EQ(s0, s1); EXPECT_NE(s0, s2); } TEST_F(DotMergerTest, 
NoMergeDataDependency) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs0 = f32[100,200] parameter(0) rhs = f32[200, 50] parameter(1) dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} zero = f32[] constant(0) lhs1 = f32[300,200] pad(dot0, zero), padding=200_0x150_0 dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT tuple = (f32[100,50], f32[300,50]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_FALSE(changed); } TEST_F(DotMergerTest, MergeSameContractingDimsOnBothSides) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs0 = f32[100,200] parameter(0) lhs1 = f32[300,200] parameter(1) rhs = f32[50, 200] parameter(2) dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={1} dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={1} ROOT tuple = (f32[100,50], f32[300,50]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Tuple(m::Slice(), m::Slice()))); } TEST_F(DotMergerTest, MergeWithBatchDims) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs0 = f32[2,4,100,200] parameter(0) lhs1 = f32[2,4,300,200] parameter(1) rhs = f32[2,4,200, 50] parameter(2) dot0 = f32[2,4,100, 50] dot(lhs0, rhs), lhs_batch_dims={0,1}, rhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_contracting_dims={2} dot1 = f32[2,4,300, 50] dot(lhs1, rhs), lhs_batch_dims={0,1}, 
rhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_contracting_dims={2} ROOT tuple = (f32[2,4,100,50], f32[2,4,300,50]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Tuple(m::Slice(), m::Slice()))); } TEST_F(DotMergerTest, MergeWithBatchDimsAndMultipleContractingDims) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs = f32[2,3,4,5] parameter(0) rhs0 = f32[2,6,3,4,5] parameter(1) rhs1 = f32[2,7,3,4,5] parameter(2) dot0 = f32[2,4,6] dot(lhs, rhs0), lhs_batch_dims={0,2}, rhs_batch_dims={0,3}, lhs_contracting_dims={1,3}, rhs_contracting_dims={2,4} dot1 = f32[2,4,7] dot(lhs, rhs1), lhs_batch_dims={0,2}, rhs_batch_dims={0,3}, lhs_contracting_dims={1,3}, rhs_contracting_dims={2,4} ROOT tuple = (f32[2,4,6], f32[2,4,7]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); TF_ASSERT_OK(verifier().Run(module.get()).status()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Tuple(m::Slice(), m::Slice()))); } TEST_F(DotMergerTest, MergeWithUnsortedBatchDims) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs0 = f32[2,4,100,200] parameter(0) lhs1 = f32[2,4,300,200] parameter(1) rhs = f32[2,4,200, 50] parameter(2) dot0 = f32[4,2,100, 50] dot(lhs0, rhs), lhs_batch_dims={1,0}, rhs_batch_dims={1,0}, lhs_contracting_dims={3}, rhs_contracting_dims={2} dot1 = f32[4,2,300, 50] dot(lhs1, rhs), lhs_batch_dims={1,0}, rhs_batch_dims={1,0}, lhs_contracting_dims={3}, rhs_contracting_dims={2} 
ROOT tuple = (f32[4,2,100,50], f32[4,2,300,50]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Tuple(m::Slice(), m::Slice()))); } TEST_F(DotMergerTest, NoMergeDueToIsMergeCandidate) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs0 = f32[100,200] parameter(0) lhs1 = f32[300,200] parameter(1) lhs2 = f32[500,200] parameter(2) rhs = f32[200, 50] parameter(3) dot0 = f32[100, 50] dot(lhs0, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot1 = f32[300, 50] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot2 = f32[500, 50] dot(lhs2, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT tuple = (f32[100,50], f32[300,50], f32[500,50]) tuple(dot0, dot1, dot2) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass((100 * 50 + 100 * 200 + 200 * 50) * sizeof(float)); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); const HloInstruction* s0 = nullptr; const HloInstruction* s1 = nullptr; const HloInstruction* s2 = nullptr; SCOPED_TRACE(module->ToString()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::Slice(m::Dot(&s0, m::Concatenate(m::Parameter(0), m::Parameter(1)), m::Parameter(3))), m::Slice(m::Op(&s1)), m::Dot(&s2, m::Parameter(2), m::Parameter(3))))); EXPECT_EQ(s0, s1); EXPECT_NE(s0, s2); } TEST_F(DotMergerTest, NoMergeDifferentLhsBatchDims) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs0 = f32[10,10,10,10] parameter(0) lhs1 = f32[10,10,10,10] parameter(1) rhs = f32[10,10,10,10] parameter(2) dot0 = f32[10,10,10,10] dot(lhs0, 
rhs), lhs_batch_dims={0,1}, rhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_contracting_dims={2} dot1 = f32[10,10,10,10] dot(lhs1, rhs), lhs_batch_dims={0,2}, rhs_batch_dims={0,1}, lhs_contracting_dims={1}, rhs_contracting_dims={2} ROOT tuple = (f32[10,10,10,10], f32[10,10,10,10]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_FALSE(changed); } TEST_F(DotMergerTest, NoMergeDifferentRhsBatchDims) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs0 = f32[10,10,10,10] parameter(0) lhs1 = f32[10,10,10,10] parameter(1) rhs = f32[10,10,10,10] parameter(2) dot0 = f32[10,10,10,10] dot(lhs0, rhs), lhs_batch_dims={0,1}, rhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_contracting_dims={2} dot1 = f32[10,10,10,10] dot(lhs1, rhs), lhs_batch_dims={0,1}, rhs_batch_dims={0,2}, lhs_contracting_dims={2}, rhs_contracting_dims={1} ROOT tuple = (f32[10,10,10,10], f32[10,10,10,10]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_FALSE(changed); } TEST_F(DotMergerTest, MergeMultipleContractingDims) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs0 = f32[10,10,10] parameter(0) lhs1 = f32[10,10,10] parameter(1) rhs = f32[10,10,10] parameter(2) dot0 = f32[10,10] dot(lhs0, rhs), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1} dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1} ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger 
pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); const HloInstruction* s0 = nullptr; const HloInstruction* s1 = nullptr; SCOPED_TRACE(module->ToString()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::Slice(m::Dot(&s0, m::Concatenate(m::Parameter(0), m::Parameter(1)), m::Parameter(2))), m::Slice(m::Op(&s1))))); EXPECT_EQ(s0, s1); } TEST_F(DotMergerTest, MergeMultipleNonContractingDimsInRhsSharedOperand) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs0 = f32[8,9,10] parameter(0) lhs1 = f32[8,9,11] parameter(1) rhs = f32[8,9,12,13] parameter(2) dot0 = f32[10,12,13] dot(lhs0, rhs), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1} dot1 = f32[11,12,13] dot(lhs1, rhs), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1} ROOT tuple = (f32[10,12,13], f32[11,12,13]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); TF_ASSERT_OK(verifier().Run(module.get()).status()); const HloInstruction* s0 = nullptr; const HloInstruction* s1 = nullptr; SCOPED_TRACE(module->ToString()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::Slice(m::Dot(&s0, m::Concatenate(m::Parameter(0), m::Parameter(1)), m::Parameter(2))), m::Slice(m::Op(&s1))))); EXPECT_EQ(s0, s1); } TEST_F(DotMergerTest, NoMergeMultipleOuterDims) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs0 = f32[10,10,10] parameter(0) lhs1 = f32[10,10,10] parameter(1) rhs = f32[10,10,10] parameter(2) dot0 = f32[10,10,10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0} dot1 = f32[10,10,10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0} 
ROOT tuple = (f32[10,10,10,10], f32[10,10,10,10]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_FALSE(changed); } TEST_F(DotMergerTest, NoMergeDifferentLhsContractingDims) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs0 = f32[10,10] parameter(0) lhs1 = f32[10,10] parameter(1) rhs = f32[10,10] parameter(2) dot0 = f32[10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0} dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_FALSE(changed); } TEST_F(DotMergerTest, NoMergeDifferentRhsContractingDims) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs0 = f32[10,10] parameter(0) lhs1 = f32[10,10] parameter(1) rhs = f32[10,10] parameter(2) dot0 = f32[10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0} dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={1} ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_FALSE(changed); } TEST_F(DotMergerTest, NoMergeControlPredecessor) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs0 = f32[10,10] parameter(0) lhs1 = f32[10,10] parameter(1) rhs = f32[10,10] parameter(2) dot0 = f32[10,10] dot(lhs0, 
rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0} dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0} dot2 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}, control-predecessors={dot1} ROOT tuple = (f32[10,10], f32[10,10], f32[10,10]) tuple(dot0, dot1, dot2) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_FALSE(changed); } TEST_F(DotMergerTest, NoMergeDifferentLhsTypes) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs0 = f32[10,10] parameter(0) lhs1 = f16[10,10] parameter(1) rhs = f32[10,10] parameter(2) dot0 = f32[10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0} dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0} ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_FALSE(changed); } TEST_F(DotMergerTest, NoMergeDifferentRhsTypes) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs = f32[10,10] parameter(0) rhs0 = f32[10,10] parameter(1) rhs1 = f16[10,10] parameter(2) dot0 = f32[10,10] dot(lhs, rhs0), lhs_contracting_dims={0}, rhs_contracting_dims={0} dot1 = f32[10,10] dot(lhs, rhs1), lhs_contracting_dims={0}, rhs_contracting_dims={0} ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_FALSE(changed); } 
TEST_F(DotMergerTest, NoMergeDifferentReturnTypes) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs0 = f16[10,10] parameter(0) lhs1 = f16[10,10] parameter(1) rhs = f16[10,10] parameter(2) dot0 = f16[10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0} dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0} ROOT tuple = (f16[10,10], f32[10,10]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_FALSE(changed); } TEST_F(DotMergerTest, MergeWithTypeUpgrade) { absl::string_view module_string = R"( HloModule module ENTRY main { lhs0 = f16[10,10] parameter(0) lhs1 = f16[10,10] parameter(1) rhs = f16[10,10] parameter(2) dot0 = f32[10,10] dot(lhs0, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0} dot1 = f32[10,10] dot(lhs1, rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0} ROOT tuple = (f32[10,10], f32[10,10]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); SCOPED_TRACE(module->ToString()); EXPECT_TRUE(changed); const HloInstruction* d0 = nullptr; const HloInstruction* d1 = nullptr; EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::Slice(m::Dot(&d0, m::Concatenate(m::Parameter(0), m::Parameter(1)), m::Parameter(2)) .WithShape(F32, {20, 10})), m::Slice(m::Op(&d1))))); EXPECT_EQ(d0, d1); } TEST_F(DotMergerTest, MergeSparseDotsSameMetadata) { absl::string_view kHlo = R"( HloModule test ENTRY main { lhs0 = f16[5,10,32] parameter(0) lhs1 = f16[5,10,32] parameter(1) rhs = f16[5,10,16] parameter(2) meta = u16[5,10,2] parameter(3) dot0 = 
f32[5,10,10] dot(lhs0, rhs, meta), sparsity=R.2@2:4, lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={2} dot1 = f32[5,10,10] dot(lhs1, rhs, meta), sparsity=R.2@2:4, lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={2} ROOT tuple = (f32[5,10,10], f32[5,10,10]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHlo)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); const HloInstruction *d0, *d1; EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::Slice(m::Op(&d0) .WithOpcode(HloOpcode::kDot) .WithOperand(0, m::Concatenate(m::Parameter(0), m::Parameter(1))) .WithOperand(1, m::Parameter(2)) .WithOperand(2, m::Parameter(3)) .WithShape(F32, {5, 20, 10})), m::Slice(m::Op(&d1))))); EXPECT_EQ(d0, d1); EXPECT_EQ(d0->operand(2)->shape(), ShapeUtil::MakeShape(U16, {5, 10, 2})); } TEST_F(DotMergerTest, MergeSparseDotsConcatMetadata) { absl::string_view kHlo = R"( HloModule test ENTRY main { lhs0 = f16[5,10,16] parameter(0) lhs1 = f16[5,10,16] parameter(1) rhs = f16[5,10,32] parameter(2) meta0 = u16[5,10,2] parameter(3) meta1 = u16[5,10,2] parameter(4) dot0 = f32[5,10,10] dot(lhs0, rhs, meta0), sparsity=L.2@2:4, lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={2} dot1 = f32[5,10,10] dot(lhs1, rhs, meta1), sparsity=L.2@2:4, lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={2} ROOT tuple = (f32[5,10,10], f32[5,10,10]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHlo)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); const HloInstruction *d0, *d1; 
EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::Slice(m::Op(&d0) .WithOpcode(HloOpcode::kDot) .WithOperand(0, m::Concatenate(m::Parameter(0), m::Parameter(1))) .WithOperand(1, m::Parameter(2)) .WithOperand(2, m::Concatenate(m::Parameter(3), m::Parameter(4))) .WithShape(F32, {5, 20, 10})), m::Slice(m::Op(&d1))))); EXPECT_EQ(d0, d1); EXPECT_EQ(d0->operand(2)->shape(), ShapeUtil::MakeShape(U16, {5, 20, 2})); } TEST_F(DotMergerTest, MergeSparseDotsDifferentMetadata) { absl::string_view kHlo = R"( HloModule test ENTRY main { lhs0 = f16[5,10,32] parameter(0) lhs1 = f16[5,10,32] parameter(1) rhs = f16[5,10,16] parameter(2) meta1 = u16[5,10,2] parameter(3) meta2 = u16[5,10,2] parameter(4) dot0 = f32[5,10,10] dot(lhs0, rhs, meta1), sparsity=R.2@2:4, lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={2} dot1 = f32[5,10,10] dot(lhs1, rhs, meta2), sparsity=R.2@2:4, lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={2} ROOT tuple = (f32[5,10,10], f32[5,10,10]) tuple(dot0, dot1) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHlo)); DotMerger pass(std::numeric_limits<int64_t>::max()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_FALSE(changed); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dot_merger.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dot_merger_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
bcbda2a7-93ed-47d7-b14f-5c2b1c80a977
cpp
tensorflow/tensorflow
host_offloading_prepare
third_party/xla/xla/service/host_offloading_prepare.cc
third_party/xla/xla/service/host_offloading_prepare_test.cc
#include "xla/service/host_offloading_prepare.h" #include <memory> #include <utility> #include <vector> #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/call_graph.h" #include "xla/service/host_memory_offload_annotations.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace { using xla::host_memory_offload_annotations::kMoveToHostCustomCallTarget; bool IsHostAsyncStart(const HloInstruction* instruction) { return instruction->opcode() == HloOpcode::kAsyncStart && instruction->async_execution_thread() == HloInstruction::kHostThread && instruction->async_wrapped_instruction()->opcode() == HloOpcode::kCall; } absl::StatusOr<bool> RemoveSurroundingMoveCustomCalls( HloInstruction* async_start) { bool removed = false; for (HloInstruction* operand : async_start->operands()) { if (operand->IsCustomCall(kMoveToHostCustomCallTarget)) { CHECK_EQ(operand->operands().size(), 1); VLOG(1) << "Replacing " << operand->ToString() << " with " << operand->operands().at(0)->ToString(); TF_RETURN_IF_ERROR( operand->ReplaceAllUsesWith(operand->mutable_operand(0))); TF_RETURN_IF_ERROR(async_start->parent()->RemoveInstruction(operand)); removed = true; } } return removed; } absl::StatusOr<bool> ElideMoveCustomCalls(HloModule* module) { bool changed = false; std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module); for (HloComputation* computation : module->computations()) { if (computation->execution_thread() != HloInstruction::kHostThread) { continue; } std::vector<HloInstruction*> callers = call_graph->GetComputationCallers(computation); for (HloInstruction* caller : callers) { VLOG(2) << "Hlo computation " << computation->name() << " is 
offloaded to host and has caller " << caller->ToString(); if (caller->parent()->execution_thread() == HloInstruction::kHostThread) { VLOG(3) << "Nested host computation, must be a async-wrapper"; continue; } VLOG(2) << "Going to adjust before and after " << caller->name(); } } for (HloComputation* computation : module->computations()) { for (HloInstruction* instruction : computation->instructions()) { if (IsHostAsyncStart(instruction)) { VLOG(2) << "Found async start of host computation: " << instruction->ToString() << " done must be " << instruction->users().at(0)->ToString(); TF_ASSIGN_OR_RETURN(bool removed, RemoveSurroundingMoveCustomCalls(instruction)); changed = changed || removed; } } } return changed; } absl::StatusOr<bool> ConvertToCustomCall(HloModule* module) { bool changed = false; for (HloComputation* computation : module->computations()) { for (HloInstruction* instruction : computation->instructions()) { if (IsHostAsyncStart(instruction)) { auto* call_start = Cast<HloAsyncInstruction>(instruction); auto* call = call_start->async_wrapped_instruction(); auto custom_call = HloInstruction::CreateCustomCall( call->shape(), call->operands(), call->called_computations().at(0), "HostExecute"); custom_call->set_output_to_operand_aliasing( call->output_operand_aliasing()); HloComputation* async_computation = call_start->async_wrapped_computation(); async_computation->set_root_instruction( async_computation->AddInstruction(std::move(custom_call))); TF_RETURN_IF_ERROR(async_computation->RemoveInstruction(call)); changed = true; } } } return changed; } } absl::StatusOr<bool> HostOffloadingPrepare::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { switch (rewrite_) { case Rewrite::kElideMoveToHost: return ElideMoveCustomCalls(module); case Rewrite::kConvertToCustomCall: return ConvertToCustomCall(module); } } }
// Unit tests for the HostOffloadingPrepare pass, covering both the
// kElideMoveToHost and kConvertToCustomCall rewrites.
#include "xla/service/host_offloading_prepare.h"

#include <string>
#include <vector>

#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace {

using Rewrite = HostOffloadingPrepare::Rewrite;

class HostOffloadingPrepareTest : public HloTestBase {
 protected:
  // Verifies `module`, then runs HostOffloadingPrepare in the given rewrite
  // mode.  Returns whether the pass changed the module, or InternalError if
  // the module is already scheduled (the pass expects unscheduled input).
  absl::StatusOr<bool> RunRewrite(HloModule* module, Rewrite rewrite) {
    TF_EXPECT_OK(verifier().Run(module).status());
    if (module->has_schedule()) {
      return absl::InternalError("Expected a non-scheduled module");
    }
    HostOffloadingPrepare pass(rewrite);
    TF_ASSIGN_OR_RETURN(bool changed, pass.Run(module));
    return changed;
  }

  // Collects every async-start instruction in `module` that executes on the
  // host thread.
  std::vector<const HloInstruction*> GetHostOffloadAsyncStartInstructions(
      const HloModule* module) {
    std::vector<const HloInstruction*> result;
    for (const HloComputation* computation : module->computations()) {
      for (const HloInstruction* instruction : computation->instructions()) {
        if (instruction->opcode() == HloOpcode::kAsyncStart &&
            instruction->async_execution_thread() ==
                HloInstruction::kHostThread) {
          result.push_back(instruction);
        }
      }
    }
    return result;
  }
};

// A single MoveToHost custom-call feeding the async-start must be elided.
TEST_F(HostOffloadingPrepareTest, SingleInputHasMoveToHost) {
  const std::string& hlo_string = R"(
HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}

host_computation {
  Arg_0.0 = s32[32]{0} parameter(0)
  ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.0)
}, execution_thread="host"

async_computation {
  param_0 = s32[32]{0} parameter(0)
  ROOT call = s32[32]{0} call(param_0), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
}, execution_thread="host"

ENTRY main {
  Arg_0.1 = s32[32]{0:T(128)} parameter(0)
  constant.2 = s32[]{:T(128)} constant(2)
  broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
  multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
  move_to_host = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToHost"
  start = ((s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_host), async_execution_thread="host", calls=async_computation
  ROOT done = s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type="host"}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunRewrite(module.get(), Rewrite::kElideMoveToHost));
  EXPECT_TRUE(changed);
  // After the rewrite, no host async-start may consume a MoveToHost
  // custom-call, nor feed a MoveToDevice custom-call.
  for (const HloInstruction* instruction :
       GetHostOffloadAsyncStartInstructions(module.get())) {
    for (const HloInstruction* operand : instruction->operands()) {
      EXPECT_FALSE(operand->IsCustomCall(
          {host_memory_offload_annotations::kMoveToHostCustomCallTarget}));
    }
    for (const HloInstruction* user : instruction->users()) {
      EXPECT_FALSE(user->IsCustomCall(
          {host_memory_offload_annotations::kMoveToDeviceCustomCallTarget}));
    }
  }
}

// The same MoveToHost custom-call feeding both async-start operands must be
// elided from both positions.
TEST_F(HostOffloadingPrepareTest, MultipleInputHasOneMoveToHost) {
  const std::string& hlo_string = R"(
HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}

host_computation {
  Arg_0.0 = s32[32]{0} parameter(0)
  Arg_0.1 = s32[32]{0} parameter(1)
  ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.1)
}, execution_thread="host"

async_computation {
  param_0 = s32[32]{0} parameter(0)
  param_1 = s32[32]{0} parameter(1)
  ROOT call = s32[32]{0} call(param_0, param_1), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
}, execution_thread="host"

ENTRY main {
  Arg_0.1 = s32[32]{0:T(128)} parameter(0)
  constant.2 = s32[]{:T(128)} constant(2)
  broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
  multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
  move_to_host = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToHost"
  start = ((s32[32]{0:T(128)}, s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_host, move_to_host), async_execution_thread="host", calls=async_computation
  ROOT done = s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type="host"}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunRewrite(module.get(), Rewrite::kElideMoveToHost));
  EXPECT_TRUE(changed);
  for (const HloInstruction* instruction :
       GetHostOffloadAsyncStartInstructions(module.get())) {
    for (const HloInstruction* operand : instruction->operands()) {
      EXPECT_FALSE(operand->IsCustomCall(
          {host_memory_offload_annotations::kMoveToHostCustomCallTarget}));
    }
    for (const HloInstruction* user : instruction->users()) {
      EXPECT_FALSE(user->IsCustomCall(
          {host_memory_offload_annotations::kMoveToDeviceCustomCallTarget}));
    }
  }
}

// Two distinct MoveToHost custom-calls, one per async-start operand, must
// both be elided.
TEST_F(HostOffloadingPrepareTest, MultipleInputHasMultipleMoveToHost) {
  const std::string& hlo_string = R"(
HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}

host_computation {
  Arg_0.0 = s32[32]{0} parameter(0)
  Arg_0.1 = s32[32]{0} parameter(1)
  ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.1)
}, execution_thread="host"

async_computation {
  param_0 = s32[32]{0} parameter(0)
  param_1 = s32[32]{0} parameter(1)
  ROOT call = s32[32]{0} call(param_0, param_1), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
}, execution_thread="host"

ENTRY main {
  Arg_0.1 = s32[32]{0:T(128)} parameter(0)
  constant.2 = s32[]{:T(128)} constant(2)
  broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
  multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
  move_to_host.1 = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToHost"
  move_to_host.2 = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToHost"
  start = ((s32[32]{0:T(128)}, s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_host.1, move_to_host.2), async_execution_thread="host", calls=async_computation
  ROOT done = s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type="host"}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunRewrite(module.get(), Rewrite::kElideMoveToHost));
  EXPECT_TRUE(changed);
  for (const HloInstruction* instruction :
       GetHostOffloadAsyncStartInstructions(module.get())) {
    for (const HloInstruction* operand : instruction->operands()) {
      EXPECT_FALSE(operand->IsCustomCall(
          {host_memory_offload_annotations::kMoveToHostCustomCallTarget}));
    }
    for (const HloInstruction* user : instruction->users()) {
      EXPECT_FALSE(user->IsCustomCall(
          {host_memory_offload_annotations::kMoveToDeviceCustomCallTarget}));
    }
  }
}

// MoveToDevice custom-calls are NOT elided by kElideMoveToHost: the pass
// must report no change.
TEST_F(HostOffloadingPrepareTest, SingleInputHasMoveToDevice) {
  const std::string& hlo_string = R"(
HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}

host_computation {
  Arg_0.0 = s32[32]{0} parameter(0)
  ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.0)
}, execution_thread="host"

async_computation {
  param_0 = s32[32]{0} parameter(0)
  ROOT call = s32[32]{0} call(param_0), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
}, execution_thread="host"

ENTRY main {
  Arg_0.1 = s32[32]{0:T(128)} parameter(0)
  constant.2 = s32[]{:T(128)} constant(2)
  broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
  multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
  move_to_device = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToDevice"
  start = ((s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_device), async_execution_thread="host", calls=async_computation
  ROOT done = s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type="host"}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunRewrite(module.get(), Rewrite::kElideMoveToHost));
  EXPECT_FALSE(changed);
}

// As above, but the single MoveToDevice feeds both operands; still no change.
TEST_F(HostOffloadingPrepareTest, MultipleInputHasOneMoveToDevice) {
  const std::string& hlo_string = R"(
HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}

host_computation {
  Arg_0.0 = s32[32]{0} parameter(0)
  Arg_0.1 = s32[32]{0} parameter(1)
  ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.1)
}, execution_thread="host"

async_computation {
  param_0 = s32[32]{0} parameter(0)
  param_1 = s32[32]{0} parameter(1)
  ROOT call = s32[32]{0} call(param_0, param_1), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
}, execution_thread="host"

ENTRY main {
  Arg_0.1 = s32[32]{0:T(128)} parameter(0)
  constant.2 = s32[]{:T(128)} constant(2)
  broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
  multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
  move_to_device = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToDevice"
  custom-call.cloned.call-start = ((s32[32]{0:T(128)}, s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_device, move_to_device), async_execution_thread="host", calls=async_computation
  ROOT custom-call.cloned.call-done = s32[32]{0:T(128)} async-done(custom-call.cloned.call-start), frontend_attributes={_xla_compute_type="host"}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunRewrite(module.get(), Rewrite::kElideMoveToHost));
  EXPECT_FALSE(changed);
}

// Two distinct MoveToDevice custom-calls; still no change.
TEST_F(HostOffloadingPrepareTest, MultipleInputHasMultipleMoveToDevice) {
  const std::string& hlo_string = R"(
HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}

host_computation {
  Arg_0.0 = s32[32]{0} parameter(0)
  Arg_0.1 = s32[32]{0} parameter(1)
  ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.1)
}, execution_thread="host"

async_computation {
  param_0 = s32[32]{0} parameter(0)
  param_1 = s32[32]{0} parameter(1)
  ROOT call = s32[32]{0} call(param_0, param_1), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
}, execution_thread="host"

ENTRY main {
  Arg_0.1 = s32[32]{0:T(128)} parameter(0)
  constant.2 = s32[]{:T(128)} constant(2)
  broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
  multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
  move_to_device.1 = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToDevice"
  move_to_device.2 = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToDevice"
  start = ((s32[32]{0:T(128)}, s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_device.1, move_to_device.2), async_execution_thread="host", calls=async_computation
  ROOT done = s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type="host"}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunRewrite(module.get(), Rewrite::kElideMoveToHost));
  EXPECT_FALSE(changed);
}

// kConvertToCustomCall rewrite, exercised through RunAndFilecheckHloRewrite.
// NOTE(review): `expected` appears to be an empty FileCheck pattern here —
// the pattern body may have been lost upstream; verify against the original.
TEST_F(HostOffloadingPrepareTest, ConvertToCustomCall) {
  const char* hlo = R"(
HloModule my_module

host_computation {
  Arg_0.0 = s32[32] parameter(0)
  ROOT multiply.0 = s32[32] multiply(Arg_0.0, Arg_0.0)
}, execution_thread="host"

async_computation {
  param_0 = s32[32] parameter(0)
  ROOT call = s32[32] call(param_0), to_apply=host_computation
}, execution_thread="host"

ENTRY main {
  Arg_0.1 = s32[32] parameter(0)
  start = ((s32[32]), s32[32], u32[]) async-start(Arg_0.1), async_execution_thread="host", calls=async_computation
  ROOT done = s32[32] async-done(start)
}
)";

  const char* expected = R"(
)";
  RunAndFilecheckHloRewrite(
      hlo, HostOffloadingPrepare(Rewrite::kConvertToCustomCall), expected);
}

}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_offloading_prepare.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_offloading_prepare_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
b9f15842-1826-48a4-bfa0-e2fbdeeca04f
cpp
tensorflow/tensorflow
convert_async_collectives_to_sync
third_party/xla/xla/service/gpu/transforms/convert_async_collectives_to_sync.cc
third_party/xla/xla/service/gpu/transforms/convert_async_collectives_to_sync_test.cc
// GPU-specific conversion of async collective pairs into synchronous form:
// flags each async-start as sync in its backend config and compacts the
// schedule so a start is placed immediately before its matching done.
#include "xla/service/gpu/transforms/convert_async_collectives_to_sync.h"

#include <utility>
#include <vector>

#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace gpu {

absl::Status GpuConvertAsyncCollectivesToSync::ConvertAsyncInstructionsToSync(
    HloComputation* computation,
    absl::Span<const std::pair<HloInstruction*, HloInstruction*>> async_pairs)
    const {
  // Backend config marking a collective as synchronous.
  CollectiveBackendConfig sync_collective_config;
  sync_collective_config.set_is_sync(true);

  // Schedule-rewrite plan: a start maps to nullptr (drop it from its old
  // slot); a done maps to its start (re-emit the start right before it).
  absl::flat_hash_map<HloInstruction*, HloInstruction*> schedule_plan;
  for (const auto& [start, done] : async_pairs) {
    TF_ASSIGN_OR_RETURN(GpuBackendConfig config,
                        start->backend_config<GpuBackendConfig>());
    *config.mutable_collective_backend_config() = sync_collective_config;
    TF_RETURN_IF_ERROR(start->set_backend_config(config));
    schedule_plan[start] = nullptr;
    schedule_plan[done] = start;
  }

  // Rebuild the instruction sequence according to the plan.
  HloModule* module = computation->parent();
  const HloInstructionSequence& old_sequence =
      module->schedule().sequence(computation);
  std::vector<HloInstruction*> updated_sequence;
  updated_sequence.reserve(old_sequence.size());
  for (HloInstruction* instruction : old_sequence.instructions()) {
    const auto entry = schedule_plan.find(instruction);
    if (entry == schedule_plan.end()) {
      // Untouched instruction: keep it in place.
      updated_sequence.push_back(instruction);
    } else if (entry->second != nullptr) {
      // A done op: schedule its start immediately before it.
      updated_sequence.push_back(entry->second);
      updated_sequence.push_back(instruction);
    }
    // Otherwise it is a start op — skipped here, re-emitted at its done.
  }
  module->schedule().set_sequence(computation, updated_sequence);
  return absl::OkStatus();
}

}  // namespace gpu
}  // namespace xla
// Unit tests for GpuConvertAsyncCollectivesToSync across the collective op
// zoo (all-reduce, broadcast, all-gather, permute, reduce-scatter,
// all-to-all) and several in-flight overlap patterns.
#include "xla/service/gpu/transforms/convert_async_collectives_to_sync.h"

#include <string_view>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace gpu {
namespace {

using ::testing::IsFalse;
using ::testing::IsTrue;

class GpuConvertAsyncCollectivesToSyncTest : public HloTestBase {
 public:
  // Runs the pass (optionally with a custom "is nop" predicate) and checks
  // that it reported `expect_change`.
  absl::Status RunPass(HloModule *module, bool expect_change,
                       HloPredicate is_nop = {}) {
    TF_ASSIGN_OR_RETURN(bool changed,
                        GpuConvertAsyncCollectivesToSync{is_nop}.Run(module));
    EXPECT_EQ(changed, expect_change);
    return absl::OkStatus();
  }

  // Returns true iff the named instruction's collective backend config is
  // marked synchronous.  Returns false when the instruction is not found.
  bool IsSync(HloModule *module, std::string_view name) {
    const HloInstruction *inst = FindInstruction(module, name);
    if (inst == nullptr) {
      return false;
    }
    auto backend_config = inst->backend_config<GpuBackendConfig>()
                              .value()
                              .collective_backend_config();
    return backend_config.is_sync();
  }

  // Predicate treating bitcast/get-tuple-element/parameter as no-ops.
  HloPredicate is_nop_simple_ =
      HloPredicateIsOp<HloOpcode::kBitcast, HloOpcode::kGetTupleElement,
                       HloOpcode::kParameter>;
};

// Adjacent start/done pair: converted to sync.
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllReduce) {
  const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true

apply_op {
  x = u32[] parameter(0)
  y = u32[] parameter(1)
  ROOT apply_op = u32[] add(x, y)
}

ENTRY test_computation {
  id = u32[] replica-id()
  start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
  ROOT done = u32[] all-reduce-done(start)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  EXPECT_THAT(IsSync(module.get(), "start"), IsTrue());
}

// A nop (bitcast) between start and done does not block conversion when the
// nop predicate is supplied.
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllReduceWithNop) {
  const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true

apply_op {
  x = u32[] parameter(0)
  y = u32[] parameter(1)
  ROOT apply_op = u32[] add(x, y)
}

ENTRY test_computation {
  id = u32[] replica-id()
  start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3, replica_groups={{0,1}, {2,3}}
  id2 = f32[] bitcast(id)
  ROOT done = u32[] all-reduce-done(start)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true, is_nop_simple_));
  EXPECT_THAT(IsSync(module.get(), "start"), IsTrue());
}

// Collective broadcast expressed as a generic async-start/async-done pair.
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleCollectiveBroadcast) {
  const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true

collective_broadcast {
  p0 = u32[8] parameter(0)
  ROOT result = u32[8] collective-broadcast(p0), replica_groups={{0,1}, {2,3}}
}

ENTRY main {
  data = u32[8] parameter(0)
  cb-start = ((u32[8]{0}), u32[8]{0}) async-start(u32[8]{0} %data), calls=collective_broadcast
  ROOT %ars = u32[8]{0} async-done(((u32[8]{0}), u32[8]{0}) %cb-start), calls=collective_broadcast
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  EXPECT_THAT(IsSync(module.get(), "cb-start"), IsTrue());
}

// A real computation (add) between start and done blocks the conversion.
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllReduceWithNonNop) {
  const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true

apply_op {
  x = u32[] parameter(0)
  y = u32[] parameter(1)
  ROOT apply_op = u32[] add(x, y)
}

ENTRY test_computation {
  id = u32[] replica-id()
  start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
  id2 = u32[] add(id, id)
  ROOT done = u32[] all-reduce-done(start)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), false));
}

TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllGather) {
  const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true

ENTRY test_computation {
  a1 = u32[1, 2] parameter(0)
  ags = (u32[1, 2], u32[2, 2]) all-gather-start(a1), dimensions={0}, channel_id=3
  ROOT allgather = u32[2,2] all-gather-done(ags)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  EXPECT_THAT(IsSync(module.get(), "ags"), IsTrue());
}

TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleCollectivePermute) {
  const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true

ENTRY test_computation {
  p = u32[2] parameter(0)
  start = (u32[2], u32[2], u32[], u32[]) collective-permute-start(p), source_target_pairs={{0,1}, {1,0}}
  ROOT done = u32[2] collective-permute-done(start)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  EXPECT_THAT(IsSync(module.get(), "start"), IsTrue());
}

TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleReduceScatter) {
  const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true

add {
  lhs = u32[] parameter(0)
  rhs = u32[] parameter(1)
  ROOT add = u32[] add(lhs, rhs)
}

reduce_scatter {
  p0 = u32[8] parameter(0)
  ROOT result = u32[4] reduce-scatter(p0), replica_groups={{0,3}, {1,2}}, dimensions={0}, to_apply=add
}

ENTRY main {
  data = u32[8] parameter(0)
  rs-start = ((u32[8]{0}), u32[4]{0}) async-start(u32[8]{0} %data), calls=reduce_scatter
  ROOT %ars = u32[4]{0} async-done(((u32[8]{0}), u32[4]{0}) %rs-start), calls=reduce_scatter
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  EXPECT_THAT(IsSync(module.get(), "rs-start"), IsTrue());
}

TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllToAll) {
  const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true

all_to_all {
  p0 = u32[2] parameter(0)
  ROOT result = u32[2] all-to-all(p0), dimensions={0}, replica_groups={{0,1},{2,3}}
}

ENTRY test_computation {
  a1 = u32[2] parameter(0)
  a2a-start = ((u32[2]), u32[2]) async-start(u32[2] a1), calls=all_to_all
  ROOT a2s = u32[2] async-done(a2a-start), calls=all_to_all
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  EXPECT_THAT(IsSync(module.get(), "a2a-start"), IsTrue());
}

// Control dependencies between the pairs do not block conversion.
TEST_F(GpuConvertAsyncCollectivesToSyncTest, ControlDeps) {
  const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true

apply_op {
  x = u32[] parameter(0)
  y = u32[] parameter(1)
  ROOT apply_op = u32[] add(x, y)
}

ENTRY test_computation {
  id = u32[] replica-id()
  start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
  done1 = u32[] all-reduce-done(start1)
  start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4, control-predecessors={done1}
  done2 = u32[] all-reduce-done(start2)
  ROOT x = u32[] add(done1, done2)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  EXPECT_THAT(IsSync(module.get(), "start1"), IsTrue());
  EXPECT_THAT(IsSync(module.get(), "start2"), IsTrue());
}

// Two overlapping pairs in start1/start2/done1/done2 order: both convert.
TEST_F(GpuConvertAsyncCollectivesToSyncTest, MultipleInFlightStreaming) {
  const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true

apply_op {
  x = u32[] parameter(0)
  y = u32[] parameter(1)
  ROOT apply_op = u32[] add(x, y)
}

ENTRY test_computation {
  id = u32[] replica-id()
  start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
  start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4
  done1 = u32[] all-reduce-done(start1)
  done2 = u32[] all-reduce-done(start2)
  ROOT x = u32[] add(done1, done2)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  EXPECT_THAT(IsSync(module.get(), "start1"), IsTrue());
  EXPECT_THAT(IsSync(module.get(), "start2"), IsTrue());
}

// Nested pairs (start1/start2/done2/done1): both convert.
TEST_F(GpuConvertAsyncCollectivesToSyncTest, MultipleInFlightNested) {
  const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true

apply_op {
  x = u32[] parameter(0)
  y = u32[] parameter(1)
  ROOT apply_op = u32[] add(x, y)
}

ENTRY test_computation {
  id = u32[] replica-id()
  start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
  start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4
  done2 = u32[] all-reduce-done(start2)
  done1 = u32[] all-reduce-done(start1)
  ROOT x = u32[] add(done1, done2)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  EXPECT_THAT(IsSync(module.get(), "start1"), IsTrue());
  EXPECT_THAT(IsSync(module.get(), "start2"), IsTrue());
}

// Nested pairs where real work (id2) sits inside the outer pair: only the
// inner pair converts; the outer one stays async.
TEST_F(GpuConvertAsyncCollectivesToSyncTest, MultipleInFlightNestedPartial) {
  const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true

apply_op {
  x = u32[] parameter(0)
  y = u32[] parameter(1)
  ROOT apply_op = u32[] add(x, y)
}

ENTRY test_computation {
  id = u32[] replica-id()
  start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
  start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4
  done2 = u32[] all-reduce-done(start2)
  id2 = u32[] add(done2, done2)
  done1 = u32[] all-reduce-done(start1)
  ROOT x = u32[] add(done1, done2)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  EXPECT_THAT(IsSync(module.get(), "start1"), IsFalse());
  EXPECT_THAT(IsSync(module.get(), "start2"), IsTrue());
}

}  // namespace
}  // namespace gpu
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/convert_async_collectives_to_sync.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/convert_async_collectives_to_sync_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
13af8cfa-a80f-4abd-b903-d4e5aa058635
cpp
tensorflow/tensorflow
while_loop_trip_count_annotator
third_party/xla/xla/service/while_loop_trip_count_annotator.cc
third_party/xla/xla/service/while_loop_trip_count_annotator_test.cc
// Annotates kWhile instructions whose trip count can be computed statically
// with a WhileLoopBackendConfig carrying that known trip count.
#include "xla/service/while_loop_trip_count_annotator.h"

#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/while_loop_analysis.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"

namespace xla {

absl::StatusOr<bool> WhileLoopTripCountAnnotator::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool annotated_any = false;
  for (const HloComputation* computation :
       module->computations(execution_threads)) {
    for (HloInstruction* while_instr : computation->instructions()) {
      if (while_instr->opcode() != HloOpcode::kWhile) {
        continue;
      }
      // Loops whose trip count cannot be determined are left unannotated.
      const auto trip_count = ComputeWhileLoopTripCount(while_instr);
      if (!trip_count.has_value()) {
        continue;
      }
      WhileLoopBackendConfig backend_config;
      backend_config.mutable_known_trip_count()->set_n(*trip_count);
      TF_RETURN_IF_ERROR(while_instr->set_backend_config(backend_config));
      annotated_any = true;
    }
  }
  return annotated_any;
}

}  // namespace xla
// Unit tests for WhileLoopTripCountAnnotator: known trip counts (small,
// large, nonzero start, LE comparison) and an s64 overflow case that must
// not be annotated.
#include "xla/service/while_loop_trip_count_annotator.h"

#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace {

class TripCountAnnotatorTest : public HloTestBase {};

// i starts at 0, increments by 1, loops while i < 10 => trip count 10.
TEST_F(TripCountAnnotatorTest, KnownSmallTripCount) {
  const char* kModuleStr = R"(
    HloModule test
    Body {
      param = (s32[]) parameter(0)
      i = s32[] get-tuple-element(param), index=0
      one = s32[] constant(1)
      i_plus_one = s32[] add(i, one)
      ROOT tuple = (s32[]) tuple(i_plus_one)
    }
    Cond {
      param = (s32[]) parameter(0)
      i = s32[] get-tuple-element(param), index=0
      trip_count = s32[] constant(10)
      ROOT done = pred[] compare(i, trip_count), direction=LT
    }
    ENTRY test {
      i_start = s32[] constant(0)
      initial_tuple = (s32[]) tuple(i_start)
      ROOT while = (s32[]) while(initial_tuple), condition=Cond, body=Body
    })";

  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  WhileLoopTripCountAnnotator pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));
  ASSERT_TRUE(changed);

  TF_ASSERT_OK_AND_ASSIGN(auto config,
                          m->entry_computation()
                              ->root_instruction()
                              ->backend_config<WhileLoopBackendConfig>());
  EXPECT_EQ(10, config.known_trip_count().n());
}

// Same shape of loop with a 1M bound.
TEST_F(TripCountAnnotatorTest, KnownLargeTripCount) {
  const char* kModuleStr = R"(
    HloModule test
    Body {
      param = (s32[]) parameter(0)
      i = s32[] get-tuple-element(param), index=0
      one = s32[] constant(1)
      i_plus_one = s32[] add(i, one)
      ROOT tuple = (s32[]) tuple(i_plus_one)
    }
    Cond {
      param = (s32[]) parameter(0)
      i = s32[] get-tuple-element(param), index=0
      trip_count = s32[] constant(1000000)
      ROOT done = pred[] compare(i, trip_count), direction=LT
    }
    ENTRY test {
      i_start = s32[] constant(0)
      initial_tuple = (s32[]) tuple(i_start)
      ROOT while = (s32[]) while(initial_tuple), condition=Cond, body=Body
    })";

  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  WhileLoopTripCountAnnotator pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));
  ASSERT_TRUE(changed);

  TF_ASSERT_OK_AND_ASSIGN(auto config,
                          m->entry_computation()
                              ->root_instruction()
                              ->backend_config<WhileLoopBackendConfig>());
  EXPECT_EQ(1000000, config.known_trip_count().n());
}

// Induction variable starts at 10, so 1000000 - 10 = 999990 iterations.
TEST_F(TripCountAnnotatorTest, NonzeroStart) {
  const char* kModuleStr = R"(
    HloModule test
    Body {
      param = (s32[]) parameter(0)
      i = s32[] get-tuple-element(param), index=0
      one = s32[] constant(1)
      i_plus_one = s32[] add(i, one)
      ROOT tuple = (s32[]) tuple(i_plus_one)
    }
    Cond {
      param = (s32[]) parameter(0)
      i = s32[] get-tuple-element(param), index=0
      trip_count = s32[] constant(1000000)
      ROOT done = pred[] compare(i, trip_count), direction=LT
    }
    ENTRY test {
      i_start = s32[] constant(10)
      initial_tuple = (s32[]) tuple(i_start)
      ROOT while = (s32[]) while(initial_tuple), condition=Cond, body=Body
    })";

  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  WhileLoopTripCountAnnotator pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));
  ASSERT_TRUE(changed);

  TF_ASSERT_OK_AND_ASSIGN(auto config,
                          m->entry_computation()
                              ->root_instruction()
                              ->backend_config<WhileLoopBackendConfig>());
  EXPECT_EQ(999990, config.known_trip_count().n());
}

// LE comparison adds one more iteration: 999991.
TEST_F(TripCountAnnotatorTest, LessThanOrEqualTo) {
  const char* kModuleStr = R"(
    HloModule test
    Body {
      param = (s32[]) parameter(0)
      i = s32[] get-tuple-element(param), index=0
      one = s32[] constant(1)
      i_plus_one = s32[] add(i, one)
      ROOT tuple = (s32[]) tuple(i_plus_one)
    }
    Cond {
      param = (s32[]) parameter(0)
      i = s32[] get-tuple-element(param), index=0
      trip_count = s32[] constant(1000000)
      ROOT done = pred[] compare(i, trip_count), direction=LE
    }
    ENTRY test {
      i_start = s32[] constant(10)
      initial_tuple = (s32[]) tuple(i_start)
      ROOT while = (s32[]) while(initial_tuple), condition=Cond, body=Body
    })";

  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  WhileLoopTripCountAnnotator pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));
  ASSERT_TRUE(changed);

  TF_ASSERT_OK_AND_ASSIGN(auto config,
                          m->entry_computation()
                              ->root_instruction()
                              ->backend_config<WhileLoopBackendConfig>());
  EXPECT_EQ(999991, config.known_trip_count().n());
}

// Full s64 range (INT64_MIN to INT64_MAX with LE) would overflow a signed
// 64-bit trip count, so the pass must not annotate the loop.
TEST_F(TripCountAnnotatorTest, Int64Overflow) {
  const char* kModuleStr = R"(
    HloModule test
    Body {
      param = (s64[]) parameter(0)
      i = s64[] get-tuple-element(param), index=0
      one = s64[] constant(1)
      i_plus_one = s64[] add(i, one)
      ROOT tuple = (s64[]) tuple(i_plus_one)
    }
    Cond {
      param = (s64[]) parameter(0)
      i = s64[] get-tuple-element(param), index=0
      trip_count = s64[] constant(9223372036854775807)
      ROOT done = pred[] compare(i, trip_count), direction=LE
    }
    ENTRY test {
      i_start = s64[] constant(-9223372036854775808)
      initial_tuple = (s64[]) tuple(i_start)
      ROOT while = (s64[]) while(initial_tuple), condition=Cond, body=Body
    })";

  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
  WhileLoopTripCountAnnotator pass;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));
  EXPECT_FALSE(changed);
}

}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_trip_count_annotator.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_trip_count_annotator_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
53602328-c7cf-4c6d-8ee9-b9779b3bed8b
cpp
tensorflow/tensorflow
ar_crs_combiner
third_party/xla/xla/service/ar_crs_combiner.cc
third_party/xla/xla/service/ar_crs_combiner_test.cc
// ArCrsCombiner: an HLO pass that pairs a cross-module all-reduce (AR) with a
// downstream cross-replica all-reduce (CRS) and rewrites the graph so the AR
// is removed and its work is folded into the CRS (see RewriteGraph below).
#include "xla/service/ar_crs_combiner.h"
#include <algorithm>
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_replication_analysis.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace {

// Gives every eligible replicated, channel-less, single-group F32 all-reduce a
// channel id and divides its result by partition_count (via broadcasted
// constant). Only fires when replica_count >= 8 * partition_count and
// replication analysis proves the all-reduce result is replicated.
// Returns true if any instruction was rewritten.
absl::StatusOr<bool> ReplaceReplicatedAllReduce(HloModule* module, int64_t partition_count) {
  TF_ASSIGN_OR_RETURN(auto replication_analysis, HloReplicationAnalysis::Run(module, true));
  bool changed = false;
  int64_t next_channel = hlo_query::NextChannelId(*module);
  for (auto computation : module->computations()) {
    for (auto instruction : computation->instructions()) {
      if (auto ar = DynCast<HloAllReduceInstruction>(instruction)) {
        const Shape& shape = ar->shape();
        // Skip all-reduces that already have a channel, use more than one
        // replica group, or are not scalar-typed F32 arrays.
        if (ar->channel_id()) {
          continue;
        }
        if (ar->replica_groups().size() > 1) {
          continue;
        }
        if (shape.IsTuple() || shape.element_type() != F32) {
          continue;
        }
        // Heuristic threshold: only worthwhile for large replica counts.
        if (module->config().replica_count() < 8 * partition_count) {
          continue;
        }
        if (replication_analysis->HloInstructionIsReplicatedAt(ar, {})) {
          VLOG(2) << "Replaced replicated all-reduce:" << ar->ToString();
          ar->set_channel_id(next_channel++);
          auto divisor = computation->AddInstruction(HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(partition_count)));
          auto bcast = computation->AddInstruction(HloInstruction::CreateBroadcast(shape, divisor, {}));
          auto div = computation->AddInstruction(HloInstruction::CreateBinary(ar->shape(), HloOpcode::kDivide, ar, bcast));
          TF_RETURN_IF_ERROR(ar->ReplaceAllUsesWith(div));
          changed = true;
        }
      }
    }
  }
  return changed;
}

// Returns true if the cross-module all-reduce's replica groups are combinable:
// either one group per replica where each group holds exactly the
// num_partitions devices of a single replica (global-device-ids mode), or
// simply one group per replica otherwise.
bool HasCombinableReplicaGroup(HloInstruction* hlo, int64_t num_partitions) {
  auto all_reduce = Cast<HloAllReduceInstruction>(hlo);
  auto replica_groups = all_reduce->replica_groups();
  const int64_t replica_count = hlo->GetModule()->config().replica_count();
  CHECK(all_reduce->IsCrossModuleAllReduce());
  if (all_reduce->use_global_device_ids()) {
    if (replica_groups.size() != replica_count) {
      return false;
    }
    for (const auto& group : replica_groups) {
      if (group.replica_ids_size() != num_partitions) {
        return false;
      }
      absl::flat_hash_set<int64_t> partition_ids;
      // With global ids, device = replica_id * num_partitions + partition_id;
      // every id in the group must decode to the same replica and together
      // cover all partitions exactly once.
      int64_t replica_id = group.replica_ids(0) / num_partitions;
      for (int64_t i = 0; i < num_partitions; ++i) {
        if (group.replica_ids(i) / num_partitions != replica_id) {
          return false;
        }
        partition_ids.insert(group.replica_ids(i) % num_partitions);
      }
      if (partition_ids.size() != num_partitions) {
        return false;
      }
    }
    return true;
  }
  return replica_groups.size() == replica_count;
}

}  // namespace

namespace m = match;

// Matches AR -> (movable ops)* -> CRS. An op is "movable" if it has a single
// user and is a bitcast/transpose/reshape, a floating<->floating convert, or a
// floating add/subtract/multiply. Both all-reduces must use a 2-parameter
// addition as their reduction. Returns the matched pair with the distance
// (number of hops) from AR to CRS, or nullopt if no match.
std::optional<ArCrsCombiner::ArCrsPair> ArCrsCombiner::MatchesArCrsPattern(HloInstruction* instruction) {
  auto can_ar_move_past_instruction = [](HloInstruction* instruction) -> bool {
    if (instruction->user_count() != 1) {
      return false;
    }
    switch (instruction->opcode()) {
      case HloOpcode::kBitcast:
      case HloOpcode::kTranspose:
      case HloOpcode::kReshape:
        return true;
      case HloOpcode::kConvert:
        // Convert is movable only if it does not cross the floating-point /
        // integer boundary.
        return ShapeUtil::ElementIsFloating(instruction->shape()) == ShapeUtil::ElementIsFloating(instruction->operand(0)->shape());
      case HloOpcode::kAdd:
      case HloOpcode::kSubtract:
      case HloOpcode::kMultiply:
        return ShapeUtil::ElementIsFloating(instruction->shape());
      default:
        return false;
    }
  };
  // The reduction computation must be exactly add(param, param).
  auto computation_is_addition = [](HloComputation* c) {
    return c->instruction_count() == 3 && Match(c->root_instruction(), m::Add(m::Parameter(), m::Parameter()));
  };
  if (instruction->IsCrossModuleAllReduce() && HasCombinableReplicaGroup(instruction, num_spatial_partitions_) && computation_is_addition(instruction->called_computations()[0]) && instruction->user_count() == 1) {
    auto next = instruction->users()[0];
    int64_t distance = 1;
    while (!next->IsCrossReplicaAllReduce()) {
      if (can_ar_move_past_instruction(next)) {
        next = next->users()[0];
      } else {
        return std::nullopt;
      }
      ++distance;
    }
    if (!Cast<HloAllReduceInstruction>(next)->IsNoop() && computation_is_addition(next->called_computations()[0])) {
      ArCrsPair pair(instruction, next, distance);
      VLOG(2) << "ArCrsPair matching pattern: " << pair.ToString();
      return pair;
    }
  }
  return std::nullopt;
}

// If `instruction` is the parameter of a while body/condition with a single
// caller, returns that kWhile instruction; otherwise nullopt.
std::optional<HloInstruction*> ArCrsCombiner::WhileFromBodyParameter(HloInstruction* instruction) {
  CHECK_EQ(HloOpcode::kParameter, instruction->opcode());
  HloComputation* computation = instruction->parent();
  auto caller_instructions = call_graph_->GetComputationCallers(computation);
  if (caller_instructions.size() == 1) {
    auto caller_instruction = caller_instructions[0];
    if (caller_instruction->opcode() == HloOpcode::kWhile) {
      return caller_instruction;
    }
  }
  return std::nullopt;
}

// Like WhileFromBodyParameter, but for a kConditional caller.
std::optional<HloInstruction*> ArCrsCombiner::ConditionalFromBodyParameter(HloInstruction* instruction) {
  CHECK_EQ(HloOpcode::kParameter, instruction->opcode());
  HloComputation* computation = instruction->parent();
  auto caller_instructions = call_graph_->GetComputationCallers(computation);
  if (caller_instructions.size() == 1) {
    auto caller_instruction = caller_instructions[0];
    if (caller_instruction->opcode() == HloOpcode::kConditional) {
      return caller_instruction;
    }
  }
  return std::nullopt;
}

// Collects every kTuple instruction that can flow into `instruction`, tracing
// through domains, while loops (init + body root), conditionals (branch
// operands / branch roots), and get-tuple-element. Returns nullopt when a
// source cannot be resolved to tuples; `visited` breaks cycles (a revisited
// node contributes an empty vector).
std::optional<std::vector<HloInstruction*>> ArCrsCombiner::GetAllTuples(HloInstruction* instruction, absl::flat_hash_set<HloInstruction*>* visited) {
  if (visited->find(instruction) != visited->end()) {
    return std::vector<HloInstruction*>();
  }
  visited->insert(instruction);
  switch (instruction->opcode()) {
    case HloOpcode::kTuple: {
      return std::vector<HloInstruction*>({instruction});
    }
    case HloOpcode::kDomain: {
      return GetAllTuples(instruction->operands()[0], visited);
    }
    case HloOpcode::kParameter: {
      auto maybe_while = WhileFromBodyParameter(instruction);
      if (maybe_while) {
        auto while_instr = *maybe_while;
        auto init_tuples = GetAllTuples(while_instr->while_init(), visited);
        auto body_tuples = GetAllTuples(while_instr->while_body()->root_instruction(), visited);
        if (!init_tuples || !body_tuples) {
          return std::nullopt;
        }
        auto result = *init_tuples;
        result.insert(result.end(), body_tuples->begin(), body_tuples->end());
        return result;
      }
      auto maybe_conditional = ConditionalFromBodyParameter(instruction);
      if (maybe_conditional) {
        auto cond_instr = *maybe_conditional;
        std::vector<HloInstruction*> tuples;
        for (int64_t i = 0; i < cond_instr->branch_computations().size(); ++i) {
          if (cond_instr->branch_computation(i)->parameter_instruction(0) == instruction) {
            // Operand 0 of a conditional is the branch index; branch i's
            // argument is operand i + 1.
            auto branch_tuples = GetAllTuples(cond_instr->mutable_operand(i + 1), visited);
            if (!branch_tuples) {
              return std::nullopt;
            }
            tuples.insert(tuples.end(), branch_tuples->begin(), branch_tuples->end());
          }
        }
        return tuples;
      }
      return std::nullopt;
    }
    case HloOpcode::kGetTupleElement: {
      std::vector<HloInstruction*> result_tuples;
      auto tuples = GetAllTuples(instruction->operands()[0], visited);
      if (!tuples) {
        return std::nullopt;
      }
      for (auto tuple : *tuples) {
        auto tmp_tuples = GetAllTuples(tuple->mutable_operand(instruction->tuple_index()), visited);
        if (!tmp_tuples) {
          return std::nullopt;
        }
        result_tuples.insert(result_tuples.end(), tmp_tuples->begin(), tmp_tuples->end());
      }
      return result_tuples;
    }
    case HloOpcode::kConditional: {
      std::vector<HloInstruction*> result_tuples;
      const auto& branch_computations = instruction->branch_computations();
      result_tuples.reserve(branch_computations.size());
      for (HloComputation* body : branch_computations) {
        if (body->root_instruction()->opcode() != HloOpcode::kTuple) {
          return std::nullopt;
        }
        result_tuples.push_back(body->root_instruction());
      }
      return result_tuples;
    }
    case HloOpcode::kWhile: {
      auto init_tuples = GetAllTuples(instruction->while_init(), visited);
      auto body_tuples = GetAllTuples(instruction->while_body()->root_instruction(), visited);
      if (!init_tuples || !body_tuples) {
        return std::nullopt;
      }
      auto result = *init_tuples;
      result.insert(result.end(), body_tuples->begin(), body_tuples->end());
      return result;
    }
    default:
      return std::nullopt;
  }
}

// True if elements i1 and i2 of every tuple that can flow into
// `tuple_shaped_instruction` provably compute the same value.
bool ArCrsCombiner::TupleElementsComputeSameValue(HloInstruction* tuple_shaped_instruction, int64_t i1, int64_t i2, absl::flat_hash_map<int64_t, int64_t>* visited_pairs) {
  absl::flat_hash_set<HloInstruction*> visited;
  auto tuples = GetAllTuples(tuple_shaped_instruction, &visited);
  if (!tuples) {
    return false;
  }
  for (auto tuple : *tuples) {
    CHECK_EQ(tuple->opcode(), HloOpcode::kTuple);
    if (!InstructionsComputeSameValue(tuple->mutable_operand(i1), tuple->mutable_operand(i2), visited_pairs)) {
      return false;
    }
  }
  return true;
}

// Test-only entry point: builds a throwaway combiner (2 partitions, MPMD) and
// its call graph for the instructions' module, then runs the equivalence
// check. Both instructions must live in the same module.
bool ArCrsCombiner::TestInstructionsComputeSameValue(HloInstruction* i1, HloInstruction* i2) {
  ArCrsCombiner combiner(2, false);
  auto module = i1->GetModule();
  CHECK_EQ(module, i2->GetModule());
  combiner.call_graph_ = CallGraph::Build(module);
  absl::flat_hash_map<int64_t, int64_t> visited_pairs;
  return combiner.InstructionsComputeSameValue(i1, i2, &visited_pairs);
}

// Conservative structural equivalence: same opcode, pairwise-equal operands,
// and matching attributes (via Identical with permissive operand comparison).
// Distinct parameters never compare equal; get-tuple-elements may differ in
// index if TupleElementsComputeSameValue proves the elements equal.
// visited_pairs caches (min_uid, max_uid) pairs already in progress so
// recursion through cycles terminates (treated as equal).
bool ArCrsCombiner::InstructionsComputeSameValue(HloInstruction* i1, HloInstruction* i2, absl::flat_hash_map<int64_t, int64_t>* visited_pairs) {
  if (i1 == i2) {
    return true;
  }
  auto uid1 = i1->unique_id();
  auto uid2 = i2->unique_id();
  auto min_uid = std::min(uid1, uid2);
  auto max_uid = std::max(uid1, uid2);
  auto it = visited_pairs->find(min_uid);
  if (it != visited_pairs->end() && max_uid == it->second) {
    return true;
  }
  auto opcode1 = i1->opcode();
  auto operands1 = i1->operands();
  if (opcode1 != i2->opcode() || operands1.size() != i2->operands().size()) {
    return false;
  }
  auto eq_computations = [](const HloComputation* a, const HloComputation* b) { return *a == *b; };
  auto eq_operands = [](const HloInstruction*, const HloInstruction*) { return true; };
  if (i1->IsCrossModuleAllReduce()) {
    // Cross-module all-reduces are compared only by their own attributes
    // (channel id etc.), not by their operands.
    return i1->Identical(*i2, eq_operands, eq_computations, false);
  }
  visited_pairs->emplace(min_uid, max_uid);
  for (int i = 0; i < operands1.size(); ++i) {
    auto operand1 = operands1[i];
    auto operand2 = i2->operands()[i];
    if (!InstructionsComputeSameValue(operand1, operand2, visited_pairs)) {
      return false;
    }
  }
  if (opcode1 == HloOpcode::kParameter) {
    // Distinct parameters (i1 != i2 here) may hold different runtime values.
    return false;
  }
  if (opcode1 == HloOpcode::kGetTupleElement) {
    return i1->tuple_index() == i2->tuple_index() || TupleElementsComputeSameValue(operands1[0], i1->tuple_index(), i2->tuple_index(), visited_pairs);
  }
  auto eq_instructions = [](const HloInstruction* i1, const HloInstruction* i2) -> bool { return true; };
  return i1->Identical(*i2, eq_instructions, eq_computations, false);
}

// Scans all non-fusion computations for AR->CRS pairs and groups them by the
// AR's channel id in all_reduce_map_. When two ARs target the same CRS
// (crs_reserved_map_ collision), the pair with the greater AR-to-CRS distance
// wins; the loser's channel id goes into discarded_ar_ids so later matches
// with that id are skipped. All pairs under one id must share a distance.
void ArCrsCombiner::GroupAllReducesById(HloModule* module) {
  absl::flat_hash_set<int64_t> discarded_ar_ids;
  for (HloComputation* computation : module->MakeNonfusionComputations()) {
    for (HloInstruction* instruction : computation->instructions()) {
      auto maybe_pair = MatchesArCrsPattern(instruction);
      if (maybe_pair) {
        auto pair = *maybe_pair;
        int64_t ar_id = *(instruction->channel_id());
        if (discarded_ar_ids.find(ar_id) != discarded_ar_ids.end()) {
          continue;
        }
        auto it = crs_reserved_map_.find(pair.crs);
        if (it != crs_reserved_map_.end()) {
          auto prev_ar_id = it->second;
          CHECK(all_reduce_map_.find(ar_id) == all_reduce_map_.end());
          CHECK_NE(prev_ar_id, ar_id);
          auto prev_pair = all_reduce_map_[prev_ar_id].back();
          int64_t prev_distance = prev_pair.distance;
          if (prev_distance < pair.distance) {
            // The longer chain subsumes the shorter one: evict the previous
            // reservation and claim the CRS for this AR id.
            VLOG(2) << "Replacing ArCrsPair: " << prev_pair.ToString() << " with ArCrsPair: " << pair.ToString();
            all_reduce_map_.erase(prev_ar_id);
            discarded_ar_ids.insert(prev_ar_id);
            all_reduce_map_[ar_id].push_back(pair);
            crs_reserved_map_[pair.crs] = ar_id;
          } else {
            discarded_ar_ids.insert(ar_id);
          }
        } else {
          if (all_reduce_map_.find(ar_id) != all_reduce_map_.end()) {
            int64_t prev_distance = all_reduce_map_[ar_id].back().distance;
            CHECK_EQ(prev_distance, pair.distance) << "All ARs with the same AR ID must have the same distance " "from the corresponding CRSs. Found: " << prev_distance << " and " << pair.distance;
          }
          all_reduce_map_[ar_id].push_back(pair);
          crs_reserved_map_[pair.crs] = ar_id;
        }
      }
    }
  }
}

// MPMD filter: for each channel id, every partition's AR->CRS chain must
// compute the same value as partition 0's chain, instruction by instruction;
// otherwise the whole group is dropped from all_reduce_map_.
absl::Status ArCrsCombiner::KeepProvablyEqualInstructionGroupsMPMD() {
  for (auto it = all_reduce_map_.begin(); it != all_reduce_map_.end();) {
    // Advance before possible erase so the iterator stays valid.
    auto copy_it = it++;
    auto channel_id = copy_it->first;
    VLOG(2) << "KeepProvablyEqualInstructionGroups. Checking AllReduce channel id: " << channel_id << "\n";
    auto pairs_vec = copy_it->second;
    TF_RET_CHECK(pairs_vec.size() == num_spatial_partitions_);
    auto instr_0 = pairs_vec[0].ar;
    for (int i = 1; i < pairs_vec.size(); ++i) {
      auto instr_i = pairs_vec[i].ar;
      auto next_0 = instr_0->users()[0];
      auto next_i = instr_i->users()[0];
      absl::flat_hash_map<int64_t, int64_t> visited_pairs;
      while (true) {
        if (!InstructionsComputeSameValue(next_0, next_i, &visited_pairs)) {
          all_reduce_map_.erase(copy_it);
          VLOG(2) << "KeepProvablyEqualInstructionGroups. Erased AllReduce " "channel id: " << channel_id << "\n";
          break;
        }
        if (next_0->IsCrossReplicaAllReduce()) {
          break;
        }
        next_0 = next_0->users()[0];
        next_i = next_i->users()[0];
      }
    }
  }
  return absl::OkStatus();
}

// SPMD filter: the single pair per channel id is kept only if every
// instruction on the AR->CRS chain is proven replicated across partitions by
// HloReplicationAnalysis; otherwise the group is dropped.
absl::Status ArCrsCombiner::KeepProvablyEqualInstructionGroupsSPMD(HloModule* module) {
  TF_ASSIGN_OR_RETURN(auto replication_analysis, HloReplicationAnalysis::Run(module, true));
  for (auto it = all_reduce_map_.begin(); it != all_reduce_map_.end();) {
    auto copy_it = it++;
    auto channel_id = copy_it->first;
    VLOG(2) << "KeepProvablyEqualInstructionGroups. Checking AllReduce channel id: " << channel_id << "\n";
    auto pairs_vec = copy_it->second;
    TF_RET_CHECK(pairs_vec.size() == 1);
    auto instr = pairs_vec[0].ar;
    auto next = instr->users()[0];
    while (true) {
      TF_RET_CHECK(next->shape().IsArray());
      if (!replication_analysis->HloInstructionIsReplicatedAt(next, {})) {
        all_reduce_map_.erase(copy_it);
        VLOG(2) << "KeepProvablyEqualInstructionGroups. Erased AllReduce " "channel id: " << channel_id << "\n";
        break;
      }
      if (next->IsCrossReplicaAllReduce()) {
        break;
      }
      next = next->users()[0];
    }
  }
  return absl::OkStatus();
}

// Rewrites each surviving pair: removes the AR (wiring its operand through),
// then walks down to the CRS fixing up add/subtract nodes on the way — a
// sibling cross-module all-reduce operand is unwrapped; any other operand is
// divided by num_spatial_partitions_ to compensate for the extra summation.
// Finally the CRS inherits the AR's channel id. Returns true if anything was
// rewritten (i.e. all_reduce_map_ was non-empty).
absl::StatusOr<bool> ArCrsCombiner::RewriteGraph() {
  if (all_reduce_map_.empty()) {
    return false;
  }
  for (const auto& it : all_reduce_map_) {
    auto pairs_vec = it.second;
    for (auto pair : pairs_vec) {
      auto all_reduce = pair.ar;
      auto parent_computation = all_reduce->parent();
      auto channel_id = all_reduce->channel_id();
      auto prev = all_reduce->mutable_operand(0);
      auto next = all_reduce->users()[0];
      TF_CHECK_OK(all_reduce->ReplaceUseWith(next, prev));
      TF_CHECK_OK(parent_computation->RemoveInstruction(all_reduce));
      while (!next->IsCrossReplicaAllReduce()) {
        switch (next->opcode()) {
          case HloOpcode::kBitcast:
          case HloOpcode::kTranspose:
          case HloOpcode::kReshape:
          case HloOpcode::kConvert:
          case HloOpcode::kMultiply:
            break;
          case HloOpcode::kAdd:
          case HloOpcode::kSubtract: {
            auto other_operand = (next->operands()[0] == prev) ? next->operands()[1] : next->operands()[0];
            if (other_operand->IsCrossModuleAllReduce() && other_operand->user_count() == 1) {
              TF_CHECK_OK(other_operand->ReplaceAllUsesWith(other_operand->mutable_operand(0)));
            } else {
              auto shape = other_operand->shape();
              Literal lit(shape);
              lit.PopulateWithValue<float>(num_spatial_partitions_);
              auto divisor = parent_computation->AddInstruction(HloInstruction::CreateConstant(lit.Clone()));
              auto division = parent_computation->AddInstruction(HloInstruction::CreateBinary(shape, HloOpcode::kDivide, other_operand, divisor));
              TF_CHECK_OK(other_operand->ReplaceUseWith(next, division));
            }
            break;
          }
          default:
            // MatchesArCrsPattern only admits the opcodes above, so anything
            // else here indicates an internal inconsistency.
            LOG(FATAL) << "Unexpected instruction: " << next->ToShortString();
        }
        prev = next;
        next = next->users()[0];
      }
      next->set_channel_id(channel_id);
    }
  }
  return true;
}

// Pass entry point: group candidate pairs, filter out unprovable ones
// (SPMD or MPMD strategy), rewrite the survivors, and in SPMD mode with
// multiple replicas additionally run ReplaceReplicatedAllReduce.
absl::StatusOr<bool> ArCrsCombiner::Run(HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) {
  call_graph_ = CallGraph::Build(module);
  GroupAllReducesById(module);
  if (spmd_partition_) {
    TF_RETURN_IF_ERROR(KeepProvablyEqualInstructionGroupsSPMD(module));
  } else {
    TF_RETURN_IF_ERROR(KeepProvablyEqualInstructionGroupsMPMD());
  }
  TF_ASSIGN_OR_RETURN(auto changed, RewriteGraph());
  if (module->config().replica_count() > 1 && spmd_partition_) {
    TF_ASSIGN_OR_RETURN(auto replaced, ReplaceReplicatedAllReduce(module, num_spatial_partitions_));
    changed |= replaced;
  }
  return changed;
}

}  // namespace xla
#include "xla/service/ar_crs_combiner.h" #include <cstdint> #include <memory> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/statusor.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" #include "tsl/platform/statusor.h" namespace xla { namespace { namespace op = xla::testing::opcode_matchers; class ArCrsCombinerTest : public HloTestBase {}; TEST_F(ArCrsCombinerTest, SameValueTestBasecase) { const char* module_str = R"( HloModule foobar ENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) { %p = f32[2,2] parameter(0) %constant.f32.1 = f32[2,2] constant({{1, 2}, {3, 4}}) %constant.f32.2 = f32[2,2] constant({{1, 2}, {3, 4}}) ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32.1, %constant.f32.2) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_tuple = module->entry_computation()->root_instruction(); auto i1 = root_tuple->operands()[0]; auto i2 = root_tuple->operands()[1]; EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue( i1, module->entry_computation()->parameter_instruction(0))); EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestBasecase2) { const char* module_str = R"( HloModule foobar ENTRY %entrycomp (x: f32[]) -> (f32[], f32[]) { %x = f32[] parameter(0) ROOT %tuple = (f32[], f32[]) tuple(%x, %x) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_tuple = module->entry_computation()->root_instruction(); auto i1 = root_tuple->operands()[0]; auto i2 = root_tuple->operands()[1]; EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestBasecase3) { const char* module_str = R"( HloModule foobar ENTRY %entrycomp (x: f32[], y: f32[]) -> (f32[], 
f32[]) { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %tuple = (f32[], f32[]) tuple(%x, %y) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_tuple = module->entry_computation()->root_instruction(); auto i1 = root_tuple->operands()[0]; auto i2 = root_tuple->operands()[1]; EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestNumOperands) { const char* module_str = R"( HloModule foobar ENTRY %entrycomp (p: f32[2,2]) -> ((f32[2,2]), (f32[2,2], f32[2,2])) { %p = f32[2,2] parameter(0) %constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}}) %tuple1 = (f32[2,2]) tuple(%constant.f32) %tuple2 = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32) ROOT %tuple = ((f32[2,2]), (f32[2,2], f32[2,2])) tuple(%tuple1, %tuple2) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_tuple = module->entry_computation()->root_instruction(); auto i1 = root_tuple->operands()[0]; auto i2 = root_tuple->operands()[1]; EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestSliceIndicesMatch) { const char* module_str = R"( HloModule foobar ENTRY %entrycomp (p: f32[2]) -> (f32[1], f32[1]) { %p = f32[2] parameter(0) %slice.1 = f32[1] slice(f32[2] %p), slice={[0:1]} %slice.2 = f32[1] slice(f32[2] %p), slice={[0:1]} ROOT %tuple = (f32[1], f32[1]) tuple(%slice.1, %slice.2) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_tuple = module->entry_computation()->root_instruction(); auto i1 = root_tuple->operands()[0]; auto i2 = root_tuple->operands()[1]; EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestSliceIndicesDontMatch) { const char* module_str = R"( HloModule foobar ENTRY %entrycomp (p: f32[2]) -> (f32[1], f32[1]) { %p 
= f32[2] parameter(0) %slice.1 = f32[1] slice(f32[2] %p), slice={[0:1]} %slice.2 = f32[1] slice(f32[2] %p), slice={[1:2]} ROOT %tuple = (f32[1], f32[1]) tuple(%slice.1, %slice.2) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_tuple = module->entry_computation()->root_instruction(); auto i1 = root_tuple->operands()[0]; auto i2 = root_tuple->operands()[1]; EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestTupleElementSameIndex) { const char* module_str = R"( HloModule foobar ENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) { %p = f32[2,2] parameter(0) %constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}}) %tuple.1 = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32) %get-tuple-element.1 = f32[2,2] get-tuple-element(%tuple.1), index=0 %get-tuple-element.2 = f32[2,2] get-tuple-element(%tuple.1), index=0 ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%get-tuple-element.1, %get-tuple-element.2) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_tuple = module->entry_computation()->root_instruction(); auto i1 = root_tuple->operands()[0]; auto i2 = root_tuple->operands()[1]; EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestTupleElementDifferentIndex1) { const char* module_str = R"( HloModule foobar ENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) { %p = f32[2,2] parameter(0) %constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}}) %tuple.1 = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32) %get-tuple-element.1 = f32[2,2] get-tuple-element(%tuple.1), index=0 %get-tuple-element.2 = f32[2,2] get-tuple-element(%tuple.1), index=1 ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%get-tuple-element.1, %get-tuple-element.2) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, 
ParseAndReturnVerifiedModule(module_str)); auto root_tuple = module->entry_computation()->root_instruction(); auto i1 = root_tuple->operands()[0]; auto i2 = root_tuple->operands()[1]; EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestTupleElementDifferentIndex2) { const char* module_str = R"( HloModule foobar ENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) { %p = f32[2,2] parameter(0) %constant.f32.1 = f32[2,2] constant({{1, 2}, {3, 4}}) %constant.f32.2 = f32[2,2] constant({{2, 3}, {4, 5}}) %tuple.1 = (f32[2,2], f32[2,2]) tuple(%constant.f32.1, %constant.f32.2) %get-tuple-element.1 = f32[2,2] get-tuple-element(%tuple.1), index=0 %get-tuple-element.2 = f32[2,2] get-tuple-element(%tuple.1), index=1 ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%get-tuple-element.1, %get-tuple-element.2) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_tuple = module->entry_computation()->root_instruction(); auto i1 = root_tuple->operands()[0]; auto i2 = root_tuple->operands()[1]; EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestWhile1) { const char* module_str = R"( HloModule foobar %condition (x: (f32[2,2], f32[2,2])) -> pred[] { %x = (f32[2,2], f32[2,2]) parameter(0) %constant.0 = s32[] constant(0) %constant.1 = s32[] constant(1) ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %constant.0), direction=GT } %body (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) { %x = (f32[2,2], f32[2,2]) parameter(0) %constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}}) %get-tuple-element.1 = f32[2,2] get-tuple-element(%x), index=0 %get-tuple-element.2 = f32[2,2] get-tuple-element(%x), index=1 %add.1 = f32[2,2] add(%get-tuple-element.1, %constant.f32) %add.2 = f32[2,2] add(%get-tuple-element.2, %constant.f32) ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2) } ENTRY %WhileLoop () -> 
(f32[2,2], f32[2,2]) { %constant.f32 = f32[2,2] constant({{3, 4}, {5, 6}}) %init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32) ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition, body=%body } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_while = module->entry_computation()->root_instruction(); auto body_tuple = root_while->while_body()->root_instruction(); auto i1 = body_tuple->operands()[0]; auto i2 = body_tuple->operands()[1]; EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestWhile2) { const char* module_str = R"( HloModule foobar %condition (x: (f32[2,2], f32[2,2])) -> pred[] { %x = (f32[2,2], f32[2,2]) parameter(0) %constant.0 = s32[] constant(0) %constant.1 = s32[] constant(1) ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %constant.0), direction=GT } %body (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) { %x = (f32[2,2], f32[2,2]) parameter(0) %constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}}) %get-tuple-element.1 = f32[2,2] get-tuple-element(%x), index=0 %get-tuple-element.2 = f32[2,2] get-tuple-element(%x), index=1 %add.1 = f32[2,2] add(%get-tuple-element.1, %constant.f32) %add.2 = f32[2,2] add(%get-tuple-element.2, %constant.f32) ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2) } ENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) { %constant.f32.1 = f32[2,2] constant({{3, 4}, {5, 6}}) %constant.f32.2 = f32[2,2] constant({{3, 4}, {7, 8}}) %init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32.1, %constant.f32.2) ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition, body=%body } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_while = module->entry_computation()->root_instruction(); auto body_tuple = root_while->while_body()->root_instruction(); auto i1 = 
body_tuple->operands()[0]; auto i2 = body_tuple->operands()[1]; EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestWhile3) { const char* module_str = R"( HloModule foobar %condition (x: (f32[2,2], f32[2,2])) -> pred[] { %x = (f32[2,2], f32[2,2]) parameter(0) %constant.0 = s32[] constant(0) %constant.1 = s32[] constant(1) ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %constant.0), direction=GT } %body (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) { %x = (f32[2,2], f32[2,2]) parameter(0) %constant.f32.1 = f32[2,2] constant({{1, 2}, {3, 4}}) %constant.f32.2 = f32[2,2] constant({{3, 4}, {1, 2}}) %get-tuple-element.1 = f32[2,2] get-tuple-element(%x), index=0 %get-tuple-element.2 = f32[2,2] get-tuple-element(%x), index=1 %add.1 = f32[2,2] add(%get-tuple-element.1, %constant.f32.1) %add.2 = f32[2,2] add(%get-tuple-element.2, %constant.f32.2) ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2) } ENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) { %constant.f32 = f32[2,2] constant({{3, 4}, {5, 6}}) %init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32) ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition, body=%body } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_while = module->entry_computation()->root_instruction(); auto body_tuple = root_while->while_body()->root_instruction(); auto i1 = body_tuple->operands()[0]->operands()[0]; auto i2 = body_tuple->operands()[1]->operands()[0]; EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestNestedWhile) { const char* module_str = R"( HloModule foobar %condition (x: (f32[2,2], f32[2,2])) -> pred[] { %x = (f32[2,2], f32[2,2]) parameter(0) ROOT %t = pred[] constant(true) } %body_inner (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) { %x = (f32[2,2], f32[2,2]) parameter(0) %constant.f32 = 
f32[2,2] constant({{1, 2}, {3, 4}}) %gte.1 = f32[2,2] get-tuple-element(%x), index=0 %gte.2 = f32[2,2] get-tuple-element(%x), index=1 %add.1 = f32[2,2] add(%gte.1, %constant.f32) %add.2 = f32[2,2] add(%gte.2, %constant.f32) ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2) } %body_outer (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) { %x = (f32[2,2], f32[2,2]) parameter(0) %gte.1 = f32[2,2] get-tuple-element(%x), index=0 %gte.2 = f32[2,2] get-tuple-element(%x), index=1 %init = (f32[2,2], f32[2,2]) tuple(%gte.1, %gte.2) ROOT %while.1 = (f32[2,2], f32[2,2]) while(%init), condition=%condition, body=%body_inner } ENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) { %constant.f32 = f32[2,2] constant({{3, 4}, {5, 6}}) %init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32) ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition, body=%body_outer } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_while = module->entry_computation()->root_instruction(); auto inner_while = root_while->while_body()->root_instruction(); auto i1 = inner_while->while_body()->root_instruction()->operands()[0]; auto i2 = inner_while->while_body()->root_instruction()->operands()[1]; EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } void CompareReplicaGroups(absl::Span<const ReplicaGroup> groups_before, absl::Span<const ReplicaGroup> groups_after) { ASSERT_EQ(groups_before.size(), groups_after.size()); for (int i = 0; i < groups_before.size(); ++i) { auto group_before = groups_before[i]; std::vector<int64_t> ids_before(group_before.replica_ids().begin(), group_before.replica_ids().end()); auto group_after = groups_after[i]; std::vector<int64_t> ids_after(group_after.replica_ids().begin(), group_after.replica_ids().end()); EXPECT_EQ(ids_before, ids_after); } } TEST_F(ArCrsCombinerTest, RewriteArConvertCrs) { const char* module_str = R"( HloModule foobar %sum.bf16 (a: bf16[], 
b: bf16[]) -> bf16[] { %a = bf16[] parameter(0) %b = bf16[] parameter(1) ROOT %add = bf16[] add(%a, %b) } %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: bf16[]) -> (f32[], f32[]) { %p = bf16[] parameter(0) %constant.bf16 = bf16[] constant(1) %all-reduce.ar.1 = bf16[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.bf16, sharding={maximal device=0} %convert.1 = f32[] convert(%all-reduce.ar.1), sharding={maximal device=0} %all-reduce.1 = f32[] all-reduce(%convert.1), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=0} %all-reduce.ar.2 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.bf16, sharding={maximal device=1} %convert.2 = f32[] convert(%all-reduce.ar.2), sharding={maximal device=1} %all-reduce.2 = f32[] all-reduce(%convert.2), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=1} ROOT %tuple = (f32[], f32[]) tuple(%all-reduce.1, %all-reduce.2), sharding={{maximal device=0}, {maximal device=1}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Convert(op::Parameter())), op::AllReduce(op::Convert(op::Constant())))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); } TEST_F(ArCrsCombinerTest, RewriteArConvertCrsSPMD) { const char* module_str = R"( HloModule foobar %sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] { %a = bf16[] 
parameter(0) %b = bf16[] parameter(1) ROOT %add = bf16[] add(%a, %b) } %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: bf16[]) -> (f32[]) { %p = bf16[] parameter(0) %all-reduce.ar.1 = bf16[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.bf16 %convert.1 = f32[] convert(%all-reduce.ar.1) %all-reduce.1 = f32[] all-reduce(%convert.1), replica_groups={{0,1}}, to_apply=%sum.f32 ROOT %tuple = (f32[]) tuple(%all-reduce.1) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, true); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Convert(op::Parameter())))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); }
// AR -> bitcast -> CRS should also combine: the bitcast stays between the
// parameter and the merged AllReduce, and replica groups are unchanged.
TEST_F(ArCrsCombinerTest, RewriteArBitcastCrs) { const char* module_str = R"( HloModule foobar %sum.1 (a: f32[2,1], b: f32[2,1]) -> f32[2,1] { %a = f32[2,1] parameter(0) %b = f32[2,1] parameter(1) ROOT %add = f32[2,1] add(%a, %b) } %sum.2 (x: f32[2], y: f32[2]) -> f32[2] { %x = f32[2] parameter(0) %y = f32[2] parameter(1) ROOT %add = f32[2] add(%x, %y) } ENTRY %entrycomp (p: f32[2,1]) -> (f32[2], f32[2]) { %p = f32[2,1] parameter(0) %all-reduce.ar.1 = f32[2,1] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.1, sharding={maximal device=0} %bitcast.1 = f32[2]{0} bitcast(f32[2,1]{1,0} %all-reduce.ar.1) %all-reduce.1 = f32[2] all-reduce(%bitcast.1), replica_groups={{0,1}}, to_apply=%sum.2, sharding={maximal device=0} %all-reduce.ar.2 = f32[2,1]
all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.1, sharding={maximal device=1} %bitcast.2 = f32[2]{0} bitcast(f32[2,1]{1,0} %all-reduce.ar.2) %all-reduce.2 = f32[2] all-reduce(%bitcast.2), replica_groups={{0,1}}, to_apply=%sum.2, sharding={maximal device=1} ROOT %tuple = (f32[2], f32[2]) tuple(%all-reduce.1, %all-reduce.2), sharding={{maximal device=0}, {maximal device=1}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Bitcast(op::Parameter())), op::AllReduce(op::Bitcast(op::Parameter())))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); }
// AR -> multiply(constant) -> CRS combines into AllReduce(Multiply(Parameter,
// Constant)) on each device.
TEST_F(ArCrsCombinerTest, RewriteArMultiplyCrs) { const char* module_str = R"( HloModule foobar %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) { %p = f32[] parameter(0) %constant.f32 = f32[] constant(123) %all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.f32, sharding={maximal device=0} %multiply.1 = f32[] multiply(%all-reduce.ar.1, %constant.f32), sharding={maximal device=0} %all-reduce.1 = f32[] all-reduce(%multiply.1), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=0} %all-reduce.ar.2 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.f32, sharding={maximal device=1} %multiply.2 = f32[] multiply(%all-reduce.ar.2, %constant.f32), sharding={maximal
device=1} %all-reduce.2 = f32[] all-reduce(%multiply.2), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=1} ROOT %tuple = (f32[], f32[]) tuple(%all-reduce.1, %all-reduce.2), sharding={{maximal device=0}, {maximal device=1}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Multiply(op::Parameter(), op::Constant())), op::AllReduce(op::Multiply(op::Parameter(), op::Constant())))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); }
// SPMD variant of the AR -> multiply -> CRS rewrite.
TEST_F(ArCrsCombinerTest, RewriteArMultiplyCrsSPMD) { const char* module_str = R"( HloModule foobar %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[]) { %p = f32[] parameter(0) %constant.f32 = f32[] constant(123) %all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.f32 %multiply.1 = f32[] multiply(%all-reduce.ar.1, %constant.f32) %all-reduce.1 = f32[] all-reduce(%multiply.1), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=0} ROOT %tuple = (f32[]) tuple(%all-reduce.1) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, true); auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Multiply(op::Parameter(), op::Constant())))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); }
// AR -> convert -> add(constant) -> CRS: after combining, the matcher expects
// the added constant to appear divided (Divide(Constant, Constant)) inside the
// merged AllReduce's operand.
TEST_F(ArCrsCombinerTest, RewriteArConvertAddCrs) { const char* module_str = R"( HloModule foobar %sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] { %a = bf16[] parameter(0) %b = bf16[] parameter(1) ROOT %add = bf16[] add(%a, %b) } %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) { %p = f32[] parameter(0) %constant.bf16 = bf16[] constant(1) %constant.f32 = f32[] constant(2) %all-reduce.ar.1 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.bf16, sharding={maximal device=0} %convert.1 = f32[] convert(%all-reduce.ar.1), sharding={maximal device=0} %add.1 = f32[] add(%constant.f32, %convert.1), sharding={maximal device=0} %all-reduce.1 = f32[] all-reduce(%add.1), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=0} %all-reduce.ar.2 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.bf16, sharding={maximal device=1} %convert.2 = f32[] convert(%all-reduce.ar.2), sharding={maximal device=1} %add.2 = f32[] add(%constant.f32, %convert.2), sharding={maximal device=1} %all-reduce.2 = f32[] all-reduce(%add.2), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=1} ROOT %tuple = (f32[], f32[]) tuple(%all-reduce.1, %all-reduce.2), sharding={{maximal device=0}, {maximal device=1}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before
= crs_before->replica_groups(); ArCrsCombiner combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Tuple( op::AllReduce(op::Add(op::Divide(op::Constant(), op::Constant()), op::Convert())), op::AllReduce(op::Add(op::Divide(op::Constant(), op::Constant()), op::Convert())))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); }
// SPMD variant of the AR -> convert -> add -> CRS rewrite.
TEST_F(ArCrsCombinerTest, RewriteArConvertAddCrsSPMD) { const char* module_str = R"( HloModule foobar %sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] { %a = bf16[] parameter(0) %b = bf16[] parameter(1) ROOT %add = bf16[] add(%a, %b) } %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[]) { %p = f32[] parameter(0) %constant.bf16 = bf16[] constant(1) %constant.f32 = f32[] constant(2) %all-reduce.ar.1 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.bf16 %convert.1 = f32[] convert(%all-reduce.ar.1), sharding={maximal device=0} %add.1 = f32[] add(%constant.f32, %convert.1) %all-reduce.1 = f32[] all-reduce(%add.1), replica_groups={{0,1}}, to_apply=%sum.f32 ROOT %tuple = (f32[]) tuple(%all-reduce.1) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, true); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Add( op::Divide(op::Constant(), op::Constant()), op::Convert())))); auto crs_after =
module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); }
// Negative case: the two devices add different constants (2 vs 3), so the
// pass must leave the module unchanged.
TEST_F(ArCrsCombinerTest, OtherSummandNotTheSameDontRewrite) { const char* module_str = R"( HloModule foobar %sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] { %a = bf16[] parameter(0) %b = bf16[] parameter(1) ROOT %add = bf16[] add(%a, %b) } %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) { %p = f32[] parameter(0) %constant.bf16 = bf16[] constant(1) %constant.f32.1 = f32[] constant(2) %constant.f32.2 = f32[] constant(3) %all-reduce.ar.1 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.bf16, sharding={maximal device=0} %convert.1 = f32[] convert(%all-reduce.ar.1), sharding={maximal device=0} %add.1 = f32[] add(%constant.f32.1, %convert.1), sharding={maximal device=0} %all-reduce.1 = f32[] all-reduce(%add.1), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=0} %all-reduce.ar.2 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.bf16, sharding={maximal device=1} %convert.2 = f32[] convert(%all-reduce.ar.2), sharding={maximal device=1} %add.2 = f32[] add(%constant.f32.2, %convert.2), sharding={maximal device=1} %all-reduce.2 = f32[] all-reduce(%add.2), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=1} ROOT %tuple = (f32[], f32[]) tuple(%all-reduce.1, %all-reduce.2), sharding={{maximal device=0}, {maximal device=1}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); ArCrsCombiner combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_FALSE(changed); }
// SPMD negative case: the other summand is the parameter %p, so no rewrite.
TEST_F(ArCrsCombinerTest, OtherSummandNotTheSameDontRewriteSPMD) { const char* module_str = R"( HloModule foobar
%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] { %a = bf16[] parameter(0) %b = bf16[] parameter(1) ROOT %add = bf16[] add(%a, %b) } %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[]) { %p = f32[] parameter(0) %constant.bf16 = bf16[] constant(1) %constant.f32.1 = f32[] constant(2) %all-reduce.ar.1 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.bf16 %convert.1 = f32[] convert(%all-reduce.ar.1) %add.1 = f32[] add(%p, %convert.1) %all-reduce.1 = f32[] all-reduce(%add.1), replica_groups={{0,1}}, to_apply=%sum.f32 ROOT %tuple = (f32[]) tuple(%all-reduce.1) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); ArCrsCombiner combiner(2, true); auto changed = combiner.Run(module.get()).value(); EXPECT_FALSE(changed); }
// An AR feeding a CRS directly (no op in between) must be handled without
// crashing; the pair is expected to merge into AllReduce(Parameter).
TEST_F(ArCrsCombinerTest, ArThenCrsDontCrash) { const char* module_str = R"( HloModule foobar %sum.1 (a: f32[], b: f32[]) -> f32[] { %a = f32[] parameter(0) %b = f32[] parameter(1) ROOT %add = f32[] add(%a, %b) } ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) { %p = f32[] parameter(0) %constant.f32 = f32[] constant(123) %all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.1, sharding={maximal device=0} %all-reduce.1 = f32[] all-reduce(%all-reduce.ar.1), replica_groups={{0,1}}, to_apply=%sum.1, sharding={maximal device=0} %multiply.1 = f32[] multiply(%all-reduce.1, %constant.f32), sharding={maximal device=0} %all-reduce.ar.2 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.1, sharding={maximal device=1} %all-reduce.2 = f32[] all-reduce(%all-reduce.ar.2), replica_groups={{0,1}}, to_apply=%sum.1, sharding={maximal device=1} %multiply.2 = f32[] multiply(%all-reduce.2, %constant.f32), sharding={maximal device=1} ROOT %tuple = (f32[], f32[]) tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Parameter()), op::AllReduce(op::Parameter()))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); }
// Two stacked adds between AR and CRS: each added constant appears divided
// (Divide(Constant, Constant)) in the rewritten operand.
TEST_F(ArCrsCombinerTest, RewriteMultipleAdds) { const char* module_str = R"( HloModule foobar %sum (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) { %p = f32[] parameter(0) %constant.1 = f32[] constant(1) %constant.2 = f32[] constant(2) %all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum, sharding={maximal device=0} %add.11 = f32[] add(%constant.1, %all-reduce.ar.1), sharding={maximal device=0} %add.12 = f32[] add(%constant.2, %add.11), sharding={maximal device=0} %all-reduce.1 = f32[] all-reduce(%add.12), replica_groups={{0,1}}, to_apply=%sum, sharding={maximal device=0} %all-reduce.ar.2 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum, sharding={maximal device=0} %add.21 = f32[] add(%constant.1, %all-reduce.ar.2), sharding={maximal device=0} %add.22 = f32[] add(%constant.2, %add.21), sharding={maximal device=0} %all-reduce.2 = f32[] all-reduce(%add.22), replica_groups={{0,1}}, to_apply=%sum, sharding={maximal device=0} ROOT %tuple = (f32[], f32[]) tuple(%all-reduce.1, %all-reduce.2), sharding={{maximal device=0}, {maximal device=1}} } )";
TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Add( op::Divide(op::Constant(), op::Constant()), op::Add(op::Divide(op::Constant(), op::Constant()), op::Parameter()))), op::AllReduce(op::Add( op::Divide(op::Constant(), op::Constant()), op::Add(op::Divide(op::Constant(), op::Constant()), op::Parameter()))))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); }
// SPMD variant of the multiple-adds rewrite.
TEST_F(ArCrsCombinerTest, RewriteMultipleAddsSPMD) { const char* module_str = R"( HloModule foobar %sum (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[]) { %p = f32[] parameter(0) %constant.1 = f32[] constant(1) %constant.2 = f32[] constant(2) %all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum %add.11 = f32[] add(%constant.1, %all-reduce.ar.1) %add.12 = f32[] add(%constant.2, %add.11) %all-reduce.1 = f32[] all-reduce(%add.12), replica_groups={{0,1}}, to_apply=%sum ROOT %tuple = (f32[]) tuple(%all-reduce.1) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, true); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce( op::Add(op::Divide(op::Constant(), op::Constant()), op::Add(op::Divide(op::Constant(), op::Constant()), op::Parameter()))))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); }
// subtract(constant, AR) between AR and CRS: rewritten with the constant
// divided (Subtract(Divide(Constant, Constant), Parameter)).
TEST_F(ArCrsCombinerTest, RewriteArSubtractCrs) { const char* module_str = R"( HloModule foobar %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) { %p = f32[] parameter(0) %constant.f32 = f32[] constant(123) %all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.f32, sharding={maximal device=0} %sub.1 = f32[] subtract(%constant.f32, %all-reduce.ar.1), sharding={maximal device=0} %all-reduce.1 = f32[] all-reduce(%sub.1), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=0} %all-reduce.ar.2 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.f32, sharding={maximal device=1} %sub.2 = f32[] subtract(%constant.f32, %all-reduce.ar.2), sharding={maximal device=1} %all-reduce.2 = f32[] all-reduce(%sub.2), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=1} ROOT %tuple = (f32[], f32[]) tuple(%all-reduce.1, %all-reduce.2), sharding={{maximal device=0}, {maximal device=1}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Tuple( op::AllReduce(op::Subtract(op::Divide(op::Constant(), op::Constant()), op::Parameter())),
op::AllReduce(op::Subtract(op::Divide(op::Constant(), op::Constant()), op::Parameter())))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); }
// SPMD variant of the subtract rewrite.
TEST_F(ArCrsCombinerTest, RewriteArSubtractCrsSPMD) { const char* module_str = R"( HloModule foobar %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[]) { %p = f32[] parameter(0) %constant.f32 = f32[] constant(123) %all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.f32 %sub.1 = f32[] subtract(%constant.f32, %all-reduce.ar.1) %all-reduce.1 = f32[] all-reduce(%sub.1), replica_groups={{0,1}}, to_apply=%sum.f32 ROOT %tuple = (f32[]) tuple(%all-reduce.1) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, true); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Subtract( op::Divide(op::Constant(), op::Constant()), op::Parameter())))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); }
// Two distinct ARs (channel ids 1 and 2) feed the add chain, with the ARs on
// the left of the adds; both survive as parameters of the merged AllReduce.
TEST_F(ArCrsCombinerTest, RewriteMultipleARsLeft) { const char* module_str = R"( HloModule foobar %sum (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) { %p = f32[] parameter(0) %const1 = f32[] constant(1) %const2 = f32[] constant(2) %ar11 = f32[]
all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum, sharding={maximal device=0} %add11 = f32[] add(%ar11, %const1), sharding={maximal device=0} %ar12 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=2, to_apply=%sum, sharding={maximal device=0} %add12 = f32[] add(%add11, %ar12), sharding={maximal device=0} %crs1 = f32[] all-reduce(%add12), replica_groups={{0,1}}, to_apply=%sum, sharding={maximal device=0} %ar21 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum, sharding={maximal device=1} %add21 = f32[] add(%ar21, %const1), sharding={maximal device=1} %ar22 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=2, to_apply=%sum, sharding={maximal device=1} %add22 = f32[] add(%add21, %ar22), sharding={maximal device=1} %crs2 = f32[] all-reduce(%add22), replica_groups={{0,1}}, to_apply=%sum, sharding={maximal device=1} ROOT %tuple = (f32[], f32[]) tuple(%crs1, %crs2), sharding={{maximal device=0}, {maximal device=1}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Add( op::Add(op::Parameter(), op::Divide(op::Constant(), op::Constant())), op::Parameter())), op::AllReduce(op::Add( op::Add(op::Parameter(), op::Divide(op::Constant(), op::Constant())), op::Parameter())))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); }
// SPMD variant of the multiple-ARs-on-the-left rewrite.
TEST_F(ArCrsCombinerTest, RewriteMultipleARsLeftSPMD) { const char* module_str = R"( HloModule foobar %sum (x: f32[], y: f32[]) -> f32[] { %x
= f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[]) { %p = f32[] parameter(0) %const1 = f32[] constant(1) %const2 = f32[] constant(2) %ar11 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum %add11 = f32[] add(%ar11, %const1) %ar12 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=2, to_apply=%sum %add12 = f32[] add(%add11, %ar12) %crs1 = f32[] all-reduce(%add12), replica_groups={{0,1}}, to_apply=%sum ROOT %tuple = (f32[]) tuple(%crs1) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, true); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Add( op::Add(op::Parameter(), op::Divide(op::Constant(), op::Constant())), op::Parameter())))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); }
// As RewriteMultipleARsLeft, but the second AR appears on the right-hand side
// of the add chain.
TEST_F(ArCrsCombinerTest, RewriteMultipleARsRight) { const char* module_str = R"( HloModule foobar %sum (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) { %p = f32[] parameter(0) %const1 = f32[] constant(1) %const2 = f32[] constant(2) %ar11 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum, sharding={maximal device=0} %ar12 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=2, to_apply=%sum, sharding={maximal device=0} %add11 = f32[] add(%ar12, %const1), sharding={maximal device=0} %add12 = f32[] add(%ar11, %add11), sharding={maximal device=0} %crs1 =
f32[] all-reduce(%add12), replica_groups={{0,1}}, to_apply=%sum, sharding={maximal device=0} %ar21 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum, sharding={maximal device=1} %ar22 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=2, to_apply=%sum, sharding={maximal device=1} %add21 = f32[] add(%ar22, %const1), sharding={maximal device=1} %add22 = f32[] add(%ar21, %add21), sharding={maximal device=1} %crs2 = f32[] all-reduce(%add22), replica_groups={{0,1}}, to_apply=%sum, sharding={maximal device=1} ROOT %tuple = (f32[], f32[]) tuple(%crs1, %crs2), sharding={{maximal device=0}, {maximal device=1}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Add( op::Parameter(), op::Add(op::Parameter(), op::Divide(op::Constant(), op::Constant())))), op::AllReduce(op::Add( op::Parameter(), op::Add(op::Parameter(), op::Divide(op::Constant(), op::Constant())))))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); }
// SPMD variant of the multiple-ARs-on-the-right rewrite.
TEST_F(ArCrsCombinerTest, RewriteMultipleARsRightSPMD) { const char* module_str = R"( HloModule foobar %sum (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[]) { %p = f32[] parameter(0) %const1 = f32[] constant(1) %const2 = f32[] constant(2) %ar11 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum %ar12 = f32[] all-reduce(%p), replica_groups={{0},{1}},
channel_id=2, to_apply=%sum %add11 = f32[] add(%ar12, %const1) %add12 = f32[] add(%ar11, %add11) %crs1 = f32[] all-reduce(%add12), replica_groups={{0,1}}, to_apply=%sum ROOT %tuple = (f32[]) tuple(%crs1) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, true); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Add( op::Parameter(), op::Add(op::Parameter(), op::Divide(op::Constant(), op::Constant())))))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); }
// Negative case: module verified with a single replica (replica_groups={{0}});
// the pass must not rewrite.
TEST_F(ArCrsCombinerTest, OneReplicaDontRewrite) { const char* module_str = R"( HloModule foobar %sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] { %a = bf16[] parameter(0) %b = bf16[] parameter(1) ROOT %add = bf16[] add(%a, %b) } %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: bf16[]) -> (f32[], f32[]) { %p = bf16[] parameter(0) %constant.bf16 = bf16[] constant(1) %all-reduce.ar.1 = bf16[] all-reduce(%p), replica_groups={{0}}, channel_id=1, to_apply=%sum.bf16, sharding={maximal device=0} %convert.1 = f32[] convert(%all-reduce.ar.1), sharding={maximal device=0} %all-reduce.1 = f32[] all-reduce(%convert.1), replica_groups={{0}}, to_apply=%sum.f32, sharding={maximal device=0} %all-reduce.ar.2 = bf16[] all-reduce(%constant.bf16), replica_groups={{0}}, channel_id=1, to_apply=%sum.bf16, sharding={maximal device=1} %convert.2 = f32[] convert(%all-reduce.ar.2), sharding={maximal device=1} %all-reduce.2 = f32[] all-reduce(%convert.2),
replica_groups={{0}}, to_apply=%sum.f32, sharding={maximal device=1} ROOT %tuple = (f32[], f32[]) tuple(%all-reduce.1, %all-reduce.2), sharding={{maximal device=0}, {maximal device=1}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 1)); ArCrsCombiner combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_FALSE(changed); }
// SPMD single-replica negative case: no rewrite.
TEST_F(ArCrsCombinerTest, OneReplicaDontRewriteSPMD) { const char* module_str = R"( HloModule foobar %sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] { %a = bf16[] parameter(0) %b = bf16[] parameter(1) ROOT %add = bf16[] add(%a, %b) } %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: bf16[]) -> (f32[]) { %p = bf16[] parameter(0) %constant.bf16 = bf16[] constant(1) %all-reduce.ar.1 = bf16[] all-reduce(%p), replica_groups={{0}}, channel_id=1, to_apply=%sum.bf16 %convert.1 = f32[] convert(%all-reduce.ar.1) %all-reduce.1 = f32[] all-reduce(%convert.1), replica_groups={{0}}, to_apply=%sum.f32 ROOT %tuple = (f32[]) tuple(%all-reduce.1) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 1)); ArCrsCombiner combiner(2, true); auto changed = combiner.Run(module.get()).value(); EXPECT_FALSE(changed); }
// Exercises TestInstructionsComputeSameValue across conditional branches: the
// true branch merely swaps two copies of the same value (same), while the
// false branch produces gte.0 vs add(gte.1, gte.1) (different).
TEST_F(ArCrsCombinerTest, SameValueTestConditional) { const char* module_str = R"( HloModule foobar branch_true { pt = (f32[2,4], f32[2,4]) parameter(0) gte.0 = f32[2,4] get-tuple-element(pt), index=0 gte.1 = f32[2,4] get-tuple-element(pt), index=1 ROOT tuple.t = (f32[2,4], f32[2,4]) tuple(gte.1, gte.0) } branch_false { pf = (f32[2,4], f32[2,4]) parameter(0) gte.0 = f32[2,4] get-tuple-element(pf), index=0 gte.1 = f32[2,4] get-tuple-element(pf), index=1 add = f32[2,4] add(gte.1, gte.1) ROOT tuple.f = (f32[2,4], f32[2,4]) tuple(gte.0, add) } ENTRY Parameters1.v4 { constant = pred[] constant(true) p = f32[2,4] parameter(0) tuple = (f32[2,4],
f32[2,4]) tuple(p, p) ROOT conditional = (f32[2,4], f32[2,4]) conditional(constant, tuple, tuple), true_computation=branch_true, false_computation=branch_false } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto cond = module->entry_computation()->root_instruction(); auto branch_true = cond->branch_computation(0)->root_instruction(); auto t0 = branch_true->mutable_operand(0); auto t1 = branch_true->mutable_operand(1); EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(t0, t1)); auto branch_false = cond->branch_computation(1)->root_instruction(); auto f0 = branch_false->mutable_operand(0); auto f1 = branch_false->mutable_operand(1); EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(f0, f1)); }
// Negative case: the first all-reduce already crosses replicas (groups {{0,1}}
// with a channel id), so combining would change semantics — expects no change.
TEST_F(ArCrsCombinerTest, AllReduceWithReplicas) { const char* module_str = R"( HloModule foobar %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: bf16[]) -> (f32[], f32[]) { %p = bf16[] parameter(0) %all-reduce.0 = f32[] all-reduce(%p), channel_id=1, replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=0} %all-reduce.1 = f32[] all-reduce(%p), channel_id=1, replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=1} %all-reduce.2 = f32[] all-reduce(%all-reduce.0), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=0} %all-reduce.3 = f32[] all-reduce(%all-reduce.1), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=1} ROOT %tuple = (f32[], f32[]) tuple(%all-reduce.2, %all-reduce.3), sharding={{maximal device=0}, {maximal device=1}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); ArCrsCombiner combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_FALSE(changed); }
// SPMD negative case with per-replica groups {{0},{1}} on both reduces.
TEST_F(ArCrsCombinerTest, AllReduceWithReplicasSPMD) { const char* module_str = R"( HloModule foobar
%sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: bf16[]) -> (f32[]) { %p = bf16[] parameter(0) %all-reduce.0 = f32[] all-reduce(%p), channel_id=1, replica_groups={{0},{1}}, to_apply=%sum.f32 %all-reduce.2 = f32[] all-reduce(%all-reduce.0), replica_groups={{0},{1}}, to_apply=%sum.f32 ROOT %tuple = (f32[]) tuple(%all-reduce.2) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); ArCrsCombiner combiner(2, true); auto changed = combiner.Run(module.get()).value(); EXPECT_FALSE(changed); }
// A replicated all-reduce over 32 replicas is rewritten into a channelled
// AllReduce divided by a broadcast constant; the test checks the divisor
// literal is all-2.0 and that the new AllReduce carries a channel id.
TEST_F(ArCrsCombinerTest, ReplaceReplicatedAllReduceSPMD) { const char* module_str = R"( HloModule foobar %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[2,4]) -> f32[2,4] { %p = f32[2,4] parameter(0), sharding={replicated} ROOT %all-reduce = f32[2,4] all-reduce(%p), to_apply=%sum.f32, replica_groups={{0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 32)); ArCrsCombiner combiner(2, true); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); auto root = module->entry_computation()->root_instruction(); EXPECT_THAT(root, op::Divide(op::AllReduce(op::Parameter()), op::Broadcast(op::Constant()))); auto ar = root->operand(0); auto divisor = root->operand(1)->operand(0); EXPECT_TRUE(ar->channel_id()); EXPECT_TRUE(divisor->literal().IsAllFloat(2)); }
// AR using use_global_device_ids groups ({{0,1,2,3},{4,5,6,7}}) with 2
// replicas x 4 partitions still combines with the following CRS (changed).
TEST_F(ArCrsCombinerTest, AllReduceWithGlobalIdReplicaGroups) { const char* module_str = R"( HloModule foobar %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: bf16[]) -> (f32[]) { %p = bf16[] parameter(0) %all-reduce.0 = f32[] all-reduce(%p),
channel_id=1, replica_groups={{0,1,2,3},{4,5,6,7}}, use_global_device_ids=true, to_apply=%sum.f32 %all-reduce.2 = f32[] all-reduce(%all-reduce.0), replica_groups={{0,1}}, to_apply=%sum.f32 ROOT %tuple = (f32[]) tuple(%all-reduce.2) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2, 4)); ArCrsCombiner combiner(4, true); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/ar_crs_combiner.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/ar_crs_combiner_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
f1c72f99-0fb9-453a-a24b-21b943ceb2cf
cpp
tensorflow/tensorflow
dynamic_dimension_simplifier
third_party/xla/xla/service/dynamic_dimension_simplifier.cc
third_party/xla/xla/service/dynamic_dimension_simplifier_test.cc
#include "xla/service/dynamic_dimension_simplifier.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/status_macros.h" namespace xla { namespace { absl::StatusOr<bool> ConcatForwarding(HloInstruction* concat) { if (concat->opcode() != HloOpcode::kConcatenate) { return false; } bool changed = false; auto parent = concat->parent(); std::vector<HloInstruction*> new_operands; for (HloInstruction* operand : concat->operands()) { if (operand->opcode() != HloOpcode::kConcatenate || operand->concatenate_dimension() != concat->concatenate_dimension()) { new_operands.push_back(operand); } else { changed = true; for (HloInstruction* operand_operand : operand->operands()) { new_operands.push_back(operand_operand); } } } if (changed) { auto new_concat = parent->AddInstruction(HloInstruction::CreateConcatenate( concat->shape(), new_operands, concat->concatenate_dimension())); TF_RETURN_IF_ERROR(parent->ReplaceInstruction(concat, new_concat)); } return changed; } absl::StatusOr<bool> SliceConcatForwarding(HloInstruction* slice) { if (slice->opcode() != HloOpcode::kSlice) { return false; } auto concat = slice->mutable_operand(0); if (concat->opcode() != HloOpcode::kConcatenate) { return false; } if (slice->shape().rank() != 1) { return false; } int64_t concat_dim = concat->concatenate_dimension(); std::vector<HloInstruction*> new_operands; int64_t size_so_far = 0; int64_t slice_size = slice->shape().dimensions(concat_dim); if (slice_size != slice->slice_limits(0) - slice->slice_starts(0)) { return false; } if (slice->slice_strides(0) != 1) { return false; } for (HloInstruction* operand : concat->operands()) { if (size_so_far == slice->slice_starts(0) && operand->shape().dimensions(0) == slice_size) { TF_RETURN_IF_ERROR(slice->ReplaceAllUsesWith(operand)); return true; } size_so_far += operand->shape().dimensions(concat_dim); } return false; } absl::StatusOr<bool> ReshapeBroadcastForwarding(HloInstruction* reshape) { if (reshape->opcode() != 
HloOpcode::kReshape) { return false; } auto broadcast = reshape->mutable_operand(0); if (broadcast->opcode() != HloOpcode::kBroadcast) { return false; } if (reshape->shape().rank() != 0) { return false; } if (broadcast->shape().rank() != 1) { return false; } if (broadcast->mutable_operand(0)->shape().rank() != 0) { return false; } TF_RETURN_IF_ERROR( reshape->ReplaceAllUsesWith(broadcast->mutable_operand(0))); return true; } absl::StatusOr<bool> ReshapeReshapeForwarding(HloInstruction* reshape) { if (reshape->opcode() != HloOpcode::kReshape) { return false; } auto reshape_2 = reshape->mutable_operand(0); if (reshape_2->opcode() != HloOpcode::kReshape) { return false; } if (!Shape::Equal()(reshape->shape(), reshape_2->operand(0)->shape())) { return false; } TF_RETURN_IF_ERROR( reshape->ReplaceAllUsesWith(reshape_2->mutable_operand(0))); return true; } absl::StatusOr<bool> IdentityConvertRemoving(HloInstruction* convert) { if (convert->opcode() != HloOpcode::kConvert) { return false; } auto operand = convert->mutable_operand(0); if (Shape::Equal()(convert->shape(), operand->shape())) { TF_RETURN_IF_ERROR(convert->ReplaceAllUsesWith(operand)); return true; } return false; } absl::StatusOr<bool> IdentityReshapeRemoving(HloInstruction* reshape) { if (reshape->opcode() != HloOpcode::kReshape) { return false; } auto operand = reshape->mutable_operand(0); if (Shape::Equal()(reshape->shape(), operand->shape())) { TF_RETURN_IF_ERROR(reshape->ReplaceAllUsesWith(operand)); return true; } return false; } } absl::StatusOr<bool> DynamicDimensionSimplifier::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { XLA_VLOG_LINES( 2, "DynamicDimensionSimplifier::Run(), before:\n" + module->ToString()); bool changed = false; for (auto* comp : module->MakeNonfusionComputations(execution_threads)) { for (auto* inst : comp->MakeInstructionPostOrder()) { TF_ASSIGN_OR_RETURN(bool local_changed, ConcatForwarding(inst)); changed |= local_changed; } } for 
(auto* comp : module->MakeNonfusionComputations(execution_threads)) { for (auto* inst : comp->MakeInstructionPostOrder()) { TF_ASSIGN_OR_RETURN(bool local_changed, SliceConcatForwarding(inst)); changed |= local_changed; } } for (auto* comp : module->MakeNonfusionComputations(execution_threads)) { for (auto* inst : comp->MakeInstructionPostOrder()) { TF_ASSIGN_OR_RETURN(bool local_changed, ReshapeBroadcastForwarding(inst)); changed |= local_changed; } } for (auto* comp : module->MakeNonfusionComputations(execution_threads)) { for (auto* inst : comp->MakeInstructionPostOrder()) { TF_ASSIGN_OR_RETURN(bool local_changed, ReshapeReshapeForwarding(inst)); changed |= local_changed; } } for (auto* comp : module->MakeNonfusionComputations(execution_threads)) { for (auto* inst : comp->MakeInstructionPostOrder()) { TF_ASSIGN_OR_RETURN(bool local_changed, IdentityConvertRemoving(inst)); changed |= local_changed; } } for (auto* comp : module->MakeNonfusionComputations(execution_threads)) { for (auto* inst : comp->MakeInstructionPostOrder()) { TF_ASSIGN_OR_RETURN(bool local_changed, IdentityReshapeRemoving(inst)); changed |= local_changed; } } XLA_VLOG_LINES( 2, "DynamicDimensionSimplifier::Run(), after:\n" + module->ToString()); return changed; } }
#include "xla/service/dynamic_dimension_simplifier.h" #include <memory> #include <utility> #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/pass/hlo_pass_pipeline.h" #include "xla/layout_util.h" #include "xla/literal.h" #include "xla/service/hlo_creation_utils.h" #include "xla/service/hlo_parser.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/service/shape_inference.h" #include "xla/shape_util.h" #include "xla/test.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" #include "xla/types.h" #include "xla/window_util.h" #include "xla/xla_data.pb.h" namespace xla { namespace { namespace m = match; class DynamicDimensionSimplifierTest : public HloTestBase {}; TEST_F(DynamicDimensionSimplifierTest, ForwardConcat) { const char* kModuleStr = R"( HloModule m test { p0 = s32[1] parameter(0) p1 = s32[1] parameter(1) p2 = s32[1] parameter(2) concat1 = s32[2] concatenate(p0, p1), dimensions={0} ROOT concat2 = s32[3] concatenate(concat1, p2), dimensions={0} } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr)); DynamicDimensionSimplifier simplifier; ASSERT_TRUE(simplifier.Run(m.get()).value()); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Concatenate(m::Parameter(0), m::Parameter(1), m::Parameter(2)))); } TEST_F(DynamicDimensionSimplifierTest, DoNotForwardConcatMultipleDims) { const char* kModuleStr = R"( HloModule m test { p0 = s32[1, 1] parameter(0) p1 = s32[1, 1] parameter(1) p2 = s32[2, 1] parameter(2) concat1 = s32[2, 1] concatenate(p0, p1), dimensions={0} ROOT concat2 = s32[2, 2] concatenate(concat1, p2), dimensions={1} } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr)); 
DynamicDimensionSimplifier simplifier; ASSERT_FALSE(simplifier.Run(m.get()).value()); } TEST_F(DynamicDimensionSimplifierTest, ForwardConcatSlice) { const char* kModuleStr = R"( HloModule m test { p0 = s32[1] parameter(0) p1 = s32[1] parameter(1) p2 = s32[1] parameter(2) concat = s32[3] concatenate(p0, p1, p2), dimensions={0} ROOT slice = s32[1] slice(concat), slice={[1:2]} } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr)); DynamicDimensionSimplifier simplifier; ASSERT_TRUE(simplifier.Run(m.get()).value()); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Parameter(1))); } TEST_F(DynamicDimensionSimplifierTest, DoNotForwardConcatSliceSizeMismatch) { const char* kModuleStr = R"( HloModule m test { p0 = s32[1] parameter(0) p1 = s32[1] parameter(1) p2 = s32[1] parameter(2) concat = s32[3] concatenate(p0, p1, p2), dimensions={0} ROOT slice = s32[2] slice(concat), slice={[1:3]} } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr)); DynamicDimensionSimplifier simplifier; ASSERT_FALSE(simplifier.Run(m.get()).value()); } TEST_F(DynamicDimensionSimplifierTest, DoNotForwardConcatSliceStrided) { const char* kModuleStr = R"( HloModule m test { p0 = s32[1] parameter(0) p1 = s32[1] parameter(1) p2 = s32[1] parameter(2) concat = s32[3] concatenate(p0, p1, p2), dimensions={0} ROOT slice = s32[1] slice(concat), slice={[1:2:2]} } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr)); DynamicDimensionSimplifier simplifier; ASSERT_FALSE(simplifier.Run(m.get()).value()); } TEST_F(DynamicDimensionSimplifierTest, BroadcastReshapeForwarding) { const char* kModuleStr = R"( HloModule m test { p0 = s32[] parameter(0) broadcast = s32[1] broadcast(p0), dimensions={} ROOT reshape = s32[] reshape(broadcast) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr)); DynamicDimensionSimplifier simplifier; ASSERT_TRUE(simplifier.Run(m.get()).value()); 
EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Parameter(0))); } TEST_F(DynamicDimensionSimplifierTest, ReshapeReshapeForwarding) { const char* kModuleStr = R"( HloModule m test { p0 = s32[] parameter(0) reshape = s32[1] reshape(p0) ROOT reshape2 = s32[] reshape(reshape) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr)); DynamicDimensionSimplifier simplifier; ASSERT_TRUE(simplifier.Run(m.get()).value()); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Parameter(0))); } TEST_F(DynamicDimensionSimplifierTest, DoNotReshapeReshapeForwardingShapeMismatch) { const char* kModuleStr = R"( HloModule m test { p0 = s32[1, 1] parameter(0) reshape = s32[1] reshape(p0) ROOT reshape2 = s32[] reshape(reshape) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr)); DynamicDimensionSimplifier simplifier; ASSERT_FALSE(simplifier.Run(m.get()).value()); } TEST_F(DynamicDimensionSimplifierTest, IdConvertRemoving) { const char* kModuleStr = R"( HloModule m test { p0 = s32[1] parameter(0) ROOT reshape2 = s32[1] convert(p0) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr)); DynamicDimensionSimplifier simplifier; ASSERT_TRUE(simplifier.Run(m.get()).value()); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Parameter(0))); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_dimension_simplifier.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_dimension_simplifier_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
b2d66996-ec37-441c-9dba-74eedda21742
cpp
tensorflow/tensorflow
all_reduce_simplifier
third_party/xla/xla/service/all_reduce_simplifier.cc
third_party/xla/xla/service/all_reduce_simplifier_test.cc
#include "xla/service/all_reduce_simplifier.h" #include <cstdint> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/log/log.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/literal_util.h" #include "xla/service/collective_ops_utils.h" #include "xla/service/hlo_module_config.h" #include "xla/service/hlo_replication_analysis.h" #include "xla/shape_util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { absl::StatusOr<bool> AllReduceSimplifier::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { TF_ASSIGN_OR_RETURN( auto replication, HloReplicationAnalysis::Run(module, false)); std::vector<std::pair<HloInstruction*, int64_t>> all_reduces_to_replace; auto get_participant_counts_for_replica_group = [](const HloInstruction* all_reduce) -> absl::StatusOr<int64_t> { const HloModuleConfig& config = all_reduce->GetModule()->config(); TF_ASSIGN_OR_RETURN( CollectiveOpGroupMode group_mode, GetCollectiveOpGroupMode(all_reduce->channel_id().has_value(), Cast<HloAllReduceInstruction>(all_reduce) ->use_global_device_ids())); int64_t num_devices = config.num_partitions(); int64_t num_replicas = config.replica_count(); TF_ASSIGN_OR_RETURN(std::vector<int64_t> participant_counts, GetPariticipantCountsForReplicaGroups( num_replicas, num_devices, all_reduce->replica_groups(), group_mode)); if (participant_counts.empty()) { return -1; } if (!absl::c_all_of(participant_counts, [&](int64_t participant_count) { return participant_count == participant_counts[0]; })) { return -1; } return participant_counts[0]; }; bool changed = false; for (auto computation : 
module->computations(execution_threads)) { for (HloInstruction* inst : computation->MakeInstructionPostOrder()) { if ((inst->opcode() == HloOpcode::kAllGather || inst->opcode() == HloOpcode::kReduceScatter) && ShapeUtil::Compatible(inst->shape(), inst->operand(0)->shape())) { changed = true; TF_RETURN_IF_ERROR( computation->ReplaceInstruction(inst, inst->mutable_operand(0))); } } } for (auto computation : module->computations(execution_threads)) { for (HloInstruction* inst : computation->MakeInstructionPostOrder()) { if (!inst->shape().IsArray()) { continue; } if (!inst->IsCrossReplicaAllReduce() && !inst->IsCrossModuleAllReduce()) { continue; } TF_ASSIGN_OR_RETURN(int64_t group_size, get_participant_counts_for_replica_group(inst)); if (group_size == -1 || (!inst->IsCrossReplicaAllReduce() && group_size != 1) || (!inst->IsCrossReplicaAllReduce() && !module->config().use_spmd_partitioning())) { continue; } if (replication->HloInstructionIsReplicatedAt(inst->operand(0), {}) || group_size == 1) { all_reduces_to_replace.push_back({inst, group_size}); } } } for (auto all_reduce_and_group_size : all_reduces_to_replace) { auto all_reduce = all_reduce_and_group_size.first; const int64_t replica_group_size = all_reduce_and_group_size.second; if (replica_group_size == 1) { TF_RETURN_IF_ERROR(all_reduce->parent()->ReplaceInstruction( all_reduce, all_reduce->mutable_operand(0))); changed = true; continue; } if (all_reduce->to_apply()->instruction_count() != 3 || all_reduce->to_apply()->num_parameters() != 2) { continue; } HloInstruction* replacement; switch (all_reduce->to_apply()->root_instruction()->opcode()) { case HloOpcode::kAdd: { auto multiplier = all_reduce->parent()->AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateR0<int32_t>(replica_group_size))); if (all_reduce->shape().element_type() != S32) { multiplier = all_reduce->parent()->AddInstruction( HloInstruction::CreateConvert( ShapeUtil::ChangeElementType( multiplier->shape(), 
all_reduce->shape().element_type()), multiplier)); } if (all_reduce->shape().rank() > 0) { multiplier = all_reduce->parent()->AddInstruction( HloInstruction::CreateBroadcast(all_reduce->shape(), multiplier, {})); } replacement = all_reduce->parent()->AddInstruction(HloInstruction::CreateBinary( all_reduce->shape(), HloOpcode::kMultiply, all_reduce->mutable_operand(0), multiplier)); break; } case HloOpcode::kMinimum: case HloOpcode::kMaximum: case HloOpcode::kOr: case HloOpcode::kAnd: replacement = all_reduce->mutable_operand(0); break; default: continue; } VLOG(2) << "Replacing " << all_reduce->ToString() << " with " << replacement->ToString(); TF_RETURN_IF_ERROR(all_reduce->ReplaceAllUsesWith(replacement)); changed = true; } return changed; } }
#include "xla/service/all_reduce_simplifier.h" #include <memory> #include <utility> #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/service/hlo_module_config.h" #include "xla/service/hlo_parser.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/test.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" namespace xla { namespace { namespace m = match; using AllReduceSimplifierTest = HloTestBase; TEST_F(AllReduceSimplifierTest, ReplicatedParameters) { const char* kModuleStr = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } max { a.1 = f32[] parameter(0) b.1 = f32[] parameter(1) ROOT max = f32[] maximum(a.1, b.1) } min { a.2 = f32[] parameter(0) b.2 = f32[] parameter(1) ROOT min = f32[] minimum(a.2, b.2) } sum.1 { a.3 = f32[] parameter(0) b.3 = f32[] parameter(1) ROOT add.1 = f32[] add(a.3, b.3) } test { p0 = f32[8,16] parameter(0), parameter_replication={true} p1 = f32[8,16] parameter(1), parameter_replication={false} p2 = f32[] parameter(2), parameter_replication={true} all-reduce = f32[8,16] all-reduce(p0), replica_groups={}, to_apply=sum all-reduce.1 = f32[8,16] all-reduce(p0), replica_groups={}, to_apply=max all-reduce.2 = f32[8,16] all-reduce(p1), replica_groups={}, to_apply=min all-reduce.3 = f32[] all-reduce(p2), replica_groups={}, to_apply=sum.1 ROOT tuple = (f32[8,16], f32[8,16], f32[8,16], f32[]) tuple(all-reduce, all-reduce.1, all-reduce.2, all-reduce.3) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule( kModuleStr, 8)); AllReduceSimplifier simplifier(8); ASSERT_TRUE(simplifier.Run(module.get()).value()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::MultiplyAnyOrder(m::Parameter(0), m::Broadcast(m::Convert(m::ConstantScalar(8)))), m::Parameter(0), m::AllReduce(m::Parameter(1)), m::MultiplyAnyOrder(m::Parameter(2), 
m::Convert(m::ConstantScalar(8)))))); } TEST_F(AllReduceSimplifierTest, AllReduceAfterAllReduce) { const char* kModuleStr = R"( HloModule m max { a.1 = f32[] parameter(0) b.1 = f32[] parameter(1) ROOT max = f32[] maximum(a.1, b.1) } sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } test { p0 = f32[8,16] parameter(0), parameter_replication={false} all-reduce = f32[8,16] all-reduce(p0), replica_groups={}, to_apply=max ROOT all-reduce.1 = f32[8,16] all-reduce(all-reduce), replica_groups={}, to_apply=sum } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule( kModuleStr, 8)); AllReduceSimplifier simplifier(8); ASSERT_TRUE(simplifier.Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::MultiplyAnyOrder( m::AllReduce(m::Parameter(0)), m::Broadcast(m::Convert(m::ConstantScalar(8)))))); } TEST_F(AllReduceSimplifierTest, SubgroupAllReduce) { const char* kModuleStr = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } max { a.1 = f32[] parameter(0) b.1 = f32[] parameter(1) ROOT max = f32[] maximum(a.1, b.1) } min { a.2 = f32[] parameter(0) b.2 = f32[] parameter(1) ROOT min = f32[] minimum(a.2, b.2) } test { p0 = f32[8,16] parameter(0), parameter_replication={true} p1 = f32[8,16] parameter(1), parameter_replication={false} all-reduce = f32[8,16] all-reduce(p0), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum all-reduce.1 = f32[8,16] all-reduce(p0), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=max all-reduce.2 = f32[8,16] all-reduce(p1), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=min ROOT tuple = (f32[8,16], f32[8,16], f32[8,16]) tuple(all-reduce, all-reduce.1, all-reduce.2) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule( kModuleStr, 8)); AllReduceSimplifier simplifier(8); ASSERT_TRUE(simplifier.Run(module.get()).value()); EXPECT_THAT( module->entry_computation()->root_instruction(), 
GmockMatch(m::Tuple( m::MultiplyAnyOrder(m::Parameter(0), m::Broadcast(m::Convert(m::ConstantScalar(4)))), m::Parameter(0), m::AllReduce(m::Parameter(1))))); } TEST_F(AllReduceSimplifierTest, TrivialSubgroupAllReduce) { const char* kModuleStr = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } test { p0 = f32[8,16] parameter(0), parameter_replication={false} ROOT all-reduce = f32[8,16] all-reduce(p0), replica_groups={{0},{1},{2},{3},{4},{5},{6},{7}}, to_apply=sum } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule( kModuleStr, 8)); AllReduceSimplifier simplifier(8); EXPECT_TRUE(simplifier.Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Parameter(0))); } TEST_F(AllReduceSimplifierTest, TrivialSubgroupNonCrossReplicaAllReduce) { const char* kModuleStr = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } test { p0 = f32[8,16] parameter(0), parameter_replication={false} ROOT all-reduce = f32[8,16] all-reduce(p0), channel_id=1, use_global_device_ids=true, replica_groups={{0},{1},{2},{3},{4},{5},{6},{7}}, to_apply=sum } )"; TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule(kModuleStr, 1, 8)); module->mutable_config().set_use_spmd_partitioning(true); AllReduceSimplifier simplifier(1); EXPECT_TRUE(simplifier.Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Parameter(0))); } TEST_F(AllReduceSimplifierTest, NonCrossReplicaAllReduceAfterAllReduce) { const char* kModuleStr = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } test { p0 = f32[8,16] parameter(0), parameter_replication={false} all-reduce = f32[8,16] all-reduce(p0), channel_id=1, use_global_device_ids=true, replica_groups={{0,2},{1,3},{4,6},{5,7}}, to_apply=sum ROOT all-reduce.1 = f32[8,16] all-reduce(all-reduce), channel_id=2, 
use_global_device_ids=true, replica_groups={{0,4},{1,5},{2,6},{3,7}}, to_apply=sum } )"; TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule(kModuleStr, 1, 8)); module->mutable_config().set_use_spmd_partitioning(true); AllReduceSimplifier simplifier(1); EXPECT_FALSE(simplifier.Run(module.get()).value()); } TEST_F(AllReduceSimplifierTest, MPMDNonCrossReplicaAllReduce) { const char* kModuleStr = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } test { p0 = f32[8,16] parameter(0), parameter_replication={false} ROOT all-reduce = f32[8,16] all-reduce(p0), channel_id=1, replica_groups={{0},{1}}, to_apply=sum } )"; TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule(kModuleStr, 2, 1)); module->mutable_config().set_use_spmd_partitioning(false); AllReduceSimplifier simplifier(2); EXPECT_FALSE(simplifier.Run(module.get()).value()); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_simplifier.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_simplifier_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
08422a02-e75f-4e3b-88eb-cda17385701d
cpp
tensorflow/tensorflow
while_loop_fusible_sinking
third_party/xla/xla/service/while_loop_fusible_sinking.cc
third_party/xla/xla/service/while_loop_fusible_sinking_test.cc
#include "xla/service/while_loop_fusible_sinking.h" #include <cstdint> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/while_util.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace { bool IsPurelyExpanding(const HloInstruction* instr) { return instr->opcode() == HloOpcode::kBroadcast || (instr->opcode() == HloOpcode::kConstant && instr->shape().rank() == 0) || instr->opcode() == HloOpcode::kIota; } bool IsFusionCandidate(const HloInstruction* instr) { return instr->opcode() != HloOpcode::kRng && (instr->IsElementwise() || instr->opcode() == HloOpcode::kReshape || instr->opcode() == HloOpcode::kTranspose); } } bool WhileLoopFusibleSinking::IsSinkableFusion(HloInstruction* while_operand) { absl::InlinedVector<HloInstruction*, 8> worklist; absl::flat_hash_set<int> visited; worklist.push_back(while_operand); while (!worklist.empty()) { HloInstruction* to_process = worklist.back(); worklist.pop_back(); if (!to_process->IsFusible()) { return false; } if (!visited.insert(to_process->unique_id()).second) { if (visited.size() > 100) { return false; } continue; } if (IsPurelyExpanding(to_process)) { continue; } if (IsFusionCandidate(to_process)) { for (auto* op : to_process->operands()) { worklist.push_back(op); } continue; } return false; } return true; } HloInstruction* WhileLoopFusibleSinking::CreateSinkableFusion( HloInstruction* while_operand) { HloInstruction* fusion = while_operand->AddInstruction(while_operand->CreateFusion( while_operand->shape(), HloInstruction::FusionKind::kLoop, while_operand)); bool did_fuse = IsFusionCandidate(while_operand); while (did_fuse) { did_fuse = 
false; for (int64_t i = fusion->operand_count() - 1; i >= 0; --i) { HloInstruction* op = fusion->mutable_operand(i); if (IsPurelyExpanding(op)) { continue; } fusion->FuseInstruction(op); did_fuse = true; break; } } did_fuse = true; while (did_fuse) { did_fuse = false; for (int64_t i = fusion->operand_count() - 1; i >= 0; --i) { HloInstruction* op = fusion->mutable_operand(i); if (IsPurelyExpanding(op)) { fusion->FuseInstruction(op); did_fuse = true; break; } } } return fusion; } absl::StatusOr<bool> WhileLoopFusibleSinking::TrySinkingFusiblesIntoWhileLoop( HloInstruction* while_instr) { HloComputation* while_cond = while_instr->while_condition(); HloComputation* while_body = while_instr->while_body(); if (call_counts_[while_body] > 1 || call_counts_[while_cond] > 1) { return false; } HloInstruction* init_value = while_instr->mutable_operand(0); if (init_value->opcode() != HloOpcode::kTuple) { return false; } bool changed = false; std::vector<HloInstruction*> invariant_body_gtes = WhileUtil::GetInvariantGTEsForWhileBody(*while_body); std::vector<int64_t> tuple_indices; std::vector<HloInstruction*> new_operands; for (HloInstruction* invariant_body_gte : invariant_body_gtes) { int64_t index = invariant_body_gte->tuple_index(); if (while_instr->operand_count() == 0 || init_value->operand_count() == 0) { CHECK_EQ(while_instr->user_count(), 0); VLOG(3) << "Each element in the operand tuple of the while instruction '" << while_instr->name() << "' was an invariant value, whose usage has been replaced " " directly by the value."; break; } HloInstruction* invariant_value = init_value->mutable_operand(index); if (absl::c_any_of(invariant_body_gte->users(), [](const HloInstruction* use) { switch (use->opcode()) { case HloOpcode::kDynamicSlice: case HloOpcode::kGather: case HloOpcode::kSlice: return true; default: return false; } })) { continue; } if (init_value->IsRoot() || init_value->user_count() > 1) { init_value = init_value->AddInstruction(init_value->Clone()); 
TF_RETURN_IF_ERROR(while_instr->ReplaceOperandWith(0, init_value)); } if (!IsSinkableFusion(invariant_value)) { continue; } HloInstruction* fusion = CreateSinkableFusion(invariant_value); changed = true; if (fusion->operand_count() > 0 && (while_instr->IsRoot() || absl::c_any_of(while_instr->users(), [&](HloInstruction* use) { return use->opcode() != HloOpcode::kGetTupleElement; }))) { auto uses = while_instr->users(); std::vector<HloInstruction*> gtes(init_value->operand_count()); for (int64_t i = 0; i < gtes.size(); ++i) { gtes[i] = while_instr->AddInstruction( HloInstruction::CreateGetTupleElement(while_instr, i)); } HloInstruction* tuple = while_instr->AddInstruction(HloInstruction::CreateTuple(gtes)); if (while_instr->IsRoot()) { while_instr->parent()->set_root_instruction(tuple); } if (!uses.empty()) { TF_RETURN_IF_ERROR(while_instr->ReplaceUsesWith(uses, tuple)); } } absl::InlinedVector<HloInstruction*, 2> invariant_output_uses; for (auto use : while_instr->users()) { if (use->opcode() == HloOpcode::kGetTupleElement && use->tuple_index() == index) { invariant_output_uses.push_back(use); } } for (auto use : invariant_output_uses) { TF_RETURN_IF_ERROR( while_instr->parent()->ReplaceInstruction(use, invariant_value)); } HloInstruction* root = while_body->root_instruction(); HloInstruction* parameter = while_body->parameter_instruction(0); tuple_indices.resize(fusion->operand_count()); int64_t next_index = init_value->operand_count(); new_operands.resize(fusion->operand_count()); for (int64_t i = 0; i < fusion->operand_count(); ++i) { init_value->AppendOperand(fusion->mutable_operand(i)); parameter->mutable_shape()->mutable_tuple_shapes()->push_back( fusion->mutable_operand(i)->shape()); new_operands[i] = root->AddInstruction( HloInstruction::CreateGetTupleElement(parameter, next_index++)); root->AppendOperand(new_operands[i]); } *(init_value->mutable_shape()) = parameter->shape(); *(while_instr->mutable_shape()) = parameter->shape(); 
*(while_cond->parameter_instruction(0)->mutable_shape()) = parameter->shape(); *(root->mutable_shape()) = parameter->shape(); auto cloned_fusion = while_body->AddInstruction( fusion->CloneWithNewOperands(fusion->shape(), new_operands)); TF_RETURN_IF_ERROR(fusion->parent()->RemoveInstruction(fusion)); TF_RETURN_IF_ERROR( while_body->ReplaceInstruction(invariant_body_gte, cloned_fusion)); TF_RETURN_IF_ERROR(cloned_fusion->Defuse()); } return changed; } absl::StatusOr<bool> WhileLoopFusibleSinking::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { call_counts_.clear(); bool changed = false; std::vector<HloInstruction*> while_instrs; for (auto* comp : module->MakeNonfusionComputations(execution_threads)) { absl::c_copy_if(comp->instructions(), std::back_inserter(while_instrs), HloPredicateIsOp<HloOpcode::kWhile>); } for (HloInstruction* while_instr : while_instrs) { call_counts_[while_instr->while_body()]++; call_counts_[while_instr->while_condition()]++; } for (HloInstruction* while_instr : while_instrs) { TF_ASSIGN_OR_RETURN(bool result, TrySinkingFusiblesIntoWhileLoop(while_instr)); changed |= result; } return changed; } }
#include "xla/service/while_loop_fusible_sinking.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/test.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" namespace xla { namespace { namespace op = xla::testing::opcode_matchers; using ::testing::_; using WhileLoopFusibleSinkingTest = HloTestBase; TEST_F(WhileLoopFusibleSinkingTest, SinkOneFusible) { const char* const hlo_string = R"( HloModule ModuleWithWhile body { p_body = (f32[2],f32[2]) parameter(0) p_body.0 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=0 p_body.1 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=1 add.0 = f32[2] add(p_body.0, p_body.1) ROOT root = (f32[2],f32[2]) tuple(add.0, p_body.1) } condition { p_cond = (f32[2],f32[2]) parameter(0) ROOT result = pred[] constant(true) } ENTRY entry { const_0 = f32[2] parameter(0) const_1 = f32[2] iota(), iota_dimension=0 while_init = (f32[2],f32[2]) tuple(const_0, const_1) ROOT while = (f32[2],f32[2]) while(while_init), condition=condition, body=body } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, WhileLoopFusibleSinking{}.Run(module.get())); ASSERT_TRUE(changed); auto* while_body = module->GetComputationWithName("body"); EXPECT_THAT(while_body->root_instruction(), op::Tuple(op::Add(_, op::Iota()), _)); } TEST_F(WhileLoopFusibleSinkingTest, SinkMask) { const char* const hlo_string = R"( HloModule ModuleWithWhile body { p_body = (f32[5,7],f32[5,7]) parameter(0) p_body.0 = get-tuple-element(p_body), index=0 p_body.1 = get-tuple-element(p_body), index=1 add.0 = add(p_body.0, p_body.1) ROOT root = tuple(add.0, p_body.1) } condition { p_cond = (f32[5,7],f32[5,7]) parameter(0) ROOT result = pred[] constant(true) } ENTRY entry { const_0 = f32[5,7] parameter(0) p = f32[5] parameter(1) a = f32[5,7] iota(), iota_dimension=0 b = f32[5,7] iota(), iota_dimension=1 c = add(a, b) d = f32[5,7] broadcast(p), dimensions={0} mask = multiply(c,d) 
while_init = tuple(const_0, mask) ROOT while = while(while_init), condition=condition, body=body } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, WhileLoopFusibleSinking{}.Run(module.get())); ASSERT_TRUE(changed); auto* while_body = module->GetComputationWithName("body"); EXPECT_THAT(while_body->root_instruction(), op::Tuple(op::Add(_, op::Multiply(op::Add(op::Iota(), op::Iota()), op::Broadcast())), _, _)); } TEST_F(WhileLoopFusibleSinkingTest, NoSinkSlicedMask) { const char* const hlo_string = R"( HloModule ModuleWithWhile body { p_body = (f32[5,7],f32[5,7]) parameter(0) p_body.0 = get-tuple-element(p_body), index=0 p_body.1 = get-tuple-element(p_body), index=1 z = s32[] constant(0) j = s32[] constant(3) ds = f32[1,7] dynamic-slice(p_body.1, j, z), dynamic_slice_sizes={1,7} r = f32[7] reshape(ds) b = f32[5,7] broadcast(r), dimensions={1} a = add(b, p_body.0) add.0 = add(a, p_body.1) ROOT root = tuple(add.0, p_body.1) } condition { p_cond = (f32[5,7],f32[5,7]) parameter(0) ROOT result = pred[] constant(true) } ENTRY entry { const_0 = f32[5,7] parameter(0) p = f32[5] parameter(1) a = f32[5,7] iota(), iota_dimension=0 b = f32[5,7] iota(), iota_dimension=1 c = add(a, b) d = f32[5,7] broadcast(p), dimensions={0} mask = multiply(c,d) while_init = tuple(const_0, mask) ROOT while = while(while_init), condition=condition, body=body } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, WhileLoopFusibleSinking{}.Run(module.get())); EXPECT_FALSE(changed); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_fusible_sinking.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_fusible_sinking_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
555aed98-8f55-421e-badb-302e32069668
cpp
tensorflow/tensorflow
reduce_scatter_decomposer
third_party/xla/xla/service/reduce_scatter_decomposer.cc
third_party/xla/xla/service/reduce_scatter_decomposer_test.cc
#include "xla/service/reduce_scatter_decomposer.h" #include <sys/types.h> #include <limits> #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/literal_util.h" #include "xla/service/collective_decomposer_utils.h" #include "xla/service/collective_ops_utils.h" #include "xla/service/hlo_module_config.h" #include "xla/shape_util.h" namespace xla { absl::StatusOr<bool> ReduceScatterDecomposer::Run( HloModule *module, const absl::flat_hash_set<absl::string_view> &execution_threads) { bool changed = false; int64_t next_channel_id = hlo_query::NextChannelId(*module); for (HloComputation *computation : module->MakeNonfusionComputations(execution_threads)) { for (HloInstruction *instruction : computation->MakeInstructionPostOrder()) { auto *rs = DynCast<HloReduceScatterInstruction>(instruction); if (!rs || !rs->shape().IsArray()) { continue; } std::optional<int64_t> channel_id; if (rs->channel_id()) { channel_id = next_channel_id++; } if (should_decompose_ && !should_decompose_(rs)) { continue; } VLOG(2) << "Decompose: " << rs->ToString(); HloComputation *apply_clone = module->AddComputationAndUnifyNamesAndIds( rs->to_apply()->Clone(), false); HloInstruction *ar = computation->AddInstruction(HloInstruction::CreateAllReduce( rs->operand(0)->shape(), rs->operands(), apply_clone, rs->device_list(), rs->constrain_layout(), channel_id, rs->use_global_device_ids())); apply_clone->SetCollectiveCallInstruction(ar); TF_ASSIGN_OR_RETURN( CollectiveOpGroupMode group_mode, GetCollectiveOpGroupMode(rs->channel_id().has_value(), rs->use_global_device_ids())); TF_ASSIGN_OR_RETURN( std::vector<HloInstruction *> start_indices, CreateStartIndicesForCollectiveDecomposition( group_mode, rs->replica_groups(), rs->shape(), rs->scatter_dimension(), computation, 
update_layout_)); HloInstruction *ds = computation->AddInstruction(HloInstruction::CreateDynamicSlice( rs->shape(), ar, start_indices, rs->shape().dimensions())); TF_RETURN_IF_ERROR(rs->ReplaceAllUsesWith(ds)); TF_RETURN_IF_ERROR(computation->RemoveInstruction(rs)); changed = true; } } return changed; } }
#include "xla/service/reduce_scatter_decomposer.h" #include <utility> #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/literal_util.h" #include "xla/service/collective_ops_utils.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" namespace xla { namespace { namespace op = xla::testing::opcode_matchers; class ReduceScatterDecomposerTest : public HloTestBase { public: enum class PassAction { kNoChange, kTrivialGroups, kTableLookup, }; void RunPass( absl::string_view hlo_module, PassAction action, CollectiveOpGroupMode mode = CollectiveOpGroupMode::kCrossReplica, int64_t shard_size = 0, int64_t shard_dimension = 0, int64_t replica_count = 2, std::function<bool(const HloInstruction *)> should_decompose = [](const HloInstruction *) { return true; }) { const int64_t partition_count = 2; TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule(hlo_module, replica_count, partition_count)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ReduceScatterDecomposer(nullptr, should_decompose) .Run(module.get())); if (action == PassAction::kNoChange) { ASSERT_FALSE(changed); return; } ASSERT_TRUE(changed); Literal multiplier = LiteralUtil::CreateR0<uint32_t>(shard_size); ::testing::Matcher<const ::xla::HloInstruction *> id_matcher = [&]() { switch (mode) { case CollectiveOpGroupMode::kCrossPartition: return op::PartitionId(); case CollectiveOpGroupMode::kCrossReplica: return op::ReplicaId(); case CollectiveOpGroupMode::kCrossReplicaAndPartition: return op::ReplicaId(); case CollectiveOpGroupMode::kFlattenedID: { return op::Add( op::Multiply(op::ReplicaId(), op::Constant(LiteralUtil::CreateR0<uint32_t>( partition_count))), op::PartitionId()); } } }(); auto root = module->entry_computation()->root_instruction(); const Shape &shape = root->shape(); ::testing::Matcher<const ::xla::HloInstruction *> slice_index = id_matcher; if (action == PassAction::kTableLookup) { slice_index = 
op::Reshape(op::DynamicSlice(op::Constant(), id_matcher)); } if (mode == CollectiveOpGroupMode::kCrossReplicaAndPartition) { slice_index = op::Add( op::Multiply( slice_index, op::Constant(LiteralUtil::CreateR0<uint32_t>(partition_count))), op::PartitionId()); } auto zero_matcher = op::Constant(LiteralUtil::Zero(U32)); std::vector<::testing::Matcher<const ::xla::HloInstruction *>> ds_operands( shape.rank() + 1, zero_matcher); ds_operands[0] = op::AllReduce(op::Parameter(0)); ds_operands[shard_dimension + 1] = op::Multiply(slice_index, op::Constant(std::move(multiplier))); EXPECT_THAT(root, op::DynamicSlice(ds_operands)); } }; TEST_F(ReduceScatterDecomposerTest, TrivialReplicaID) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) ROOT rs = f32[4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={0}, to_apply=sum } )"; RunPass(hlo_string, PassAction::kTrivialGroups, CollectiveOpGroupMode::kCrossReplica, 4); } TEST_F(ReduceScatterDecomposerTest, TableLookupReplicaId) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) ROOT rs = f32[4] reduce-scatter(p0), replica_groups={{1, 0}}, dimensions={0}, to_apply=sum } )"; RunPass(hlo_string, PassAction::kTableLookup, CollectiveOpGroupMode::kCrossReplica, 4); } TEST_F(ReduceScatterDecomposerTest, TrivialCrossReplicaAndPartition) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[4, 8] parameter(0) ROOT rs = f32[4, 2] reduce-scatter(p0), replica_groups={{0, 1}}, channel_id=1, dimensions={1}, to_apply=sum } )"; RunPass(hlo_string, PassAction::kTrivialGroups, CollectiveOpGroupMode::kCrossReplicaAndPartition, 2, 1); } TEST_F(ReduceScatterDecomposerTest, 
TrivialCrossReplicaAndPartition_SingleReplica) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[4, 8] parameter(0) ROOT rs = f32[4, 4] reduce-scatter(p0), replica_groups={{0}}, channel_id=1, dimensions={1}, to_apply=sum } )"; RunPass(hlo_string, PassAction::kTrivialGroups, CollectiveOpGroupMode::kCrossPartition, 4, 1, 1); } TEST_F(ReduceScatterDecomposerTest, TableLookupFlattenedId) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[4, 8] parameter(0) ROOT rs = f32[4, 2] reduce-scatter(p0), replica_groups={{1,0, 3, 2}}, channel_id=1, dimensions={1}, to_apply=sum, use_global_device_ids=true } )"; RunPass(hlo_string, PassAction::kTableLookup, CollectiveOpGroupMode::kFlattenedID, 2, 1); } TEST_F(ReduceScatterDecomposerTest, NoChange) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[4, 8] parameter(0) ROOT rs = (f32[4, 2], f32[4,2]) reduce-scatter(p0, p0), replica_groups={{1,0, 3, 2}}, channel_id=1, dimensions={1}, to_apply=sum, use_global_device_ids=true } )"; RunPass(hlo_string, PassAction::kNoChange); } TEST_F(ReduceScatterDecomposerTest, NoChangeWithShouldDecompose) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[4, 8] parameter(0) ROOT rs = f32[4, 4] reduce-scatter(p0), replica_groups={{0,1}, {2,3}}, channel_id=1, dimensions={1}, to_apply=sum, use_global_device_ids=true } )"; RunPass(hlo_string, PassAction::kNoChange, CollectiveOpGroupMode::kCrossReplica, 0, 0, 2, [](const HloInstruction *) { return false; }); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_scatter_decomposer.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_scatter_decomposer_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
7d1d8932-f15f-4fd7-a669-a3307f9aec2f
cpp
tensorflow/tensorflow
while_loop_concat_code_motion
third_party/xla/xla/service/while_loop_concat_code_motion.cc
third_party/xla/xla/service/while_loop_concat_code_motion_test.cc
#include "xla/service/while_loop_concat_code_motion.h" #include <map> #include <optional> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/pass/hlo_pass_pipeline.h" #include "xla/service/hlo_dce.h" #include "xla/service/tuple_simplifier.h" #include "xla/service/while_loop_simplifier.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/status.h" namespace xla { namespace { struct ConcatGroup { ConcatGroup(std::vector<HloInstruction*> elements, int64_t concat_dim, bool inserted_concat_dim) : elements(std::move(elements)), element_sizes(this->elements.size(), 1), element_offsets(this->elements.size(), 0), concat_dim(concat_dim), inserted_concat_dim(inserted_concat_dim) { if (inserted_concat_dim) { absl::c_iota(element_offsets, 0); } else { for (int64_t i = 0; i < element_sizes.size(); ++i) { element_sizes[i] = this->elements[i]->shape().dimensions(concat_dim); if (i > 0) { element_offsets[i] = element_offsets[i - 1] + element_sizes[i - 1]; } } } } Shape GetConcatShape() const { if (inserted_concat_dim) { std::vector<int64_t> dims; const Shape& element_shape = elements.back()->shape(); dims.reserve(element_shape.rank() + 1); for (int64_t i = 0; i < element_shape.rank(); ++i) { if (i == concat_dim) { dims.push_back(elements.size()); } dims.push_back(element_shape.dimensions(i)); } if (dims.size() == concat_dim) { dims.push_back(elements.size()); } return ShapeUtil::MakeShape(element_shape.element_type(), dims); } else { int64_t 
dim_size = 0; for (int64_t size : element_sizes) { dim_size += size; } Shape shape = elements.back()->shape(); shape.set_dimensions(concat_dim, dim_size); return shape; } } HloInstruction* CreateSlice(HloInstruction* full_data, int64_t element_index, HloComputation* comp) const { Shape shape = full_data->shape(); shape.set_dimensions(concat_dim, element_sizes[element_index]); std::vector<int64_t> starts(shape.rank(), 0); std::vector<int64_t> limits(shape.dimensions().begin(), shape.dimensions().end()); starts[concat_dim] = element_offsets[element_index]; limits[concat_dim] += starts[concat_dim]; auto slice = comp->AddInstruction( HloInstruction::CreateSlice(shape, full_data, starts, limits, std::vector<int64_t>(shape.rank(), 1))); if (!inserted_concat_dim) { return slice; } std::vector<int64_t> element_shape; element_shape.reserve(shape.rank() - 1); for (int64_t i = 0; i < shape.rank(); ++i) { if (i != concat_dim) { element_shape.push_back(shape.dimensions(i)); } } return comp->AddInstruction(HloInstruction::CreateReshape( ShapeUtil::MakeShape(shape.element_type(), element_shape), slice)); } HloInstruction* CreateConcat(std::vector<HloInstruction*> input_elements, HloComputation* comp) const { if (inserted_concat_dim) { for (int64_t i = 0; i < input_elements.size(); ++i) { std::vector<int64_t> element_shape; element_shape.reserve(input_elements[i]->shape().rank() + 1); for (int64_t j = 0; j < input_elements[i]->shape().rank(); ++j) { if (j == concat_dim) { element_shape.push_back(1); } element_shape.push_back(input_elements[i]->shape().dimensions(j)); } if (element_shape.size() == concat_dim) { element_shape.push_back(1); } input_elements[i] = comp->AddInstruction(HloInstruction::CreateReshape( ShapeUtil::MakeShape(input_elements[i]->shape().element_type(), element_shape), input_elements[i])); } } return comp->AddInstruction(HloInstruction::CreateConcatenate( GetConcatShape(), input_elements, concat_dim)); } std::vector<HloInstruction*> elements; 
std::vector<int64_t> element_sizes; std::vector<int64_t> element_offsets; int64_t concat_dim; bool inserted_concat_dim; }; class ConcatGroups { public: std::optional<std::pair<int64_t, int64_t>> GetGroupIndex( const HloInstruction* hlo) const { auto it = element_to_group_.find(hlo); if (it == element_to_group_.end()) { return std::nullopt; } return it->second; } const ConcatGroup& GetGroup(int64_t index) const { return groups_[index]; } std::pair<bool, int64_t> MaybeCreateNewGroup(ConcatGroup group) { int64_t group_id = -1; absl::flat_hash_set<HloInstruction*> elements_dedup; for (int64_t i = 0; i < group.elements.size(); ++i) { if (!elements_dedup.insert(group.elements[i]).second) { VLOG(2) << "Duplicates in group. Element: " << group.elements[i]->ToString(); } if (concat_disallowed_.contains(group.elements[i])) { VLOG(2) << "Failed creating group. Grouping disallowed on " << group.elements[i]->ToString(); return std::pair<bool, int64_t>(false, -1); } auto existing = GetGroupIndex(group.elements[i]); if (existing.has_value() && (i != existing->second || groups_[existing->first].concat_dim != group.concat_dim)) { VLOG(2) << "Failed creating group. Different than existing group. Element: " << group.elements[i]->ToString(); return std::pair<bool, int64_t>(false, -1); } if (i == 0 && existing.has_value()) { group_id = existing->first; } if (i > 0) { if (existing.has_value() && existing->first != group_id) { VLOG(2) << "Failed creating group. Different than existing group. " "Element: " << group.elements[i]->ToString(); return std::pair<bool, int64_t>(false, -1); } if (!existing.has_value() && group_id >= 0) { VLOG(2) << "Failed creating group. Different than existing group. 
" "Element: " << group.elements[i]->ToString(); return std::pair<bool, int64_t>(false, -1); } } } if (group_id >= 0) { VLOG(2) << "Group already exists at " << group_id << " for " << group.elements[0]->ToString(); return std::pair<bool, int64_t>(false, group_id); } int64_t index = groups_.size(); for (int64_t i = 0; i < group.elements.size(); ++i) { element_to_group_[group.elements[i]] = std::pair<int64_t, int64_t>(index, i); } VLOG(2) << "Created new group at " << index << " for " << group.elements[0]->ToString() << ", concat_dim: " << group.concat_dim << ", inserted: " << group.inserted_concat_dim; groups_.push_back(std::move(group)); return std::pair<bool, int64_t>(true, index); } const std::vector<ConcatGroup>& Groups() const { return groups_; } int64_t NextGroupIndex() const { return groups_.size(); } void RemoveTailingGroups(int64_t start_index) { while (groups_.size() > start_index) { for (auto element : groups_.back().elements) { element_to_group_.erase(element); } groups_.pop_back(); } } void DisallowGroupingOn(const HloInstruction* hlo) { VLOG(2) << "Disallow grouping on " << hlo->ToString(); concat_disallowed_.insert(hlo); } private: absl::flat_hash_map<const HloInstruction*, std::pair<int64_t, int64_t>> element_to_group_; std::vector<ConcatGroup> groups_; absl::flat_hash_set<const HloInstruction*> concat_disallowed_; }; std::optional<std::pair<int64_t, bool>> GetOperandConcatDim( const HloInstruction* hlo, int64_t operand_index, int64_t hlo_concat_dim, bool hlo_inserted_concat_dim, const ConcatGroup* combined_operand_group = nullptr) { if (hlo->IsElementwise() || hlo->opcode() == HloOpcode::kAllReduce) { return std::pair<int64_t, bool>(hlo_concat_dim, hlo_inserted_concat_dim); } int64_t operand_concat_dim = -1; bool operand_inserted_concat_dim = false; const Shape& operand_shape = combined_operand_group == nullptr ? 
hlo->operand(operand_index)->shape() : combined_operand_group->elements.back()->shape(); if (hlo->opcode() == HloOpcode::kBroadcast) { operand_concat_dim = 0; operand_inserted_concat_dim = true; int64_t min_dist_to_concat_dim = hlo->shape().rank(); for (int64_t i = 0; i < operand_shape.rank(); ++i) { if (hlo->dimensions(i) == hlo_concat_dim) { operand_concat_dim = i; operand_inserted_concat_dim = hlo_inserted_concat_dim; break; } if (hlo->dimensions(i) < hlo_concat_dim && min_dist_to_concat_dim > hlo_concat_dim - hlo->dimensions(i)) { operand_concat_dim = i + 1; min_dist_to_concat_dim = hlo_concat_dim - hlo->dimensions(i); } if (hlo->dimensions(i) > hlo_concat_dim && min_dist_to_concat_dim > hlo->dimensions(i) - hlo_concat_dim) { operand_concat_dim = i; min_dist_to_concat_dim = hlo->dimensions(i) - hlo_concat_dim; } } } else if (hlo->opcode() == HloOpcode::kReduce) { if (operand_index != 0) { return std::nullopt; } operand_concat_dim = hlo_concat_dim; operand_inserted_concat_dim = hlo_inserted_concat_dim; std::set<int64_t> sorted_reduce_dims; for (int64_t dim : hlo->dimensions()) { sorted_reduce_dims.insert(dim); } for (int64_t dim : sorted_reduce_dims) { if ((hlo_inserted_concat_dim && dim < operand_concat_dim) || (!hlo_inserted_concat_dim && dim <= operand_concat_dim)) { operand_concat_dim++; } } } else if (hlo->opcode() == HloOpcode::kReshape) { int64_t i = 0; int64_t j = 0; operand_inserted_concat_dim = false; while (i < operand_shape.rank() || j <= hlo_concat_dim) { if (i < operand_shape.rank() && j < hlo->shape().rank() && operand_shape.dimensions(i) == hlo->shape().dimensions(j)) { if (j == hlo_concat_dim) { operand_inserted_concat_dim = hlo_inserted_concat_dim && operand_shape.dimensions(i) != 1; operand_concat_dim = i; break; } i++; j++; continue; } if (i < operand_shape.rank() && operand_shape.dimensions(i) == 1) { if (j == hlo_concat_dim && hlo_inserted_concat_dim) { operand_concat_dim = i; break; } i++; continue; } if (j == hlo_concat_dim) { 
operand_concat_dim = i; operand_inserted_concat_dim = true; break; } if (j < hlo->shape().rank() && hlo->shape().dimensions(j) == 1) { j++; continue; } return std::nullopt; } } else { return std::nullopt; } CHECK_GE(operand_concat_dim, 0); return std::pair<int64_t, bool>(operand_concat_dim, operand_inserted_concat_dim); } void ModifyHloPropertiesForConcatShape(const ConcatGroup& group, HloInstruction* hlo) { *hlo->mutable_shape() = group.GetConcatShape(); if (hlo->opcode() == HloOpcode::kBroadcast) { auto operand_dim = GetOperandConcatDim( group.elements.back(), 0, group.concat_dim, group.inserted_concat_dim); CHECK(operand_dim.has_value()); int64_t operand_concat_dim = operand_dim->first; bool operand_inserted_concat_dim = operand_dim->second; if (operand_inserted_concat_dim) { CHECK_EQ(hlo->operand(0)->shape().rank(), hlo->dimensions().size() + 1) << hlo->ToString(); } else { CHECK_EQ(hlo->operand(0)->shape().rank(), hlo->dimensions().size()); } std::vector<int64_t> dims; const int64_t rank = hlo->operand(0)->shape().rank(); dims.reserve(rank); for (int64_t i = 0; i < rank; ++i) { if (i == operand_concat_dim && operand_inserted_concat_dim) { dims.push_back(group.concat_dim); } else { if (i > operand_concat_dim && operand_inserted_concat_dim) { dims.push_back(hlo->dimensions(i - 1)); } else { dims.push_back(hlo->dimensions(i)); } if (group.inserted_concat_dim && dims.back() >= group.concat_dim) { dims.back()++; } } } *hlo->mutable_dimensions() = std::move(dims); } else if (hlo->opcode() == HloOpcode::kReduce) { auto operand_dim = GetOperandConcatDim( group.elements.back(), 0, group.concat_dim, group.inserted_concat_dim); int64_t operand_concat_dim = operand_dim->first; bool operand_inserted_concat_dim = operand_dim->second; CHECK(operand_dim.has_value()); if (operand_inserted_concat_dim) { auto dims = hlo->mutable_dimensions(); for (int64_t i = 0; i < dims->size(); ++i) { if ((*dims)[i] >= operand_concat_dim) { (*dims)[i]++; } } } } } bool GroupHlosForConcat( 
HloComputation* body, HloInstruction* concat, absl::flat_hash_map<const HloInstruction*, int64_t> topological_order, ConcatGroups* groups) { const int64_t group_size = concat->operand_count(); absl::flat_hash_set<int64_t> used_groups; auto root_tuple = body->root_instruction(); CHECK_EQ(root_tuple->opcode(), HloOpcode::kTuple); absl::flat_hash_map<HloInstruction*, int64_t> root_tuple_element_use_count; for (auto operand : root_tuple->operands()) { root_tuple_element_use_count.emplace(operand, 0).first->second++; } std::multimap<int64_t, ConcatGroup> pq; const int64_t first_group_id_to_create = groups->NextGroupIndex(); auto fail_and_cleanup = [&] { VLOG(1) << "Failed to get the subcomputation to optimize for " << concat->ToString() << ", clear groups starting at " << first_group_id_to_create; groups->RemoveTailingGroups(first_group_id_to_create); return false; }; struct GroupUse { int64_t group_id; bool newly_created; bool already_used_by_subcomp; }; auto maybe_create_group = [&](ConcatGroup group) { auto res = groups->MaybeCreateNewGroup(std::move(group)); GroupUse use{res.second, false, false}; if (res.second < 0) { return use; } use.newly_created = res.first; use.already_used_by_subcomp = !used_groups.insert(res.second).second; return use; }; std::vector<HloInstruction*> concat_operands(concat->operands().begin(), concat->operands().end()); int64_t concat_operand_order = -topological_order[concat_operands[0]]; pq.emplace(concat_operand_order, ConcatGroup(std::move(concat_operands), concat->concatenate_dimension(), false)); while (!pq.empty()) { auto group = std::move(pq.begin()->second); pq.erase(pq.begin()); const auto& hlos = group.elements; VLOG(2) << "GroupHlosForConcat dequeued " << hlos[0]->ToString(); bool group_is_param_gtes = false; if (absl::c_all_of(hlos, [&](const HloInstruction* element) { return element == hlos[0]; })) { if (groups->GetGroupIndex(hlos[0]).has_value()) { VLOG(1) << "We do not support the case if a shared operand also part " "of a 
group: " << hlos[0]->ToString(); return fail_and_cleanup(); } groups->DisallowGroupingOn(hlos[0]); continue; } if (absl::c_all_of(hlos, [&](const HloInstruction* element) { return element->opcode() == HloOpcode::kGetTupleElement && element->operand(0) == body->parameter_instruction(0); })) { group_is_param_gtes = true; } else if (((hlos[0]->IsElementwise() || hlos[0]->opcode() == HloOpcode::kAllReduce) && !hlos[0]->HasSideEffect()) || hlos[0]->opcode() == HloOpcode::kBroadcast || hlos[0]->opcode() == HloOpcode::kReduce || hlos[0]->opcode() == HloOpcode::kReshape || hlos[0]->IsCustomCall("Sharding")) { if (hlos[0]->opcode() == HloOpcode::kAllReduce && (!hlos[0]->shape().IsArray() || hlos[0]->IsCrossModuleAllReduce())) { VLOG(2) << "Unsupported allreduce: " << hlos[0]->ToString(); return fail_and_cleanup(); } if (absl::c_any_of(hlos, [&](const HloInstruction* element) { auto eq_operand = [](const HloInstruction* a, const HloInstruction* b) { return ShapeUtil::Compatible(a->shape(), b->shape()); }; auto eq_computations = [](const HloComputation* lhs, const HloComputation* rhs) { return lhs->Equal(*rhs, false); }; if (!hlos[0]->Identical(*element, eq_operand, eq_computations, false)) { return true; } if (element->opcode() == HloOpcode::kReduce && (element->operand_count() != 2 || element->operand(1) != hlos[0]->operand(1))) { return true; } return false; })) { VLOG(2) << "Different types of elements. 
First element: " << hlos[0]->ToString(); return fail_and_cleanup(); } int64_t input_count = hlos[0]->operand_count(); if (hlos[0]->opcode() == HloOpcode::kReduce) { CHECK_EQ(input_count, 2); input_count = 1; } for (int64_t i = 0; i < input_count; ++i) { std::vector<HloInstruction*> elements(group_size); for (int64_t j = 0; j < group_size; ++j) { elements[j] = hlos[j]->mutable_operand(i); } auto maybe_new_concat_dim = GetOperandConcatDim( hlos[0], i, group.concat_dim, group.inserted_concat_dim); if (!maybe_new_concat_dim.has_value()) { VLOG(2) << "Cannot find operand concat dimension for operand " << i << " of " << hlos[0]->ToString(); return fail_and_cleanup(); } int64_t new_group_concat_dim = maybe_new_concat_dim->first; bool inserted_concat_dim = maybe_new_concat_dim->second; int64_t element_order = -topological_order[elements[0]]; pq.emplace(element_order, ConcatGroup(std::move(elements), new_group_concat_dim, inserted_concat_dim)); } } else if (hlos[0]->opcode() == HloOpcode::kSlice) { int64_t offset = 0; auto operand = hlos[0]->operand(0); if (group.inserted_concat_dim) { VLOG(2) << "Slices cannot be grouped on new dimension."; return fail_and_cleanup(); } if (groups->GetGroupIndex(operand).has_value()) { return fail_and_cleanup(); } groups->DisallowGroupingOn(operand); for (int64_t i = 0; i < group_size; ++i) { if (hlos[i]->operand(0) != operand) { VLOG(2) << "Slices of different operands."; return fail_and_cleanup(); } for (int64_t j = 0; j < hlos[i]->shape().rank(); ++j) { if (hlos[i]->slice_strides(j) != 1) { VLOG(2) << "Slices with strides."; return fail_and_cleanup(); } if (j == group.concat_dim) { if (hlos[i]->slice_starts(j) != offset) { VLOG(2) << "Slices with unsupported offsets."; return fail_and_cleanup(); } offset += hlos[i]->shape().dimensions(j); } else { if (hlos[i]->slice_starts(j) != 0 || hlos[i]->slice_limits(j) != operand->shape().dimensions(j)) { VLOG(2) << "Slice with unsupported offsets at dimension " << j << ", " << hlos[i]->ToString(); 
return fail_and_cleanup(); } } } } if (offset != operand->shape().dimensions(group.concat_dim)) { VLOG(2) << "Slices with unsupported sizes."; return fail_and_cleanup(); } } else { VLOG(2) << "Unsupported opcode: " << hlos[0]->ToString(); return fail_and_cleanup(); } auto guse = maybe_create_group(std::move(group)); if (guse.group_id < 0) { VLOG(2) << "Failed to create group."; return fail_and_cleanup(); } const auto& registered_group = groups->GetGroup(guse.group_id); if (!guse.already_used_by_subcomp && group_is_param_gtes) { std::vector<HloInstruction*> new_outputs(group_size); for (int64_t i = 0; i < group_size; ++i) { new_outputs[i] = root_tuple->mutable_operand( registered_group.elements[i]->tuple_index()); } int64_t new_output_order = -topological_order[new_outputs[0]]; pq.emplace( new_output_order, ConcatGroup(std::move(new_outputs), registered_group.concat_dim, registered_group.inserted_concat_dim)); } } return groups->Groups().size() > first_group_id_to_create; } std::vector<bool> TupleElementsUsedInCond(HloInstruction* loop) { std::vector<bool> result(loop->shape().tuple_shapes_size(), false); for (auto user : loop->while_condition()->parameter_instruction(0)->users()) { if (user->opcode() != HloOpcode::kGetTupleElement) { absl::c_fill(result, true); return result; } result[user->tuple_index()] = true; } return result; } absl::Status AddCopiesToRoot(HloComputation* body, absl::Span<HloInstruction* const> param_gtes, ConcatGroups* groups) { auto root = body->root_instruction(); CHECK_EQ(root->opcode(), HloOpcode::kTuple); std::vector<HloInstruction*> copies(root->operand_count(), nullptr); for (int64_t i = 0; i < copies.size(); ++i) { auto element = root->mutable_operand(i); if (!element->shape().IsArray()) { continue; } copies[i] = body->AddInstruction(HloInstruction::CreateUnary( element->shape(), HloOpcode::kCopy, element)); TF_RETURN_IF_ERROR(root->ReplaceOperandWith(i, copies[i])); } for (int64_t i = 0; i < copies.size(); ++i) { auto copy = 
copies[i]; if (groups->GetGroupIndex(copy).has_value()) { continue; } auto param_group_index = groups->GetGroupIndex(param_gtes[i]); if (!param_group_index.has_value()) { continue; } const auto& param_group = groups->GetGroup(param_group_index->first); std::vector<HloInstruction*> copy_group(param_group.elements.size()); for (int64_t j = 0; j < copy_group.size(); ++j) { copy_group[j] = copies[param_group.elements[j]->tuple_index()]; } CHECK(groups ->MaybeCreateNewGroup( ConcatGroup(std::move(copy_group), param_group.concat_dim, param_group.inserted_concat_dim)) .first); } return absl::OkStatus(); } absl::Status RemoveCopiesFromRoot(HloComputation* body) { auto root = body->root_instruction(); CHECK_EQ(root->opcode(), HloOpcode::kTuple); for (int64_t i = 0; i < root->operand_count(); ++i) { auto copy = root->mutable_operand(i); if (copy->opcode() == HloOpcode::kCopy) { TF_RETURN_IF_ERROR(root->ReplaceOperandWith(i, copy->mutable_operand(0))); } } return absl::OkStatus(); } absl::Status RewriteLoopWithConcatGroups( HloInstruction* loop, absl::Span<HloInstruction* const> param_gtes, ConcatGroups& groups) { VLOG(1) << "RewriteLoopWithConcatGroups with " << groups.Groups().size() << " groups."; absl::flat_hash_set<int64_t> processed_groups; auto body = loop->while_body(); auto param = body->parameter_instruction(0); auto cond_param = loop->while_condition()->parameter_instruction(0); std::vector<HloInstruction*> init_elements(loop->shape().tuple_shapes_size()); for (int64_t i = 0; i < param_gtes.size(); ++i) { init_elements[i] = loop->parent()->AddInstruction(HloInstruction::CreateGetTupleElement( loop->shape().tuple_shapes(i), loop->mutable_operand(0), i)); } for (int64_t i = 0; i < param_gtes.size(); ++i) { const auto& group_and_index = groups.GetGroupIndex(param_gtes[i]); if (!group_and_index.has_value() || group_and_index->second != 0) { continue; } const auto& group = groups.GetGroup(group_and_index->first); *param_gtes[i]->mutable_shape() = group.GetConcatShape(); 
*param->mutable_shape()->mutable_tuple_shapes(i) = param_gtes[i]->shape(); *body->root_instruction()->mutable_shape()->mutable_tuple_shapes(i) = param_gtes[i]->shape(); *cond_param->mutable_shape()->mutable_tuple_shapes(i) = param_gtes[i]->shape(); *loop->mutable_shape()->mutable_tuple_shapes(i) = param_gtes[i]->shape(); processed_groups.insert(group_and_index->first); std::vector<HloInstruction*> input_concat_elements; input_concat_elements.reserve(group.elements.size()); for (auto param_gte : group.elements) { input_concat_elements.push_back(init_elements[param_gte->tuple_index()]); } init_elements[i] = group.CreateConcat(std::move(input_concat_elements), loop->parent()); } TF_RETURN_IF_ERROR(loop->ReplaceOperandWithDifferentShape( 0, loop->parent()->AddInstruction( HloInstruction::CreateTuple(init_elements)))); auto original_loop_users = loop->users(); const bool loop_is_root = loop == loop->parent()->root_instruction(); std::vector<HloInstruction*> output_elements( loop->shape().tuple_shapes_size()); for (int64_t i = 0; i < param_gtes.size(); ++i) { output_elements[i] = loop->parent()->AddInstruction(HloInstruction::CreateGetTupleElement( init_elements[i]->shape(), loop, i)); } for (int64_t i = 0; i < param_gtes.size(); ++i) { const auto& group_and_index = groups.GetGroupIndex(param_gtes[i]); if (!group_and_index.has_value() || group_and_index->second != 0) { continue; } const auto& group = groups.GetGroup(group_and_index->first); auto concat_output = output_elements[group.elements[0]->tuple_index()]; for (int64_t j = 0; j < group.elements.size(); ++j) { const auto param_gte = group.elements[j]; output_elements[param_gte->tuple_index()] = group.CreateSlice(concat_output, j, loop->parent()); } } auto new_output_tuple = loop->parent()->AddInstruction( HloInstruction::CreateTuple(output_elements)); for (auto user : original_loop_users) { TF_RETURN_IF_ERROR( loop->ReplaceUseWithDifferentShape(user, new_output_tuple)); } if (loop_is_root) { 
loop->parent()->set_root_instruction(new_output_tuple, true); } std::vector<HloInstruction*> slices_to_remove; absl::flat_hash_set<HloInstruction*> new_reshapes; for (auto hlo : body->MakeInstructionPostOrder()) { const auto& group_and_index = groups.GetGroupIndex(hlo); if (!group_and_index.has_value() || group_and_index->second != 0) { continue; } if (!processed_groups.insert(group_and_index->first).second) { continue; } const auto& group = groups.GetGroup(group_and_index->first); if (hlo->opcode() == HloOpcode::kSlice) { slices_to_remove.push_back(hlo); } else { int64_t operand_count_to_adjust = hlo->operand_count(); if (hlo->opcode() == HloOpcode::kReduce) { CHECK_EQ(operand_count_to_adjust, 2); operand_count_to_adjust = 1; } for (int64_t i = 0; i < operand_count_to_adjust; ++i) { auto operand_group_index = groups.GetGroupIndex(hlo->operand(i)); const ConcatGroup* operand_group = operand_group_index.has_value() ? &groups.GetGroup(operand_group_index->first) : nullptr; auto maybe_operand_concat_dim = GetOperandConcatDim( hlo, i, group.concat_dim, group.inserted_concat_dim, operand_group); CHECK(maybe_operand_concat_dim.has_value()) << "Operand " << i << " of " << hlo->ToString(); int64_t operand_concat_dim = maybe_operand_concat_dim->first; bool operand_inserted_concat_dim = maybe_operand_concat_dim->second; if (operand_group != nullptr) { CHECK_EQ(operand_concat_dim, operand_group->concat_dim); if (operand_inserted_concat_dim != operand_group->inserted_concat_dim) { std::vector<int64_t> new_dims; int64_t d = 0; for (; d < operand_concat_dim; ++d) { new_dims.push_back(hlo->operand(i)->shape().dimensions(d)); } if (operand_inserted_concat_dim) { new_dims.push_back(group.elements.size()); new_dims.push_back( hlo->operand(i)->shape().dimensions(operand_concat_dim) / group.elements.size()); d = operand_concat_dim + 1; } else { new_dims.push_back( group.elements.size() * hlo->operand(i)->shape().dimensions(operand_concat_dim + 1)); d = operand_concat_dim + 2; } for (; 
d < hlo->operand(i)->shape().rank(); ++d) { new_dims.push_back(hlo->operand(i)->shape().dimensions(d)); } auto reshape = body->AddInstruction(HloInstruction::CreateReshape( ShapeUtil::MakeShape(hlo->operand(i)->shape().element_type(), new_dims), hlo->mutable_operand(i))); new_reshapes.insert(reshape); TF_RETURN_IF_ERROR( hlo->ReplaceOperandWithDifferentShape(i, reshape)); } continue; } CHECK( absl::c_all_of(group.elements, [&](const HloInstruction* element) { return element->operand(i) == hlo->operand(i); })); VLOG(2) << "Broadcasting shared operand " << hlo->operand(i)->ToString(); Shape data_shape = hlo->operand(i)->shape(); std::vector<int64_t> broadcast_dims; std::vector<int64_t> broadcast_shape; const int64_t data_shape_rank = data_shape.rank(); broadcast_dims.reserve(data_shape_rank); broadcast_shape.reserve(data_shape_rank + 1); for (int64_t j = 0; j < data_shape_rank; ++j) { if (j < operand_concat_dim) { broadcast_dims.push_back(j); } else { broadcast_dims.push_back(j + 1); } if (j == operand_concat_dim) { broadcast_shape.push_back(group.elements.size()); } broadcast_shape.push_back(data_shape.dimensions(j)); } if (broadcast_shape.size() == data_shape.rank()) { broadcast_shape.push_back(group.elements.size()); } auto broadcast = body->AddInstruction(HloInstruction::CreateBroadcast( ShapeUtil::MakeShape(data_shape.element_type(), broadcast_shape), hlo->mutable_operand(i), broadcast_dims)); if (!operand_inserted_concat_dim) { data_shape.set_dimensions( operand_concat_dim, data_shape.dimensions(operand_inserted_concat_dim) * group.elements.size()); broadcast = body->AddInstruction( HloInstruction::CreateReshape(data_shape, broadcast)); } TF_RETURN_IF_ERROR(hlo->ReplaceOperandWithDifferentShape(i, broadcast)); } } VLOG(2) << "Modifying HLO to full shape " << hlo->ToString(); ModifyHloPropertiesForConcatShape(group, hlo); VLOG(2) << "Modified HLO to full shape " << hlo->ToString(); } for (auto hlo : body->MakeInstructionPostOrder()) { if 
(new_reshapes.contains(hlo)) { continue; } const auto& group_and_index = groups.GetGroupIndex(hlo); if ((!group_and_index.has_value() || hlo->opcode() == HloOpcode::kReduce) && hlo != body->root_instruction()) { auto operands = hlo->operands(); if (group_and_index.has_value()) { CHECK_EQ(operands.size(), 2); CHECK_EQ(hlo->opcode(), HloOpcode::kReduce); operands.erase(operands.begin()); } for (int64_t i = 0; i < operands.size(); ++i) { auto operand = operands[i]; auto operand_group_index = groups.GetGroupIndex(operand); if (!operand_group_index.has_value()) { continue; } const auto& operand_group = groups.GetGroup(operand_group_index->first); auto slice = operand_group.CreateSlice( operand_group.elements[0], operand_group_index->second, body); TF_RETURN_IF_ERROR(hlo->ReplaceOperandWithDifferentShape(i, slice)); } } } for (auto slice : slices_to_remove) { TF_RETURN_IF_ERROR(slice->ReplaceAllUsesWith(slice->mutable_operand(0))); TF_RETURN_IF_ERROR(body->RemoveInstruction(slice)); } return absl::OkStatus(); } absl::StatusOr<bool> RunOnLoop(HloInstruction* loop, int64_t min_operand_count_to_optimize) { auto body = loop->while_body(); auto param = body->parameter_instruction(0); auto root = body->root_instruction(); if (!param->shape().IsTuple() || root->opcode() != HloOpcode::kTuple) { return false; } std::vector<HloInstruction*> gtes(param->shape().tuple_shapes_size(), nullptr); ConcatGroups groups; auto indices_used_in_cond = TupleElementsUsedInCond(loop); for (auto user : param->users()) { if (user->opcode() != HloOpcode::kGetTupleElement) { return false; } int64_t idx = user->tuple_index(); if (gtes[idx] != nullptr) { return false; } gtes[idx] = user; if (indices_used_in_cond[idx]) { groups.DisallowGroupingOn(user); } } std::vector<HloInstruction*> concats; auto body_instructions = body->MakeInstructionPostOrder(); absl::flat_hash_map<const HloInstruction*, int64_t> topological_order; for (int64_t i = 0; i < body_instructions.size(); ++i) { auto hlo = 
body_instructions[i]; topological_order[hlo] = i; if (hlo->opcode() == HloOpcode::kConcatenate && hlo->operand_count() >= min_operand_count_to_optimize) { concats.push_back(hlo); } } for (auto& concat : concats) { if (!GroupHlosForConcat(body, concat, topological_order, &groups)) { concat = nullptr; } } if (groups.Groups().empty()) { return false; } TF_RETURN_IF_ERROR(AddCopiesToRoot(body, gtes, &groups)); TF_RETURN_IF_ERROR(RewriteLoopWithConcatGroups(loop, gtes, groups)); for (auto concat : concats) { if (concat == nullptr) { continue; } auto new_slice = concat->mutable_operand(0); CHECK_EQ(new_slice->opcode(), HloOpcode::kSlice); TF_RETURN_IF_ERROR( concat->ReplaceAllUsesWith(new_slice->mutable_operand(0))); TF_RETURN_IF_ERROR(body->RemoveInstruction(concat)); } TF_RETURN_IF_ERROR(RemoveCopiesFromRoot(body)); for (auto gte : gtes) { auto group_index = groups.GetGroupIndex(gte); if (group_index.has_value() && group_index->second > 0) { TF_RETURN_IF_ERROR(root->ReplaceOperandWith(gte->tuple_index(), gte)); } } return true; } } absl::StatusOr<bool> WhileLoopConcatCodeMotion::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; for (HloComputation* comp : module->MakeComputationPostOrder(execution_threads)) { for (HloInstruction* hlo : comp->MakeInstructionPostOrder()) { if (hlo->opcode() == HloOpcode::kWhile) { TF_ASSIGN_OR_RETURN(bool loop_changed, RunOnLoop(hlo, min_operand_count_to_optimize_)); changed |= loop_changed; } } } if (changed) { HloPassPipeline pipeline("loop-concat-motion-cleanup"); pipeline.AddPass<TupleSimplifier>(); pipeline.AddPass<HloDCE>(); pipeline.AddPass<WhileLoopSimplifier>(); pipeline.AddPass<TupleSimplifier>(); pipeline.AddPass<HloDCE>(); TF_RETURN_IF_ERROR(pipeline.Run(module, execution_threads).status()); } return changed; } }
#include "xla/service/while_loop_concat_code_motion.h" #include <algorithm> #include <iterator> #include "absl/algorithm/container.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/service/hlo_verifier.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" #include "xla/xla_data.pb.h" namespace xla { namespace { namespace op = ::xla::testing::opcode_matchers; class WhileLoopConcatCodeMotionTest : public HloTestBase {}; TEST_F(WhileLoopConcatCodeMotionTest, SimpleMotion) { constexpr absl::string_view kHloModule = R"( HloModule test %cond { %param = (s32[], f32[1024,1024], f32[1024,1024]) parameter(0) %gte.0 = s32[] get-tuple-element(%param), index=0 %constant = s32[] constant(5) ROOT result = pred[] compare(%gte.0, %constant), direction=LT } %body { %param = (s32[], f32[1024,1024], f32[1024,1024]) parameter(0) %gte.0 = s32[] get-tuple-element(%param), index=0 %gte.1 = f32[1024,1024] get-tuple-element(%param), index=1 %gte.2 = f32[1024,1024] get-tuple-element(%param), index=2 %concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0} %ccall = f32[2048,1024] custom-call(%concat), custom_call_target="test" %slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]} %slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]} %ccall2 = f32[1024,1024] custom-call(), custom_call_target="test2" %add.0 = f32[1024,1024] add(%slice.0, %ccall2) %add.1 = f32[1024,1024] add(%slice.1, %ccall2) %t0 = token[] after-all() %outfeed = token[] outfeed(%slice.1, %t0) %constant = s32[] constant(1) %increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant) ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024]) tuple(%increment_iteration, %add.0, %add.1) } ENTRY test_main { %param.0 = f32[1024,1024] parameter(0) %param.1 
= f32[1024,1024] parameter(1) %constant.0 = s32[] constant(0) %while_init = (s32[], f32[1024,1024], f32[1024,1024]) tuple(%constant.0, %param.0, %param.1) ROOT %while = (s32[], f32[1024,1024], f32[1024,1024]) while(%while_init), condition=%cond, body=%body } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloModule)); TF_ASSERT_OK_AND_ASSIGN(bool changed, WhileLoopConcatCodeMotion(2).Run(module.get())); ASSERT_TRUE(changed); VLOG(1) << module->ToString(); auto loop = op::While( op::Tuple(op::Constant(), AllOf(op::Shape("f32[2048,1024]"), op::Concatenate(op::Parameter(0), op::Parameter(1))))); ASSERT_THAT( module->entry_computation()->root_instruction(), op::Tuple(op::GetTupleElement(loop), op::Slice(op::GetTupleElement(loop)), op::Slice(op::GetTupleElement(loop)))); auto while_op = module->entry_computation()->root_instruction()->operand(0)->operand(0); EXPECT_THAT(while_op->while_body()->root_instruction(), op::Tuple(op::Add(), op::Add(op::CustomCall(), op::Reshape(op::Broadcast(op::CustomCall()))))); } TEST_F(WhileLoopConcatCodeMotionTest, NoMotionWithChangedElementOrder) { constexpr absl::string_view kHloModule = R"( HloModule test %cond { %param = (s32[], f32[1024,1024], f32[1024,1024]) parameter(0) %gte.0 = s32[] get-tuple-element(%param), index=0 %constant = s32[] constant(5) ROOT result = pred[] compare(%gte.0, %constant), direction=LT } %body { %param = (s32[], f32[1024,1024], f32[1024,1024]) parameter(0) %gte.0 = s32[] get-tuple-element(%param), index=0 %gte.1 = f32[1024,1024] get-tuple-element(%param), index=1 %gte.2 = f32[1024,1024] get-tuple-element(%param), index=2 %concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0} %ccall = f32[2048,1024] custom-call(%concat), custom_call_target="test" %slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]} %slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]} %constant = s32[] constant(1) %increment_iteration = s32[] add(s32[] 
%gte.0, s32[] %constant) ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024]) tuple(%increment_iteration, %slice.1, %slice.0) } ENTRY test_main { %param.0 = f32[1024,1024] parameter(0) %param.1 = f32[1024,1024] parameter(1) %constant.0 = s32[] constant(0) %while_init = (s32[], f32[1024,1024], f32[1024,1024]) tuple(%constant.0, %param.0, %param.1) ROOT %while = (s32[], f32[1024,1024], f32[1024,1024]) while(%while_init), condition=%cond, body=%body } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloModule)); TF_ASSERT_OK_AND_ASSIGN(bool changed, WhileLoopConcatCodeMotion(2).Run(module.get())); ASSERT_FALSE(changed); } TEST_F(WhileLoopConcatCodeMotionTest, CascadedConcats) { constexpr absl::string_view kHloModule = R"( HloModule test %cond { %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0) %gte.0 = s32[] get-tuple-element(%param), index=0 %constant = s32[] constant(5) ROOT result = pred[] compare(%gte.0, %constant), direction=LT } %body { %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0) %gte.0 = s32[] get-tuple-element(%param), index=0 %gte.1 = f32[1024,1024] get-tuple-element(%param), index=1 %gte.2 = f32[1024,1024] get-tuple-element(%param), index=2 %concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0} %gte.3 = f32[1024,1024] get-tuple-element(%param), index=3 %gte.4 = f32[1024,1024] get-tuple-element(%param), index=4 %ccall = f32[2048,1024] custom-call(%concat), custom_call_target="test" %slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]} %slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]} %add.0 = f32[1024,1024] add(%slice.0, %gte.3) %add.1 = f32[1024,1024] add(%slice.1, %gte.4) %add.2 = f32[1024,1024] add(%gte.3, %gte.3) %add.3 = f32[1024,1024] add(%gte.4, %gte.4) %constant = s32[] constant(1) %increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant) ROOT 
%loop_result = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) tuple(%increment_iteration, %add.0, %add.1, %add.2, %add.3) } ENTRY test_main { %param.0 = f32[1024,1024] parameter(0) %param.1 = f32[1024,1024] parameter(1) %param.2 = f32[1024,1024] parameter(2) %param.3 = f32[1024,1024] parameter(3) %constant.0 = s32[] constant(0) %while_init = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) tuple(%constant.0, %param.0, %param.1, %param.2, %param.3) ROOT %while = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) while(%while_init), condition=%cond, body=%body } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloModule)); TF_ASSERT_OK_AND_ASSIGN(bool changed, WhileLoopConcatCodeMotion(2).Run(module.get())); ASSERT_TRUE(changed); VLOG(1) << module->ToString(); auto loop = op::While( op::Tuple(op::Constant(), AllOf(op::Shape("f32[2048,1024]"), op::Concatenate(op::Parameter(0), op::Parameter(1))), AllOf(op::Shape("f32[2048,1024]"), op::Concatenate(op::Parameter(2), op::Parameter(3))))); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Tuple(op::GetTupleElement(loop), op::Slice(op::GetTupleElement(loop)), op::Slice(op::GetTupleElement(loop)), op::Slice(op::GetTupleElement(loop)), op::Slice(op::GetTupleElement(loop)))); } TEST_F(WhileLoopConcatCodeMotionTest, TwoConcatsSharedGroups) { constexpr absl::string_view kHloModule = R"( HloModule test %cond { %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0) %gte.0 = s32[] get-tuple-element(%param), index=0 %constant = s32[] constant(5) ROOT result = pred[] compare(%gte.0, %constant), direction=LT } %body { %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0) %gte.0 = s32[] get-tuple-element(%param), index=0 %gte.1 = f32[1024,1024] get-tuple-element(%param), index=1 %gte.2 = f32[1024,1024] get-tuple-element(%param), 
index=2 %concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0} %ccall = f32[2048,1024] custom-call(%concat), custom_call_target="test" %slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]} %slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]} %gte.3 = f32[1024,1024] get-tuple-element(%param), index=3 %gte.4 = f32[1024,1024] get-tuple-element(%param), index=4 %concat.1 = f32[2048,1024] concatenate(%gte.3, %gte.4), dimensions={0} %ccall.1 = f32[2048,1024] custom-call(%concat.1), custom_call_target="test" %slice.2 = f32[1024,1024] slice(%ccall.1), slice={[0:1024], [0:1024]} %slice.3 = f32[1024,1024] slice(%ccall.1), slice={[1024:2048], [0:1024]} %add.0 = f32[1024,1024] add(%slice.0, %slice.2) %add.1 = f32[1024,1024] add(%slice.1, %slice.3) %sub.0 = f32[1024,1024] subtract(%slice.0, %slice.2) %sub.1 = f32[1024,1024] subtract(%slice.1, %slice.3) %constant = s32[] constant(1) %increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant) ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) tuple(%increment_iteration, %add.0, %add.1, %sub.0, %sub.1) } ENTRY test_main { %param.0 = f32[1024,1024] parameter(0) %param.1 = f32[1024,1024] parameter(1) %param.2 = f32[1024,1024] parameter(2) %param.3 = f32[1024,1024] parameter(3) %constant.0 = s32[] constant(0) %while_init = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) tuple(%constant.0, %param.0, %param.1, %param.2, %param.3) ROOT %while = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) while(%while_init), condition=%cond, body=%body } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloModule)); TF_ASSERT_OK_AND_ASSIGN(bool changed, WhileLoopConcatCodeMotion(2).Run(module.get())); ASSERT_TRUE(changed); VLOG(1) << module->ToString(); auto loop = op::While( op::Tuple(op::Constant(), AllOf(op::Shape("f32[2048,1024]"), op::Concatenate(op::Parameter(0), 
op::Parameter(1))), AllOf(op::Shape("f32[2048,1024]"), op::Concatenate(op::Parameter(2), op::Parameter(3))))); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Tuple(op::GetTupleElement(loop), op::Slice(op::GetTupleElement(loop)), op::Slice(op::GetTupleElement(loop)), op::Slice(op::GetTupleElement(loop)), op::Slice(op::GetTupleElement(loop)))); } TEST_F(WhileLoopConcatCodeMotionTest, TwoConcatsDifferentOrders) { constexpr absl::string_view kHloModule = R"( HloModule test %cond { %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0) %gte.0 = s32[] get-tuple-element(%param), index=0 %constant = s32[] constant(5) ROOT result = pred[] compare(%gte.0, %constant), direction=LT } %body { %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0) %gte.0 = s32[] get-tuple-element(%param), index=0 %gte.1 = f32[1024,1024] get-tuple-element(%param), index=1 %gte.2 = f32[1024,1024] get-tuple-element(%param), index=2 %concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0} %ccall = f32[2048,1024] custom-call(%concat), custom_call_target="test" %slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]} %slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]} %gte.3 = f32[1024,1024] get-tuple-element(%param), index=3 %gte.4 = f32[1024,1024] get-tuple-element(%param), index=4 %concat.1 = f32[2048,1024] concatenate(%gte.3, %gte.4), dimensions={0} %ccall.1 = f32[2048,1024] custom-call(%concat.1), custom_call_target="test" %slice.2 = f32[1024,1024] slice(%ccall.1), slice={[0:1024], [0:1024]} %slice.3 = f32[1024,1024] slice(%ccall.1), slice={[1024:2048], [0:1024]} %add.0 = f32[1024,1024] add(%slice.0, %slice.3) %add.1 = f32[1024,1024] add(%slice.1, %slice.2) %sub.0 = f32[1024,1024] subtract(%slice.0, %slice.2) %sub.1 = f32[1024,1024] subtract(%slice.1, %slice.3) %constant = s32[] constant(1) %increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant) ROOT 
%loop_result = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) tuple(%increment_iteration, %add.0, %add.1, %sub.0, %sub.1) } ENTRY test_main { %param.0 = f32[1024,1024] parameter(0) %param.1 = f32[1024,1024] parameter(1) %param.2 = f32[1024,1024] parameter(2) %param.3 = f32[1024,1024] parameter(3) %constant.0 = s32[] constant(0) %while_init = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) tuple(%constant.0, %param.0, %param.1, %param.2, %param.3) ROOT %while = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) while(%while_init), condition=%cond, body=%body } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloModule)); TF_ASSERT_OK_AND_ASSIGN(bool changed, WhileLoopConcatCodeMotion(2).Run(module.get())); EXPECT_TRUE(changed); VLOG(1) << module->ToString(); auto loop = op::While( op::Tuple(op::Constant(), op::Parameter(0), op::Parameter(1), AllOf(op::Shape("f32[2048,1024]"), op::Concatenate(op::Parameter(2), op::Parameter(3))))); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Tuple(op::GetTupleElement(loop), op::GetTupleElement(loop), op::GetTupleElement(loop), op::Slice(op::GetTupleElement(loop)), op::Slice(op::GetTupleElement(loop)))); } TEST_F(WhileLoopConcatCodeMotionTest, NonElementwiseOps) { constexpr absl::string_view kHloModule = R"( HloModule test %cond { %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1]) parameter(0) %gte.0 = s32[] get-tuple-element(%param), index=0 %constant = s32[] constant(5) ROOT result = pred[] compare(%gte.0, %constant), direction=LT } %sum { %a = f32[] parameter(0) %b = f32[] parameter(1) ROOT %add = f32[] add(%a, %b) } %body { %param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1]) parameter(0) %gte.0 = s32[] get-tuple-element(%param), index=0 %gte.1 = f32[1024,1024] get-tuple-element(%param), index=1 %gte.2 = f32[1024,1024] 
get-tuple-element(%param), index=2 %reshape.0 = f32[1,1024,1024] reshape(%gte.1) %reshape.1 = f32[1,1024,1024] reshape(%gte.2) %concat = f32[2,1024,1024] concatenate(%reshape.0, %reshape.1), dimensions={0} %ccall = f32[2,1024,1024] custom-call(%concat), custom_call_target="test" %slice.0 = f32[1,1024,1024] slice(%ccall), slice={[0:1], [0:1024], [0:1024]} %slice.1 = f32[1,1024,1024] slice(%ccall), slice={[1:2], [0:1024], [0:1024]} %reshape.2 = f32[1024,1024] reshape(%slice.0 ) %reshape.3 = f32[1024,1024] reshape(%slice.1) %gte.3 = f32[1024] get-tuple-element(%param), index=3 %gte.4 = f32[1024] get-tuple-element(%param), index=4 %constant.0 = f32[] constant(0) %reduce.0 = f32[1024] reduce(%reshape.0, %constant.0), to_apply=%sum, dimensions={0,1} %reduce.1 = f32[1024] reduce(%reshape.1, %constant.0), to_apply=%sum, dimensions={0,1} %add.0 = f32[1024] add(%reduce.0, %gte.3) %add.1 = f32[1024] add(%reduce.1, %gte.4) %br0 = f32[1024,1024] broadcast(%add.0), dimensions={1} %br1 = f32[1024,1024] broadcast(%add.1), dimensions={1} %sub.0 = f32[1024,1024] subtract(%reshape.2, %br0) %sub.1 = f32[1024,1024] subtract(%reshape.3, %br1) %gte.5 = f32[1] get-tuple-element(%param), index=5 %gte.6 = f32[1] get-tuple-element(%param), index=6 %reshape.4 = f32[] reshape(%gte.5) %reshape.5 = f32[] reshape(%gte.6) %br2 = f32[1024] broadcast(%reshape.4), dimensions={} %br3 = f32[1024] broadcast(%reshape.5), dimensions={} %add.2 = f32[1024] add(%add.0, %br2) %add.3 = f32[1024] add(%add.1, %br3) %inc0 = f32[] add(%constant.0, %reshape.4) %inc1 = f32[] add(%constant.0, %reshape.5) %reshape.6 = f32[1] reshape(%inc0) %reshape.7 = f32[1] reshape(%inc1) %constant = s32[] constant(1) %increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant) ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1]) tuple(%increment_iteration, %sub.0, %sub.1, %add.2, %add.3, %reshape.6, %reshape.7) } ENTRY test_main { %param.0 = f32[1024,1024] parameter(0) %param.1 = 
f32[1024,1024] parameter(1) %param.2 = f32[1024] parameter(2) %param.3 = f32[1024] parameter(3) %param.4 = f32[1] parameter(4) %param.5 = f32[1] parameter(5) %constant.0 = s32[] constant(0) %while_init = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1]) tuple(%constant.0, %param.0, %param.1, %param.2, %param.3, %param.4, %param.5) ROOT %while = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1]) while(%while_init), condition=%cond, body=%body } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloModule)); TF_ASSERT_OK_AND_ASSIGN(bool changed, WhileLoopConcatCodeMotion(2).Run(module.get())); ASSERT_TRUE(changed); VLOG(1) << module->ToString(); auto loop = op::While( op::Tuple(op::Constant(), AllOf(op::Shape("f32[2,1024,1024]"), op::Concatenate(op::Reshape(op::Parameter(0)), op::Reshape(op::Parameter(1)))), AllOf(op::Shape("f32[2,1024]"), op::Concatenate(op::Reshape(op::Parameter(2)), op::Reshape(op::Parameter(3)))), AllOf(op::Shape("f32[2]"), op::Concatenate(op::Parameter(4), op::Parameter(5))))); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::GetTupleElement(loop), op::Reshape(op::Slice(op::GetTupleElement(loop))), op::Reshape(op::Slice(op::GetTupleElement(loop))), op::Reshape(op::Slice(op::GetTupleElement(loop))), op::Reshape(op::Slice(op::GetTupleElement(loop))), op::Slice(op::GetTupleElement(loop)), op::Slice(op::GetTupleElement(loop)))); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_concat_code_motion.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_concat_code_motion_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
a90867e2-ebc0-4bfe-a0e5-bb10947c0d3f
cpp
tensorflow/tensorflow
sort_simplifier
third_party/xla/xla/service/sort_simplifier.cc
third_party/xla/xla/service/sort_simplifier_test.cc
#include "xla/service/sort_simplifier.h" #include <memory> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/status/statusor.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" namespace xla { namespace { absl::StatusOr<bool> RemoveUnusedOperandFromSort(HloInstruction* sort) { if (!sort->shape().IsTuple()) { return false; } HloComputation* computation = sort->parent(); if (computation->root_instruction() == sort) { return false; } absl::flat_hash_set<int64_t> used_indices; for (const HloInstruction* user : sort->users()) { if (user->opcode() != HloOpcode::kGetTupleElement) { return false; } used_indices.insert(user->tuple_index()); } auto comparator = sort->to_apply(); for (int64_t i = 0; i < sort->operand_count() * 2; ++i) { if (comparator->parameter_instruction(i)->user_count() > 0) { used_indices.insert(i / 2); } } if (used_indices.size() == sort->operand_count()) { return false; } std::vector<HloInstruction*> operands; std::vector<const Shape*> new_shapes; for (int64_t i = 0; i < sort->operand_count(); ++i) { if (used_indices.contains(i)) { operands.push_back(sort->mutable_operand(i)); new_shapes.push_back(&sort->operand(i)->shape()); } } Shape new_sort_shape = new_shapes.size() == 1 ? 
*new_shapes[0] : ShapeUtil::MakeTupleShapeWithPtrs(new_shapes); HloInstruction* new_sort = computation->AddInstruction( sort->CloneWithNewOperands(new_sort_shape, operands)); absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>> replacements; int64_t parameter_number = 0; for (int64_t i = 0; i < sort->operand_count(); ++i) { auto* old_lhs_parameter = comparator->parameter_instruction(i * 2); auto* old_rhs_parameter = comparator->parameter_instruction(i * 2 + 1); if (used_indices.contains(i)) { Shape scalar_shape = ShapeUtil::MakeShape(sort->operand(i)->shape().element_type(), {}); replacements[old_lhs_parameter] = HloInstruction::CreateParameter( parameter_number, scalar_shape, absl::StrCat("p.", parameter_number / 2, ".lhs")); ++parameter_number; replacements[old_rhs_parameter] = HloInstruction::CreateParameter( parameter_number, scalar_shape, absl::StrCat("p.", parameter_number / 2, ".rhs")); ++parameter_number; } else { replacements[old_lhs_parameter] = nullptr; replacements[old_rhs_parameter] = nullptr; } } HloModule* module = sort->GetModule(); HloComputation* new_compare = module->AddEmbeddedComputation( comparator->CloneWithReplacements(&replacements)); new_sort->set_to_apply(new_compare); absl::flat_hash_map<int64_t, HloInstruction*> result_map; if (new_sort->shape().IsTuple()) { int64_t new_index = 0; for (int64_t i = 0; i < sort->operand_count(); ++i) { if (used_indices.count(i)) { result_map[i] = computation->AddInstruction(HloInstruction::CreateGetTupleElement( *new_shapes[new_index], new_sort, new_index)); ++new_index; } } } else { CHECK_EQ(used_indices.size(), 1); result_map[*used_indices.begin()] = new_sort; } std::vector<HloInstruction*> users(sort->users().begin(), sort->users().end()); for (HloInstruction* user : users) { TF_RETURN_IF_ERROR( user->ReplaceAllUsesWith(result_map.at(user->tuple_index()))); TF_RETURN_IF_ERROR(computation->RemoveInstructionAndUnusedOperands(user)); } return true; } } absl::StatusOr<bool> 
SortSimplifier::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { VLOG(2) << "HLO module before SortSimplifier:"; XLA_VLOG_LINES(2, module->ToString()); bool changed = false; std::vector<HloInstruction*> sort_instrs; for (auto* comp : module->MakeNonfusionComputations(execution_threads)) { absl::c_copy_if(comp->instructions(), std::back_inserter(sort_instrs), HloPredicateIsOp<HloOpcode::kSort>); } for (HloInstruction* sort_instr : sort_instrs) { TF_ASSIGN_OR_RETURN(bool result, RemoveUnusedOperandFromSort(sort_instr)); changed |= result; } if (changed) { VLOG(2) << "HLO module after SortSimplifier:"; XLA_VLOG_LINES(2, module->ToString()); } else { VLOG(2) << "HLO module unchanged after SortSimplifier"; } return changed; } }
// Unit tests for the SortSimplifier pass: verifies that sort operands whose
// results are unused get removed, and that used keys are preserved.
#include "xla/service/sort_simplifier.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace m = match;
using SortSimplifierTest = HloTestBase;
// A sort whose second operand (values) is never read should be rewritten to a
// single-operand, array-shaped sort.  Runs the pass to fixpoint: the first run
// changes the module, the second reports no change (hence exactly 2 runs).
TEST_F(SortSimplifierTest, RemoveUnusedSortOperandArrayResult) {
  const char* hlo_string = R"( HloModule permutation_sort compare { p.0.lhs = f32[] parameter(0) p.0.rhs = f32[] parameter(1) p.1.lhs = s32[] parameter(2) p.1.rhs = s32[] parameter(3) ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT } ENTRY sort_computation { keys = f32[64,8732]{1,0} parameter(0) values = s32[64,8732]{1,0} parameter(1) sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values), dimensions={1}, to_apply=compare ROOT gte = f32[64,8732]{1,0} get-tuple-element(sort), index=0 })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  SortSimplifier simplifier;
  uint64_t num_executions = 0;
  do {
    num_executions++;
  } while (simplifier.Run(module.get()).value());
  EXPECT_EQ(num_executions, 2);
  auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, GmockMatch(m::Sort(m::Parameter(0))));
}
// Three-operand sort where only operands 0 and 2 are read: the pass keeps the
// tuple shape but drops the unused middle operand; GTE indices are remapped
// (old index 2 becomes new index 1).
TEST_F(SortSimplifierTest, RemoveUnusedSortOperandTuple) {
  const char* hlo_string = R"( HloModule permutation_sort compare { p.0.lhs = f32[] parameter(0) p.0.rhs = f32[] parameter(1) p.1.lhs = s32[] parameter(2) p.1.rhs = s32[] parameter(3) p.2.lhs = u32[] parameter(4) p.2.rhs = u32[] parameter(5) ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT } ENTRY sort_computation { keys = f32[64,87] parameter(0) values.0 = s32[64,87] parameter(1) values.1 = u32[64,87] parameter(2) sort = (f32[64,87], s32[64,87], u32[64,87]) sort( keys, values.0, values.1), dimensions={1}, to_apply=compare gte.0 = f32[64,87] get-tuple-element(sort), index=0 gte.1 = u32[64,87] 
 get-tuple-element(sort), index=2 ROOT tuple = (f32[64,87], u32[64,87]) tuple(gte.0, gte.1) })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  SortSimplifier simplifier;
  EXPECT_TRUE(simplifier.Run(module.get()).value());
  auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(
      root,
      GmockMatch(m::Tuple(
          m::GetTupleElement(m::Sort(m::Parameter(0), m::Parameter(2)), 0),
          m::GetTupleElement(m::Sort(m::Parameter(0), m::Parameter(2)), 1))));
}
// The key operand participates in the comparator, so even though only the
// values result is consumed, nothing may be removed: the pass must report
// "no change".
TEST_F(SortSimplifierTest, DontRemoveUnusedSortKey) {
  const char* hlo_string = R"( HloModule permutation_sort compare { p.0.lhs = f32[] parameter(0) p.0.rhs = f32[] parameter(1) p.1.lhs = s32[] parameter(2) p.1.rhs = s32[] parameter(3) ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT } ENTRY sort_computation { keys = f32[64,8732]{1,0} parameter(0) values = s32[64,8732]{1,0} parameter(1) sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values), dimensions={1}, to_apply=compare ROOT gte = s32[64,8732]{1,0} get-tuple-element(sort), index=1 })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  SortSimplifier simplifier;
  EXPECT_FALSE(simplifier.Run(module.get()).value());
}
// Mirror case of the first test: here the comparator only reads operand 1, so
// operand 0 (the nominal key) is the removable one.  Again runs to fixpoint.
TEST_F(SortSimplifierTest, RemoveUnusedFirstOperand) {
  const char* hlo_string = R"( HloModule permutation_sort compare { p.0.lhs = f32[] parameter(0) p.0.rhs = f32[] parameter(1) p.1.lhs = s32[] parameter(2) p.1.rhs = s32[] parameter(3) ROOT lt = pred[] compare(p.1.lhs, p.1.rhs), direction=LT } ENTRY sort_computation { keys = f32[64,8732]{1,0} parameter(0) values = s32[64,8732]{1,0} parameter(1) sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values), dimensions={1}, to_apply=compare ROOT gte = s32[64,8732]{1,0} get-tuple-element(sort), index=1 })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  SortSimplifier simplifier;
  uint64_t num_executions = 0;
  do {
    num_executions++;
  } while (simplifier.Run(module.get()).value());
  EXPECT_EQ(num_executions, 2);
  auto root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, GmockMatch(m::Sort(m::Parameter(1))));
}
}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/sort_simplifier.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/sort_simplifier_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
8585c3c1-3110-4d7f-9df7-16798a77f5b7
cpp
tensorflow/tensorflow
stochastic_convert_decomposer
third_party/xla/xla/service/stochastic_convert_decomposer.cc
third_party/xla/xla/service/stochastic_convert_decomposer_test.cc
// StochasticConvertDecomposer: rewrites kStochasticConvert instructions into
// an equivalent graph of elementary HLO ops (sign/abs/floor/compare/select),
// so backends without native support can execute them.
#include "xla/service/stochastic_convert_decomposer.h"
#include <cstdint>
#include <limits>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/shape_inference.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
// Expands one stochastic-convert(operand, random) into primitive HLOs and
// removes the original instruction from `comp`.  Only float -> signed-integer
// conversions are handled; anything else returns an Internal error.
// `random` supplies the per-element random bits used for probabilistic
// rounding (its bit-width is validated by InferStochasticConvertShape).
absl::Status DecomposeStochasticConvert(HloComputation* comp,
                                        HloInstruction* instruction) {
  CHECK(instruction->opcode() == HloOpcode::kStochasticConvert)
      << "requires a stochastic_convert instruction to decompose, but got: "
      << instruction->opcode();
  CHECK(instruction->operand_count() == 2)
      << "requires 2 operands for stochastic convert, but got: "
      << instruction->operand_count();
  HloInstruction* operand = instruction->mutable_operand(0);
  HloInstruction* random = instruction->mutable_operand(1);
  PrimitiveType from_type = operand->shape().element_type();
  PrimitiveType random_type = random->shape().element_type();
  PrimitiveType to_type = instruction->shape().element_type();
  // Re-run shape inference purely for its validation side effect (type and
  // bit-width compatibility of the three types involved).
  TF_RETURN_IF_ERROR(ShapeInference::InferStochasticConvertShape(
                         operand->shape(), random->shape(), to_type)
                         .status());
  VLOG(1) << "Decomposing instruction: " << instruction->ToString();
  if (primitive_util::IsSignedIntegralType(to_type)) {
    // Work on |operand|; the sign is reapplied via a select at the end.
    TF_ASSIGN_OR_RETURN(HloInstruction * operand_sign,
                        MakeUnaryHlo(HloOpcode::kSign, operand));
    TF_ASSIGN_OR_RETURN(HloInstruction * should_neg,
                        MakeCompareHlo(Comparison::Direction::kLt, operand_sign,
                                       MakeScalarLike(operand_sign, 0)));
    TF_ASSIGN_OR_RETURN(HloInstruction * operand_abs,
                        MakeUnaryHlo(HloOpcode::kAbs, operand));
    TF_ASSIGN_OR_RETURN(HloInstruction * truncated_fp,
                        MakeUnaryHlo(HloOpcode::kFloor, operand_abs));
    // fractional = |x| - floor(|x|), the probability of rounding up.
    TF_ASSIGN_OR_RETURN(
        HloInstruction * fractional,
        MakeBinaryHlo(HloOpcode::kSubtract, operand_abs, truncated_fp));
    if (from_type == F16) {
      // Widen so the scaled fraction below does not lose precision in f16.
      fractional = MakeConvertToHlo(fractional, F32);
    }
    // Scale the fraction into the integer range of the random bits:
    // fixed_fractional = fractional * 2^BitWidth(random_type).
    TF_ASSIGN_OR_RETURN(
        HloInstruction * fixed_fractional,
        MakeBinaryHlo(
            HloOpcode::kMultiply, fractional,
            MakeScalarLike(fractional,
                           IPow<double>(2, primitive_util::BitWidth(
                                               random_type)))));
    // Round up with probability `fractional`: random < fixed_fractional.
    TF_ASSIGN_OR_RETURN(
        HloInstruction * should_round_up,
        MakeCompareHlo(Comparison::Direction::kLt, random,
                       MakeConvertToHlo(fixed_fractional, random_type)));
    HloInstruction* truncated_int = MakeConvertToHlo(truncated_fp, to_type);
    TF_ASSIGN_OR_RETURN(
        truncated_int,
        MakeSelectHlo(should_round_up,
                      MakeBinaryHlo(HloOpcode::kAdd, truncated_int,
                                    MakeScalarLike(truncated_int, 1))
                          .value(),
                      truncated_int));
    // Reapply the original sign.
    TF_ASSIGN_OR_RETURN(
        HloInstruction * result,
        MakeSelectHlo(should_neg,
                      MakeUnaryHlo(HloOpcode::kNegate, truncated_int).value(),
                      truncated_int));
    // Clamp to the representable range of to_type.  `min` is
    // -2^(to_bits-1): the (1 + ~1) expression forms all-ones (i.e. -1) in
    // uint64 arithmetic before the shift.
    auto to_bits = primitive_util::BitWidth(to_type);
    auto min = static_cast<int64_t>(
        (static_cast<uint64_t>(1) + ~static_cast<uint64_t>(1))
        << (to_bits - 1));
    TF_ASSIGN_OR_RETURN(HloInstruction * is_min,
                        MakeCompareHlo(Comparison::Direction::kLe, operand,
                                       MakeScalarLike(operand, min)));
    TF_ASSIGN_OR_RETURN(
        result, MakeSelectHlo(is_min, MakeScalarLike(result, min), result));
    // `max` is 2^(to_bits-1) - 1.
    auto max =
        static_cast<int64_t>((static_cast<uint64_t>(1) << (to_bits - 1)) - 1);
    TF_ASSIGN_OR_RETURN(HloInstruction * is_max,
                        MakeCompareHlo(Comparison::Direction::kGe, operand,
                                       MakeScalarLike(operand, max)));
    TF_ASSIGN_OR_RETURN(
        result, MakeSelectHlo(is_max, MakeScalarLike(result, max), result));
    TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(result));
    TF_RETURN_IF_ERROR(comp->RemoveInstruction(instruction));
    return absl::OkStatus();
  }
  // TODO(review): unsigned/float targets are not implemented here.
  return Internal("Unsupported stochastic convert: from %s to %s",
                  PrimitiveType_Name(from_type), PrimitiveType_Name(to_type));
}
// Pass driver: decomposes every kStochasticConvert found in the module's
// non-fusion computations.  Returns true iff at least one was rewritten.
absl::StatusOr<bool> StochasticConvertDecomposer::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool changed = false;
  for (HloComputation* computation :
       module->MakeNonfusionComputations(execution_threads)) {
    // MakeInstructionPostOrder yields a snapshot, so removing instructions
    // inside the loop is safe.
    for (HloInstruction* instruction :
         computation->MakeInstructionPostOrder()) {
      if (instruction->opcode() != HloOpcode::kStochasticConvert) {
        continue;
      }
      TF_RETURN_IF_ERROR(DecomposeStochasticConvert(computation, instruction));
      changed = true;
    }
  }
  return changed;
}
}  // namespace xla
// Unit tests for StochasticConvertDecomposer: successful decomposition for
// float -> signed-int conversions, and error reporting for invalid operands.
#include "xla/service/stochastic_convert_decomposer.h"
#include <string>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using StochasticConvertDecomposerTest = HloTestBase;
using ::testing::HasSubstr;
// f32 -> s32 with u32 randomness decomposes into the expected nest of
// selects: max-clamp, min-clamp, then sign-reapplication around the rounded
// magnitude.
TEST_F(StochasticConvertDecomposerTest, DecomposeStochasticConvertF32ToS32) {
  const std::string module_str = R"( HloModule module ENTRY entry { %arg_param.1 = f32[65536]{0} parameter(0) %random_param.2 = u32[65536]{0} parameter(1) ROOT %stochastic-convert.3 = s32[65536]{0} stochastic-convert(f32[65536]{0} %arg_param.1, u32[65536]{0} %random_param.2) } )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((module_str)));
  StochasticConvertDecomposer decomposer;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Select(op::Compare(), op::Broadcast(),
                         op::Select(op::Compare(), op::Broadcast(),
                                    op::Select(op::Compare(), op::Negate(),
                                               op::Select()))));
}
// Same structural expectation for a narrower conversion (bf16 -> s8 with u16
// randomness).
TEST_F(StochasticConvertDecomposerTest, DecomposeStochasticConvertBF16ToS8) {
  const std::string module_str = R"( HloModule module ENTRY entry { %arg_param.1 = bf16[65536]{0} parameter(0) %random_param.2 = u16[65536]{0} parameter(1) ROOT %stochastic-convert.3 = s8[65536]{0} stochastic-convert(bf16[65536]{0} %arg_param.1, u16[65536]{0} %random_param.2) } )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((module_str)));
  StochasticConvertDecomposer decomposer;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Select(op::Compare(), op::Broadcast(),
                         op::Select(op::Compare(), op::Broadcast(),
                                    op::Select(op::Compare(), op::Negate(),
                                               op::Select()))));
}
// Random bits (u32) wider than the bf16 operand: shape inference inside the
// pass must reject the instruction.
TEST_F(StochasticConvertDecomposerTest, WrongRandomBitWidth) {
  const std::string module_str = R"( HloModule module ENTRY entry { %arg_param.1 = bf16[65536]{0} parameter(0) %random_param.2 = u32[65536]{0} parameter(1) ROOT %stochastic-convert.3 = s32[65536]{0} stochastic-convert(bf16[65536]{0} %arg_param.1, u32[65536]{0} %random_param.2) } )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((module_str)));
  StochasticConvertDecomposer decomposer;
  auto result = decomposer.Run(module.get());
  EXPECT_NE(absl::OkStatus(), result.status());
  EXPECT_THAT(result.status().message(), HasSubstr("have same bits"));
}
// Signed random operand is invalid: the random bits must be an unsigned
// integer type.
TEST_F(StochasticConvertDecomposerTest, WrongRandomType) {
  const std::string module_str = R"( HloModule module ENTRY entry { %arg_param.1 = f32[65536]{0} parameter(0) %random_param.2 = s32[65536]{0} parameter(1) ROOT %stochastic-convert.3 = s32[65536]{0} stochastic-convert(f32[65536]{0} %arg_param.1, s32[65536]{0} %random_param.2) } )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((module_str)));
  StochasticConvertDecomposer decomposer;
  auto result = decomposer.Run(module.get());
  EXPECT_NE(absl::OkStatus(), result.status());
  EXPECT_THAT(result.status().message(),
              HasSubstr("must be unsigned integers"));
}
}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/stochastic_convert_decomposer.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/stochastic_convert_decomposer_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ebd2d89a-eae6-4d8a-8ae4-35118a5a7120
cpp
tensorflow/tensorflow
map_inliner
third_party/xla/xla/service/map_inliner.cc
third_party/xla/xla/service/map_inliner_test.cc
// MapInliner: replaces kMap instructions whose mapped computation is trivial
// (root consumes only parameters) with the root operation applied directly to
// the map's operands.
#include "xla/service/map_inliner.h"
#include <memory>
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
// DFS visitor that performs the inlining for a single computation.  Only
// HandleMap does real work; every other instruction falls through to
// DefaultAction (a no-op).
class MapInlinerVisitor : public DfsHloVisitorWithDefault {
 public:
  explicit MapInlinerVisitor(HloComputation* computation)
      : computation_(computation) {}
  // Non-map instructions are left untouched.
  absl::Status DefaultAction(HloInstruction* ) override {
    return absl::OkStatus();
  }
  absl::Status HandleMap(HloInstruction* map) override;
  // Runs the visitor over `computation`; returns true iff it changed.
  absl::StatusOr<bool> Run(HloComputation* computation);
 private:
  HloComputation* computation_;  // computation currently being rewritten
  bool changed_ = false;         // set by HandleMap on any rewrite
};
absl::StatusOr<bool> MapInlinerVisitor::Run(HloComputation* computation) {
  changed_ = false;
  computation_ = computation;
  TF_RETURN_IF_ERROR(computation->root_instruction()->Accept(this));
  return changed_;
}
absl::Status MapInlinerVisitor::HandleMap(HloInstruction* map) {
  HloComputation* function = map->to_apply();
  HloInstruction& root = *function->root_instruction();
  // Inline only when the mapped computation is a single op over parameters.
  if (hlo_query::AllOperandsAreParameters(root)) {
    if (root.opcode() == HloOpcode::kFusion) {
      // Cloning a fusion root is not supported here; leave the map alone.
      return absl::OkStatus();
    }
    VLOG(10) << "inlining map({X ... Y}, op) => : op(X ... Y) with function "
             << root.ToShortString();
    if (root.opcode() == HloOpcode::kParameter) {
      // Identity map: forward the corresponding operand and delete the map.
      TF_RETURN_IF_ERROR(
          map->ReplaceAllUsesWith(map->operands()[root.parameter_number()]));
      TF_RETURN_IF_ERROR(computation_->RemoveInstruction(map));
    } else if (root.opcode() == HloOpcode::kConstant) {
      // Constant map: broadcast the scalar constant to the map's shape.
      HloInstruction* constant = computation_->AddInstruction(root.Clone());
      HloInstruction* placed_instruction = computation_->AddInstruction(
          HloInstruction::CreateBroadcast(map->shape(), constant, {}));
      TF_RETURN_IF_ERROR(
          computation_->ReplaceInstruction(map, placed_instruction));
    } else {
      // General case: clone the root op, rewiring each of its parameter
      // operands to the matching map operand (parameter_number gives the
      // mapping, so operand order inside the mapped computation is honored).
      std::vector<HloInstruction*> params;
      for (int64_t o = 0; o < root.operands().size(); o++) {
        params.push_back(map->operands()[root.operand(o)->parameter_number()]);
      }
      HloInstruction* placed_instruction = computation_->AddInstruction(
          root.CloneWithNewOperands(map->shape(), params));
      TF_RETURN_IF_ERROR(
          computation_->ReplaceInstruction(map, placed_instruction));
    }
    changed_ = true;
    return absl::OkStatus();
  }
  return absl::OkStatus();
}
// Pass driver: applies the visitor to every computation in the module.
absl::StatusOr<bool> MapInliner::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  // The visitor's computation pointer is (re)set inside Run for each
  // computation, so the initial nullptr is never dereferenced.
  MapInlinerVisitor visitor(nullptr);
  bool changed = false;
  for (HloComputation* computation : module->computations(execution_threads)) {
    TF_ASSIGN_OR_RETURN(bool computation_changed, visitor.Run(computation));
    changed |= computation_changed;
  }
  return changed;
}
}  // namespace xla
// Unit tests for MapInliner covering the three inlining cases: a binary-op
// root, a constant root, and a parameter (identity) root — plus operand-order
// preservation for a non-commutative op.
#include "xla/service/map_inliner.h"
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/xla_data.pb.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
using MapInlinerTest = HloTestBase;
// map(max, {a, b}) becomes maximum(a, b) applied to the original operands.
TEST_F(MapInlinerTest, MapMax) {
  Shape r0f32 = ShapeUtil::MakeShape(F32, {});
  // Mapped computation: max(x, y) over scalars.
  auto max_builder = HloComputation::Builder(TestName());
  auto param1 = max_builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0f32, "x"));
  auto param2 = max_builder.AddInstruction(
      HloInstruction::CreateParameter(1, r0f32, "y"));
  max_builder.AddInstruction(HloInstruction::CreateBinary(
      param1->shape(), HloOpcode::kMaximum, param1, param2));
  auto max_f32 = max_builder.Build();
  auto builder = HloComputation::Builder("MapMaxFunction");
  auto lhs = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR1<float>({1, 2, 3, 4})));
  auto rhs = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR1<float>({4, 3, 2, 1})));
  builder.AddInstruction(
      HloInstruction::CreateMap(lhs->shape(), {lhs, rhs}, max_f32.get()));
  auto computation = builder.Build();
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEmbeddedComputation(std::move(max_f32));
  hlo_module->AddEntryComputation(std::move(computation));
  MapInliner inliner;
  EXPECT_TRUE(inliner.Run(hlo_module.get()).value());
  // The inlined maximum must reuse the exact original constant instructions.
  EXPECT_THAT(hlo_module->entry_computation()->root_instruction(),
              op::Maximum(lhs, rhs));
  auto result = ExecuteAndTransfer(hlo_module->Clone(), {});
  auto expected = LiteralUtil::CreateR1<float>({4, 3, 3, 4});
  EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
// A map whose computation ignores its parameter and returns a constant is
// replaced by broadcast(constant) of the map's shape.
TEST_F(MapInlinerTest, MapConstant) {
  Shape r0f32 = ShapeUtil::MakeShape(F32, {});
  auto const2_builder = HloComputation::Builder(TestName());
  auto param1 = const2_builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0f32, "x"));
  (void)param1;  // parameter intentionally unused by the mapped computation
  const2_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
  auto const2_f32 = const2_builder.Build();
  auto builder = HloComputation::Builder("MapConstFunction");
  auto lhs = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR2<float>({{1, 2, 3, 4}, {5, 6, 7, 8}})));
  builder.AddInstruction(
      HloInstruction::CreateMap(lhs->shape(), {lhs}, const2_f32.get()));
  auto computation = builder.Build();
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEmbeddedComputation(std::move(const2_f32));
  hlo_module->AddEntryComputation(std::move(computation));
  HloInstruction* root = hlo_module->entry_computation()->root_instruction();
  MapInliner inliner;
  EXPECT_TRUE(inliner.Run(hlo_module.get()).value());
  root = hlo_module->entry_computation()->root_instruction();
  EXPECT_THAT(root, op::Broadcast(op::Constant()));
  auto result = ExecuteAndTransfer(hlo_module->Clone(), {});
  auto expected = LiteralUtil::CreateR2<float>({{2, 2, 2, 2}, {2, 2, 2, 2}});
  EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
// The mapped computation subtracts parameter(0) FROM parameter(1); inlining
// must honor parameter numbers, not builder order, hence Subtract(rhs, lhs).
TEST_F(MapInlinerTest, MapSubtractOppositeOrder) {
  Shape r0f32 = ShapeUtil::MakeShape(F32, {});
  auto max_builder = HloComputation::Builder(TestName());
  // NOTE: parameter indices are deliberately swapped relative to creation
  // order ("x" is parameter 1, "y" is parameter 0).
  auto param1 = max_builder.AddInstruction(
      HloInstruction::CreateParameter(1, r0f32, "x"));
  auto param2 = max_builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0f32, "y"));
  max_builder.AddInstruction(HloInstruction::CreateBinary(
      param1->shape(), HloOpcode::kSubtract, param1, param2));
  auto max_f32 = max_builder.Build();
  auto builder = HloComputation::Builder("MapSubFunction");
  auto lhs = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR1<float>({1, 2, 3, 4})));
  auto rhs = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR1<float>({4, 3, 2, 1})));
  builder.AddInstruction(
      HloInstruction::CreateMap(lhs->shape(), {lhs, rhs}, max_f32.get()));
  auto computation = builder.Build();
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEmbeddedComputation(std::move(max_f32));
  hlo_module->AddEntryComputation(std::move(computation));
  MapInliner inliner;
  EXPECT_TRUE(inliner.Run(hlo_module.get()).value());
  EXPECT_THAT(hlo_module->entry_computation()->root_instruction(),
              op::Subtract(rhs, lhs));
  auto result = ExecuteAndTransfer(hlo_module->Clone(), {});
  auto expected = LiteralUtil::CreateR1<float>({3, 1, -1, -3});
  EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
// Identity map (root is parameter 1): the map collapses to its second
// operand, with no new instruction created.
TEST_F(MapInlinerTest, MapParameter) {
  Shape r0f32 = ShapeUtil::MakeShape(F32, {});
  auto param_builder = HloComputation::Builder(TestName());
  param_builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32, "p0"));
  param_builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32, "p1"));
  auto param_f32 = param_builder.Build();
  auto builder = HloComputation::Builder("MapParamFunction");
  auto lhs = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1)));
  auto rhs = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(4)));
  builder.AddInstruction(
      HloInstruction::CreateMap(lhs->shape(), {lhs, rhs}, param_f32.get()));
  auto computation = builder.Build();
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEmbeddedComputation(std::move(param_f32));
  hlo_module->AddEntryComputation(std::move(computation));
  MapInliner inliner;
  EXPECT_TRUE(inliner.Run(hlo_module.get()).value());
  EXPECT_THAT(hlo_module->entry_computation()->root_instruction(), rhs);
  auto result = ExecuteAndTransfer(hlo_module->Clone(), {});
  auto expected = LiteralUtil::CreateR0<float>(4);
  EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/map_inliner.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/map_inliner_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
f522495c-3a6f-4a50-bfd7-e7546441fa98
cpp
tensorflow/tensorflow
triangular_solve_expander
third_party/xla/xla/service/triangular_solve_expander.cc
third_party/xla/xla/service/triangular_solve_expander_test.cc
#include "xla/service/triangular_solve_expander.h" #include <algorithm> #include <cstdint> #include <memory> #include <numeric> #include <string> #include <vector> #include "absl/status/statusor.h" #include "absl/strings/str_format.h" #include "absl/types/span.h" #include "xla/hlo/builder/lib/constants.h" #include "xla/hlo/builder/lib/math.h" #include "xla/hlo/builder/lib/matrix.h" #include "xla/hlo/builder/lib/slicing.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/hlo/builder/xla_computation.h" #include "xla/hlo/ir/hlo_clone_context.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/hlo_module_config.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/util.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" namespace xla { namespace { XlaOp DiagonalBlocks(XlaOp a, int64_t block_size) { XlaBuilder* builder = a.builder(); return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(a)); int ndims = shape.rank(); int64_t n = ShapeUtil::GetDimension(shape, -1); int64_t num_blocks = n / block_size; absl::Span<int64_t const> batch_dims = absl::MakeConstSpan( shape.dimensions().begin(), shape.dimensions().begin() + (ndims - 2)); XlaOp diag_blocks; if (n == block_size) { std::vector<int64_t> permutation(ndims); std::iota(permutation.begin(), permutation.end(), 1); permutation.insert(permutation.end() - 2, 0); return Transpose(Broadcast(a, {1}), permutation); } if (n > block_size) { auto start_indices = Transpose(Broadcast(Mul(Iota(builder, S32, num_blocks), ConstantR0<int32_t>(builder, block_size)), {2}), {1, 0}); std::vector<int64_t> slice_sizes(ndims); GatherDimensionNumbers dim_numbers; for (int i = 0; i < ndims - 2; ++i) { dim_numbers.add_offset_dims(i); slice_sizes[i] = ShapeUtil::GetDimension(shape, i); } slice_sizes[ndims - 2] = slice_sizes[ndims - 1] = block_size; dim_numbers.add_offset_dims(ndims - 1); 
dim_numbers.add_offset_dims(ndims); dim_numbers.add_start_index_map(ndims - 2); dim_numbers.add_start_index_map(ndims - 1); dim_numbers.set_index_vector_dim(1); diag_blocks = Gather(a, start_indices, dim_numbers, slice_sizes); } if (n % block_size != 0) { auto last_blocks = SliceInMinorDims(a, {n - n % block_size, n - n % block_size}, {n, n}); PaddingConfig config = MakeNoPaddingConfig(ndims); int64_t padding = block_size - n % block_size; config.mutable_dimensions(ndims - 2)->set_edge_padding_high(padding); last_blocks = Pad(last_blocks, Zero(builder, shape.element_type()), config); auto eye = IdentityMatrix(builder, shape.element_type(), padding, padding); config = MakeNoPaddingConfig(2); config.mutable_dimensions(0)->set_edge_padding_low(n % block_size); eye = Pad(eye, Zero(builder, shape.element_type()), config); eye = Broadcast(eye, batch_dims); last_blocks = ConcatInDim(builder, {last_blocks, eye}, ndims - 1); TF_ASSIGN_OR_RETURN(Shape blocks_shape, builder->GetShape(last_blocks)); auto shape_dims = blocks_shape.dimensions(); auto last_blocks_dims = std::vector<int64_t>(ndims); std::copy(shape_dims.begin(), shape_dims.end(), last_blocks_dims.begin()); last_blocks_dims.insert(last_blocks_dims.end() - 2, 1); last_blocks = Reshape(last_blocks, last_blocks_dims); if (n > block_size) { diag_blocks = ConcatInDim(builder, {diag_blocks, last_blocks}, ndims - 2); } else { diag_blocks = last_blocks; } } return diag_blocks; }); } XlaOp SolveWithInvertedDiagonalBlocks(XlaOp a, XlaOp b, XlaOp inv_diag_blocks, bool left_side, bool lower, bool transpose_a, bool conjugate_a, PrecisionConfig::Precision precision) { XlaBuilder* builder = a.builder(); return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(Shape blocks_shape, builder->GetShape(inv_diag_blocks)); TF_ASSIGN_OR_RETURN(Shape b_shape, builder->GetShape(b)); int64_t block_size = ShapeUtil::GetDimension(blocks_shape, -1); TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a)); 
int64_t ndims = a_shape.rank(); int64_t n = ShapeUtil::GetDimension(a_shape, -1); int64_t num_blocks = n / block_size + (n % block_size != 0); int64_t m_dim = (left_side) ? -1 : -2; int64_t m = ShapeUtil::GetDimension(b_shape, m_dim); std::vector<XlaOp> update_ops; int bdims = b_shape.rank(); int64_t block_dim = (left_side) ? bdims - 2 : bdims - 1; XlaOp x; for (int i = 0; i < num_blocks; i++) { bool backward = left_side ^ lower ^ transpose_a; auto j = backward ? num_blocks - 1 - i : i; int64_t block = (n % block_size != 0 && j + 1 == num_blocks) ? n % block_size : block_size; auto inv_block = MaybeConjugate(Collapse(SliceInMinorDims(inv_diag_blocks, {j, 0, 0}, {j + 1, block, block}), {ndims - 2, ndims - 1}), conjugate_a); int64_t k = std::min((j + 1) * block_size, n); std::vector<int64_t> start = {j * block_size, 0}; std::vector<int64_t> end = {k, m}; if (!left_side) { std::swap(start[0], start[1]); std::swap(end[0], end[1]); } auto b_row = SliceInMinorDims(b, start, end); XlaOp remainder; if (i == 0) { remainder = b_row; } else { if (backward) { start = {j * block_size, std::max(int64_t{0}, (num_blocks - i) * block_size)}; end = {k, n}; } else { start = {j * block_size, 0}; end = {k, std::min(i * block_size, n)}; } if (!left_side ^ transpose_a) { std::swap(start[0], start[1]); std::swap(end[0], end[1]); } auto a_row = MaybeConjugate(SliceInMinorDims(a, start, end), conjugate_a); if (left_side) { remainder = b_row - BatchDot(a_row, transpose_a, x, false, precision); } else { remainder = b_row - BatchDot(x, false, a_row, transpose_a, precision); } } XlaOp x_update; if (left_side) { x_update = BatchDot(inv_block, transpose_a, remainder, false, precision); } else { x_update = BatchDot(remainder, false, inv_block, transpose_a, precision); } if (i == 0) { x = x_update; } else { if (backward) { x = ConcatInDim(builder, {x_update, x}, block_dim); } else { x = ConcatInDim(builder, {x, x_update}, block_dim); } } } return x; }); } } XlaOp 
TriangularSolveExpander::InvertDiagonalBlocks( XlaOp diag_blocks, bool lower_triangular, PrecisionConfig::Precision precision) { XlaBuilder* builder = diag_blocks.builder(); return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(diag_blocks)); int64_t block_size = ShapeUtil::GetDimension(shape, -1); int64_t num_blocks = ShapeUtil::ElementsIn(shape) / IPow(block_size, 2); diag_blocks = Reshape(diag_blocks, {num_blocks, block_size, block_size}); diag_blocks = Triangle(diag_blocks, lower_triangular); auto diags = GetMatrixDiagonal(diag_blocks); auto scaled_diag_blocks = Div(diag_blocks, diags, {0, 2}); auto identity = IdentityMatrix(builder, shape.element_type(), block_size, block_size); auto neg_identity = -identity; auto pos_one = Reshape(One(builder, shape.element_type()), {1, 1}); auto start_index = ConstantR0<int>(builder, lower_triangular ? 0 : block_size - 1); auto output_block = DynamicUpdateSlice(neg_identity, pos_one, {start_index, start_index}); XlaOp output = Broadcast(output_block, {num_blocks}); std::vector<Shape> tuple_shapes = { ShapeUtil::MakeShape(S32, {}), ShapeUtil::MakeShape(shape.element_type(), {num_blocks, block_size, block_size}), ShapeUtil::MakeShape(shape.element_type(), {num_blocks, block_size, block_size})}; Shape tuple_shape = ShapeUtil::MakeTupleShape(tuple_shapes); auto init_i = One(builder, S32); auto init = Tuple(builder, {init_i, output, scaled_diag_blocks}); std::unique_ptr<XlaBuilder> condb = builder->CreateSubBuilder("InvertDiagCond"); { auto i = GetTupleElement( Parameter(condb.get(), 0, tuple_shape, "InvertDiagCondTuple"), 0); Lt(i, ConstantR0<int32_t>(condb.get(), block_size)); } TF_ASSIGN_OR_RETURN(auto cond, condb->Build()); std::unique_ptr<XlaBuilder> bodyb = builder->CreateSubBuilder("InvertDiagBody"); { auto input_tuple = Parameter(bodyb.get(), 0, tuple_shape, "InvertDiagBodyTuple"); auto i = GetTupleElement(input_tuple, 0); auto body_out = 
GetTupleElement(input_tuple, 1);
      // Third tuple element: the (read-only) blocks being inverted.
      auto body_input = GetTupleElement(input_tuple, 2);
      auto zero = ConstantR0<int32_t>(bodyb.get(), 0);
      // Lower-triangular solves walk rows top-down; upper-triangular walks
      // bottom-up, so mirror the row index for the upper case.
      auto j = lower_triangular ? i : ScalarLike(i, block_size - 1) - i;
      auto input_row =
          DynamicSlice(body_input, {zero, j, zero}, {num_blocks, 1, block_size});
      // Batched matmul: contract the row of the input block against the
      // partially-built inverse (batch dim 0 on both operands).
      DotDimensionNumbers dnums;
      dnums.add_lhs_batch_dimensions(0);
      dnums.add_rhs_batch_dimensions(0);
      dnums.add_lhs_contracting_dimensions(2);
      dnums.add_rhs_contracting_dimensions(1);
      PrecisionConfig precision_proto;
      precision_proto.add_operand_precision(precision);
      precision_proto.add_operand_precision(precision);
      // Forward/back-substitution step: new row = -(row . inverse-so-far).
      auto update = -DotGeneral(input_row, body_out, dnums, &precision_proto);
      body_out = DynamicUpdateSlice(body_out, update, {zero, j, zero});
      auto next_i = i + ScalarLike(i, 1);
      Tuple(bodyb.get(), {next_i, body_out, body_input});
    }
    TF_ASSIGN_OR_RETURN(auto body, bodyb->Build());

    // Run the substitution loop, then divide by the diagonal entries to
    // finish the inversion, and restore the original block layout.
    auto invert_while = While(cond, body, init);
    auto inv_diag_blocks = GetTupleElement(invert_while, 1);
    inv_diag_blocks = Div(inv_diag_blocks, diags, {0, 1});
    return Reshape(inv_diag_blocks, shape.dimensions());
  });
}

// Solves the triangular system by explicitly inverting the diagonal blocks of
// `a` and then substituting block-by-block. Preconditioning: zeroes out the
// irrelevant triangle (and, for unit-diagonal solves, replaces the diagonal
// with ones) so the inversion only sees the meaningful entries.
XlaOp TriangularSolveExpander::SolveByInvertingDiagonalBlocks(
    XlaOp a, XlaOp b, bool left_side, bool lower, bool transpose_a,
    bool conjugate_a, bool unit_diagonal,
    PrecisionConfig::Precision precision) {
  XlaBuilder* builder = a.builder();
  return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));
    const int64_t ndims = a_shape.rank();
    int64_t k = ShapeUtil::GetDimension(a_shape, -1);
    if (unit_diagonal) {
      // Keep the strict triangle and force an explicit unit diagonal.
      a = lower ? Select(TriangleMask(a, -1), a, ZerosLike(a))
                : Select(TriangleMask(a, 0), ZerosLike(a), a);
      a = xla::Add(a, IdentityMatrix(builder, a_shape.element_type(), k, k),
                   {ndims - 2, ndims - 1});
    } else {
      a = Triangle(a, lower);
    }
    // Block size is capped by the matrix dimension itself.
    int64_t block_size = std::min(block_size_, k);
    auto diag_blocks = DiagonalBlocks(a, block_size);
    auto inv_diag_blocks = InvertDiagonalBlocks(diag_blocks, lower, precision);
    return SolveWithInvertedDiagonalBlocks(a, b, inv_diag_blocks, left_side,
                                           lower, transpose_a, conjugate_a,
                                           precision);
  });
}

// Naive row-by-row substitution solve. Used for small systems with many
// batches, where the blocked/inversion strategy does not pay off.
XlaOp TriangularSolveExpander::SolveDirectly(
    XlaOp a, XlaOp b, bool left_side, bool lower, bool transpose_a,
    bool conjugate_a, bool unit_diagonal,
    PrecisionConfig::Precision precision) {
  XlaBuilder* builder = a.builder();
  return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));
    TF_ASSIGN_OR_RETURN(Shape b_shape, builder->GetShape(b));
    int64_t m = ShapeUtil::GetDimension(b_shape, -2);
    int64_t n = ShapeUtil::GetDimension(b_shape, -1);
    const int64_t a_size = ShapeUtil::GetDimension(a_shape, -1);
    a = MaybeConjugate(a, conjugate_a);
    // Whether substitution proceeds from the first row/column or the last one
    // depends on the combination of triangle orientation, transposition, and
    // which side `a` multiplies from.
    bool backwards = transpose_a ^ lower ^ !left_side;
    for (int64_t i = 0; i < a_size; ++i) {
      int64_t j = backwards ? i : (a_size - i - 1);
      // Slice out the row (left solve) or column (right solve) of `b` being
      // updated this iteration.
      std::vector<int64_t> b_row_start, b_row_end;
      if (left_side) {
        b_row_start = {j, 0};
        b_row_end = {j + 1, n};
      } else {
        b_row_start = {0, j};
        b_row_end = {m, j + 1};
      }
      auto b_row = SliceInMinorDims(b, b_row_start, b_row_end);
      // The already-solved portion of `a` that this row depends on.
      std::vector<int64_t> a_start = {j, backwards ? 0 : (j + 1)};
      std::vector<int64_t> a_end = {j + 1, backwards ? j : a_size};
      if (transpose_a ^ !left_side) {
        std::swap(a_start[0], a_start[1]);
        std::swap(a_end[0], a_end[1]);
      }
      auto a_chunk = SliceInMinorDims(a, a_start, a_end);
      if (left_side) {
        bool which = transpose_a ^ lower;
        auto b_chunk =
            SliceInMinorDims(b, {which ? 0 : (j + 1), 0}, {which ? j : m, n});
        b_row = b_row -
                BatchDot(a_chunk, transpose_a, b_chunk, false, precision);
      } else {
        bool which = transpose_a ^ !lower;
        auto b_chunk =
            SliceInMinorDims(b, {0, which ? 0 : (j + 1)}, {m, which ? j : n});
        b_row = b_row -
                BatchDot(b_chunk, false, a_chunk, transpose_a, precision);
      }
      if (!unit_diagonal) {
        // Divide through by the diagonal entry (skipped when it is known to
        // be 1).
        auto a_diag = SliceInMinorDims(a, {j, j}, {j + 1, j + 1});
        b_row = b_row / a_diag;
      }
      b = UpdateSliceInMinorDims(b, b_row, b_row_start);
    }
    return b;
  });
}

// Validates shapes and dispatches to the appropriate solve strategy.
// Returns `b` unchanged for zero-element systems and handles the 1x1 case
// with a plain division.
XlaOp TriangularSolveExpander::BuildTriangularSolve(
    XlaOp a, XlaOp b, bool left_side, bool lower, bool transpose_a,
    bool conjugate_a, bool unit_diagonal, int64_t block_size,
    PrecisionConfig::Precision precision) {
  XlaBuilder* builder = a.builder();
  return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));
    TF_ASSIGN_OR_RETURN(Shape b_shape, builder->GetShape(b));
    if (a_shape.rank() != b_shape.rank()) {
      return InvalidArgument(
          "Arguments to TriangularSolve have shapes with different ranks: "
          "%s vs. %s",
          ShapeUtil::HumanString(a_shape), ShapeUtil::HumanString(b_shape));
    }
    const int64_t ndims = a_shape.rank();
    if (ndims < 2) {
      return InvalidArgument(
          "Arguments to TriangularSolve was rank %d but must have rank >= 2.",
          ndims);
    }
    // All leading (batch) dimensions must agree between `a` and `b`.
    std::vector<int64_t> batch_dimensions;
    int64_t batch = 1;
    for (int i = 0; i < ndims - 2; ++i) {
      int64_t a_size = a_shape.dimensions(i);
      int64_t b_size = b_shape.dimensions(i);
      if (a_size != b_size) {
        return InvalidArgument(
            "Batch dimensions of arguments to TriangularSolve must be equal; "
            "shapes were %s and %s.",
            ShapeUtil::HumanString(a_shape), ShapeUtil::HumanString(b_shape));
      }
      batch_dimensions.push_back(a_size);
      batch *= a_size;
    }
    if (ShapeUtil::GetDimension(a_shape, -1) !=
        ShapeUtil::GetDimension(a_shape, -2)) {
      return InvalidArgument(
          "The 'a' argument to TriangularSolve must be a batched square matrix;"
          " shape was: %s",
          ShapeUtil::HumanString(a_shape));
    }
    const int64_t m = ShapeUtil::GetDimension(b_shape, -2);
    const int64_t n = ShapeUtil::GetDimension(b_shape, -1);
    if ((left_side ? m : n) != ShapeUtil::GetDimension(a_shape, -1)) {
      return InvalidArgument(
          "Arguments to TriangularSolve have incompatible matrix shapes %s and "
          "%s",
          ShapeUtil::HumanString(a_shape), ShapeUtil::HumanString(b_shape));
    }
    int64_t a_size = ShapeUtil::GetDimension(a_shape, -1);
    // Nothing to solve when b is empty.
    if (ShapeUtil::IsZeroElementArray(b_shape)) {
      return b;
    }
    // 1x1 system: the solve degenerates to an elementwise division.
    if (a_size == 1) {
      return unit_diagonal ? b : Div(b, MaybeConjugate(a, conjugate_a));
    }
    // Heuristic: many small batched systems are cheaper to solve directly
    // than via the blocked-inversion path.
    if (UseDirectSolves() && batch > block_size_ / 16 &&
        a_size < block_size_ / 4) {
      return SolveDirectly(a, b, left_side, lower, transpose_a, conjugate_a,
                           unit_diagonal, precision);
    } else {
      return SolveByInvertingDiagonalBlocks(a, b, left_side, lower, transpose_a,
                                            conjugate_a, unit_diagonal,
                                            precision);
    }
  });
}

TriangularSolveExpander::TriangularSolveExpander(int64_t block_size)
    : block_size_(block_size) {
  // A non-positive block size would make the blocking logic meaningless.
  CHECK_GE(block_size_, 1);
}

// This pass expands only kTriangularSolve instructions.
bool TriangularSolveExpander::InstructionMatchesPattern(
    HloInstruction* instruction) {
  return instruction->opcode() == HloOpcode::kTriangularSolve;
}

// Replaces a triangular-solve instruction with a call to an expanded
// computation. Computations are cached by a name that encodes the operand
// shapes and solve options, so identical solves share one expansion.
absl::StatusOr<HloInstruction*> TriangularSolveExpander::ExpandInstruction(
    HloInstruction* instruction) {
  const TriangularSolveOptions& options =
      instruction->triangular_solve_options();
  const std::string name = absl::StrFormat(
      "xla.triangular_solve_%s_%s_%s_%s_%s_%s",
      instruction->operand(0)->shape().ToString(),
      instruction->operand(1)->shape().ToString(),
      options.left_side() ? "left" : "right",
      options.lower() ? "lower" : "upper",
      TriangularSolveOptions_Transpose_Name(options.transpose_a()),
      options.unit_diagonal() ? "unit" : "nonunit");
  HloModule* module = instruction->GetModule();
  // emplace() either inserts a nullptr entry or finds the cached one; the
  // reference lets us fill the cache in place below.
  HloComputation*& computation =
      computation_cache_.emplace(name, nullptr).first->second;
  if (!computation) {
    // Build the expansion with the XLA client builder, then clone the
    // resulting entry computation into this module.
    XlaBuilder builder(name);
    XlaOp a = Parameter(&builder, 0, instruction->operand(0)->shape(), "a");
    XlaOp b = Parameter(&builder, 1, instruction->operand(1)->shape(), "b");
    bool transpose_a =
        options.transpose_a() != TriangularSolveOptions::NO_TRANSPOSE;
    // ADJOINT means transpose + conjugate.
    bool conjugate_a = options.transpose_a() == TriangularSolveOptions::ADJOINT;
    BuildTriangularSolve(a, b, options.left_side(), options.lower(),
                         transpose_a, conjugate_a, options.unit_diagonal(),
                         block_size_, PrecisionConfig::HIGHEST);
    TF_ASSIGN_OR_RETURN(XlaComputation xla_computation, builder.Build());
    TF_ASSIGN_OR_RETURN(ProgramShape program_shape,
                        xla_computation.GetProgramShape());
    HloModuleConfig config(program_shape);
    TF_ASSIGN_OR_RETURN(auto new_module, HloModule::CreateFromProto(
                                             xla_computation.proto(), config));
    HloCloneContext context(module);
    computation =
        module->DeepCloneComputation(new_module->entry_computation(), &context);
  }
  return instruction->parent()->AddInstruction(HloInstruction::CreateCall(
      instruction->shape(), instruction->operands(), computation));
}

}  // namespace xla
#include "xla/service/triangular_solve_expander.h"

#include <memory>
#include <utility>

#include "xla/literal.h"
#include "xla/reference_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"

namespace xla {
namespace {

// Parameterized over the expander's block size so the same solve is checked
// across many blocking configurations.
class TriangularExpanderTest : public HloTestBase,
                               public ::testing::WithParamInterface<int32_t> {};

// Expands a unit-diagonal lower-triangular solve at the given block size,
// executes it, and verifies the result by multiplying back: a * x == b.
TEST_P(TriangularExpanderTest, TestBlockSize) {
  auto block_size = GetParam();
  std::string hlo_string = R"(
    HloModule TensorFlowTriangularSolve

    ENTRY main {
      a = f32[256,256]{1,0} parameter(0)
      b = f32[256,192]{1,0} parameter(1)
      ROOT triangular-solve = f32[256,192]{1,0} triangular-solve(a, b),
                                    left_side=true, unit_diagonal=true,
                                    lower=true, transpose_a=NO_TRANSPOSE
    }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));

  {
    // Run the expansion pass; it must report that it changed the module.
    TriangularSolveExpander triangular_solve_expander(block_size);

    TF_ASSERT_OK_AND_ASSIGN(
        bool result, RunHloPass(&triangular_solve_expander, module.get()));
    EXPECT_TRUE(result);
  }

  // Well-conditioned test matrix: unit diagonal with a small sub-diagonal.
  Array2D<float> a(256, 256);
  for (int64_t row = 0; row < a.dim(0); ++row) {
    a(row, row) = 1;
    if (row > 0) {
      a(row, row - 1) = 0.01;
    }
  }

  // Right-hand side filled with distinct values in (0, 1].
  Array2D<float> b(256, 192);
  const float kMax = static_cast<float>(b.dim(0) * b.dim(1) + 1);
  for (int64_t row = 0; row < b.dim(0); ++row) {
    for (int64_t col = 0; col < b.dim(1); ++col) {
      b(row, col) = static_cast<float>(row + col + 1) / kMax;
    }
  }
  auto la = LiteralUtil::CreateR2FromArray2D(a);
  auto lb = LiteralUtil::CreateR2FromArray2D(b);

  TF_ASSERT_OK_AND_ASSIGN(Literal lx, Execute(std::move(module), {&la, &lb}));

  auto x_shape = lx.shape();
  EXPECT_EQ(x_shape.dimensions_size(), 2);
  EXPECT_EQ(x_shape.dimensions(0), b.dim(0));
  EXPECT_EQ(x_shape.dimensions(1), b.dim(1));

  Array2D<float> x(x_shape.dimensions(0), x_shape.dimensions(1));
  x.SetValues(lx.data<float>());

  // Verify by reconstruction: a * x should recover b up to tolerance.
  auto ref_b = ReferenceUtil::MatmulArray2D(a, x);
  auto ref_lb = LiteralUtil::CreateR2FromArray2D(*ref_b);

  EXPECT_TRUE(
      LiteralTestUtil::NearOrEqual(ref_lb, lb, ErrorSpec{0.001, 0.001}));
}

// Sweep block sizes 2, 9, 16, ... up to 255.
INSTANTIATE_TEST_CASE_P(TriangularExpanderTestInstances,
                        TriangularExpanderTest, ::testing::Range(2, 256, 7));

}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/triangular_solve_expander.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/triangular_solve_expander_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
849cb0cf-5da9-4a0b-9798-8ff341a34518
cpp
tensorflow/tensorflow
transpose_folding
third_party/xla/xla/service/transpose_folding.cc
third_party/xla/xla/service/transpose_folding_test.cc
#include "xla/service/transpose_folding.h"

#include <algorithm>
#include <utility>
#include <vector>

#include "absl/algorithm/container.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"

namespace xla {
namespace {

// Collects the operand indices of `convolution` that are transposes, then
// lets the user-supplied callback decide which of them may be folded.
// Returns empty for non-convolution instructions.
TransposeFolding::OperandIndices CanFoldOperandsIntoConvolution(
    const HloInstruction& convolution,
    const TransposeFolding::TransposableConvOperandsFn&
        transposable_conv_operands) {
  if (HloOpcode::kConvolution != convolution.opcode()) {
    return {};
  }

  TransposeFolding::OperandIndices operand_set;
  for (int64_t i = 0; i < convolution.operand_count(); ++i) {
    auto& operand = *convolution.operand(i);
    if (operand.opcode() == HloOpcode::kTranspose) {
      operand_set.push_back(i);
    }
  }

  return transposable_conv_operands(convolution, operand_set);
}

// True iff `instruction` is a transpose whose permutation actually moves at
// least one dimension (identity transposes are not worth folding).
bool IsNonIdentityTranspose(const HloInstruction* instruction) {
  if (instruction->opcode() == HloOpcode::kTranspose) {
    for (int dim = 0; dim < instruction->dimensions().size(); ++dim) {
      if (dim != instruction->dimensions(dim)) {
        return true;
      }
    }
  }
  return false;
}

// Rewrites each dimension number in `dims` through the transpose permutation,
// so dot/convolution dimension numbers refer to the transpose's operand.
void TransposeDims(tsl::protobuf::RepeatedField<int64_t>& dims,
                   absl::Span<const int64_t> transpose_dims) {
  for (auto& dim : dims) {
    dim = transpose_dims[dim];
  }
}

using InstructionOperandsPair =
    std::pair<HloInstruction*, TransposeFolding::OperandIndices>;

// Folds the operands of `dot` listed in `pair.second` (each a transpose) into
// the dot by remapping its dimension numbers and bypassing the transposes.
absl::Status FoldTransposeIntoDot(InstructionOperandsPair& pair) {
  HloInstruction* dot = pair.first;

  DotDimensionNumbers new_dot_dims = dot->dot_dimension_numbers();
  HloInstruction* lhs = dot->mutable_operand(0);
  HloInstruction* rhs = dot->mutable_operand(1);

  for (int64_t operand_index : pair.second) {
    if (operand_index == 0) {
      TransposeDims(*new_dot_dims.mutable_lhs_contracting_dimensions(),
                    lhs->dimensions());
      TransposeDims(*new_dot_dims.mutable_lhs_batch_dimensions(),
                    lhs->dimensions());
      lhs = lhs->mutable_operand(0);
    } else {
      CHECK_EQ(operand_index, 1);
      TransposeDims(*new_dot_dims.mutable_rhs_contracting_dimensions(),
                    rhs->dimensions());
      TransposeDims(*new_dot_dims.mutable_rhs_batch_dimensions(),
                    rhs->dimensions());
      rhs = rhs->mutable_operand(0);
    }
  }

  return dot->parent()->ReplaceWithNewInstruction(
      dot, HloInstruction::CreateDot(dot->shape(), lhs, rhs, new_dot_dims,
                                     dot->precision_config()));
}

// Folds the transposed operands of a convolution into the convolution's
// dimension numbers. Returns true if the convolution was replaced.
bool FoldTransposeIntoConvolution(InstructionOperandsPair& pair) {
  auto& convolution = *pair.first;
  auto& operand_indices = pair.second;

  if (operand_indices.empty()) {
    return false;
  }

  const ConvolutionDimensionNumbers& dnums =
      convolution.convolution_dimension_numbers();
  ConvolutionDimensionNumbers new_dnums = dnums;

  HloInstruction* new_lhs;
  const int64_t kLhsIdx = 0;
  if (absl::c_linear_search(operand_indices, kLhsIdx)) {
    HloInstruction& transpose = *convolution.mutable_operand(kLhsIdx);
    const auto& transpose_dimensions = transpose.dimensions();
    HloInstruction& transpose_operand = *transpose.mutable_operand(0);

    // Everything remains the same except for the input/output dimension
    // numbers. We need to apply the transpose permutation to the original
    // shape to figure out what the new logical dimensions are.
    new_dnums.set_input_batch_dimension(
        transpose_dimensions[dnums.input_batch_dimension()]);
    new_dnums.set_input_feature_dimension(
        transpose_dimensions[dnums.input_feature_dimension()]);
    for (auto& input_spatial_dimension :
         *new_dnums.mutable_input_spatial_dimensions()) {
      input_spatial_dimension = transpose_dimensions[input_spatial_dimension];
    }
    new_lhs = &transpose_operand;
  } else {
    new_lhs = convolution.mutable_operand(kLhsIdx);
  }

  HloInstruction* new_rhs;
  const int64_t kRhsIdx = 1;
  if (absl::c_linear_search(operand_indices, kRhsIdx)) {
    HloInstruction& transpose = *convolution.mutable_operand(kRhsIdx);
    const auto& transpose_dimensions = transpose.dimensions();
    HloInstruction& transpose_operand = *transpose.mutable_operand(0);

    // Kernel dimension numbers are remapped the same way as input ones.
    new_dnums.set_kernel_input_feature_dimension(
        transpose_dimensions[dnums.kernel_input_feature_dimension()]);
    new_dnums.set_kernel_output_feature_dimension(
        transpose_dimensions[dnums.kernel_output_feature_dimension()]);
    for (auto& kernel_spatial_dimension :
         *new_dnums.mutable_kernel_spatial_dimensions()) {
      kernel_spatial_dimension = transpose_dimensions[kernel_spatial_dimension];
    }
    new_rhs = &transpose_operand;
  } else {
    new_rhs = convolution.mutable_operand(kRhsIdx);
  }

  auto new_conv = HloInstruction::CreateConvolve(
      convolution.shape(), new_lhs, new_rhs, convolution.feature_group_count(),
      convolution.batch_group_count(), convolution.window(), new_dnums,
      convolution.precision_config());
  TF_CHECK_OK(convolution.parent()->ReplaceWithNewInstruction(
      &convolution, std::move(new_conv)));

  return true;
}

}  // namespace

TransposeFolding::TransposeFolding(
    CanFoldTransposeOperand dot_can_fold_transpose_operand,
    TransposableConvOperandsFn transposable_conv_operands)
    : dot_can_fold_transpose_operand_(
          std::move(dot_can_fold_transpose_operand)),
      transposable_conv_operands_(std::move(transposable_conv_operands)) {}

// Pass entry point: finds foldable dot/convolution instructions in all
// non-fusion computations, then performs the folds. Candidates are collected
// first and rewritten afterwards, since folding mutates the graph being
// visited.
absl::StatusOr<bool> TransposeFolding::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  // Modifying the graph while traversing is dangerous, so we find all folding
  // opportunities before actually folding them.
  std::vector<InstructionOperandsPair> foldable_dots;
  std::vector<InstructionOperandsPair> foldable_convolutions;

  FunctionVisitor visit_fn([this, &foldable_dots, &foldable_convolutions](
                               HloInstruction* instruction) {
    if (instruction->opcode() == HloOpcode::kDot) {
      // Don't fold dots with a 1D operand.
      if ((instruction->operand(0)->shape().rank() < 2) ||
          (instruction->operand(1)->shape().rank() < 2)) {
        return absl::OkStatus();
      }

      OperandIndices operand_indices;
      for (int64_t i = 0; i < 2; ++i) {
        if (!IsNonIdentityTranspose(instruction->operand(i))) {
          continue;
        }

        TF_ASSIGN_OR_RETURN(bool can_fold_operand,
                            dot_can_fold_transpose_operand_(*instruction, i));

        if (can_fold_operand) {
          operand_indices.push_back(i);
        }
      }

      if (!operand_indices.empty()) {
        foldable_dots.emplace_back(instruction, operand_indices);
      }
    }

    {
      // Convolution check is a no-op for other opcodes (returns empty).
      OperandIndices operand_indices = CanFoldOperandsIntoConvolution(
          *instruction, transposable_conv_operands_);
      if (!operand_indices.empty()) {
        foldable_convolutions.emplace_back(instruction, operand_indices);
      }
    }
    return absl::OkStatus();
  });

  for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
    TF_RETURN_IF_ERROR(comp->Accept(&visit_fn));
  }

  bool changed = false;
  for (InstructionOperandsPair& pair : foldable_dots) {
    TF_RETURN_IF_ERROR(FoldTransposeIntoDot(pair));
    changed = true;
  }
  for (InstructionOperandsPair& pair : foldable_convolutions) {
    changed |= FoldTransposeIntoConvolution(pair);
  }
  return changed;
}

// Default dot-operand predicate: accepts only transposes that swap the two
// minor (row/column) dimensions while leaving every batch dimension fixed,
// with exactly one contracting dimension.
/*static*/ absl::StatusOr<bool>
TransposeFolding::IsRowColumnTransposeDotOperand(const HloInstruction& dot,
                                                 int64_t operand_idx) {
  TF_RET_CHECK(dot.opcode() == HloOpcode::kDot);
  TF_RET_CHECK(dot.operand_count() > operand_idx);

  const HloInstruction& transpose = *dot.operand(operand_idx);
  TF_RET_CHECK(transpose.opcode() == HloOpcode::kTranspose);

  const DotDimensionNumbers& dot_dims = dot.dot_dimension_numbers();

  auto batch_dims = (operand_idx == 0) ? dot_dims.lhs_batch_dimensions()
                                       : dot_dims.rhs_batch_dimensions();

  auto contracting_dims = (operand_idx == 0)
                              ? dot_dims.lhs_contracting_dimensions()
                              : dot_dims.rhs_contracting_dimensions();

  return (batch_dims.size() == transpose.shape().rank() - 2) &&
         (contracting_dims.size() == 1) &&
         absl::c_all_of(batch_dims, [&](int64_t dim) {
           return transpose.dimensions(dim) == dim;
         });
}

}  // namespace xla
#include "xla/service/transpose_folding.h"

#include <memory>
#include <vector>

#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/shape_inference.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status_matchers.h"

namespace xla {
namespace {

namespace op = xla::testing::opcode_matchers;

using ::tsl::testing::IsOkAndHolds;

using TransposeFoldingTest = HloTestBase;

// A row/column transpose feeding the rhs of a dot is folded into the dot's
// contracting dimensions.
TEST_F(TransposeFoldingTest, FoldDotTranspose) {
  constexpr absl::string_view kHloString = R"(
HloModule FoldDotTranspose

ENTRY entry_computation {
  x = f32[2,3]{1,0} parameter(0)
  y = f32[2,3]{1,0} parameter(1)
  transpose = f32[3,2]{1,0} transpose(y), dimensions={1,0}
  ROOT dot = f32[2,2]{1,0} dot(x, transpose), lhs_contracting_dims={1},
                                              rhs_contracting_dims={0}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));

  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));

  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Dot(op::Parameter(0), op::Parameter(1),
                      1, 1));
}

// The default predicate refuses transposes that permute a batch dimension.
TEST_F(TransposeFoldingTest, DontFoldTransposeOfBatchDimByDefault) {
  constexpr absl::string_view kHloString = R"(
HloModule FoldDotTranspose

ENTRY entry_computation {
  x = f32[2,3] parameter(0)
  y = f32[3,2] parameter(1)
  transpose = f32[2,3] transpose(y), dimensions={1,0}
  ROOT dot = f32[2] dot(x, transpose), lhs_batch_dims={0},
             rhs_batch_dims={0}, lhs_contracting_dims={1},
             rhs_contracting_dims={1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));

  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));
}

// A permissive predicate lets batch-dimension transposes fold too.
TEST_F(TransposeFoldingTest, FoldTransposeOfBatchWhenPermitted) {
  constexpr absl::string_view kHloString = R"(
HloModule FoldDotTranspose

ENTRY entry_computation {
  x = f32[5,2,3] parameter(0)
  y = f32[3,5,4] parameter(1)
  transpose = f32[5,3,4] transpose(y), dimensions={1,0,2}
  ROOT dot = f32[5,2,4] dot(x, transpose), lhs_batch_dims={0},
             rhs_batch_dims={0}, lhs_contracting_dims={2},
             rhs_contracting_dims={1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));

  TransposeFolding transpose_folding(
      [](const HloInstruction&, int64_t) { return true; });
  EXPECT_THAT(transpose_folding.Run(module.get()), IsOkAndHolds(true));

  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Dot(op::Parameter(0), op::Parameter(1),
                      2, 0));
}

// Dots with a rank-1 operand are skipped entirely.
TEST_F(TransposeFoldingTest, DontFoldTransposeOfRank1Dot) {
  constexpr absl::string_view kHloString = R"(
HloModule FoldDotTranspose

ENTRY entry_computation {
  x = f32[3] parameter(0)
  y = f32[3,2] parameter(1)
  transpose = f32[2,3] transpose(y), dimensions={1,0}
  ROOT dot = f32[2] dot(x, transpose), lhs_batch_dims={},
             rhs_batch_dims={}, lhs_contracting_dims={0},
             rhs_contracting_dims={1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));

  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));
}

// The default predicate requires exactly one contracting dimension.
TEST_F(TransposeFoldingTest, DontFoldTransposeOfDotWithoutContractingDims) {
  constexpr absl::string_view kHloString = R"(
HloModule FoldDotTranspose

ENTRY entry_computation {
  x = f32[3,4] parameter(0)
  y = f32[3,4,6,7] parameter(1)
  transpose = f32[3,4,7,6] transpose(y), dimensions={0,1,3,2}
  ROOT dot = f32[3,4,7,6] dot(x, transpose), lhs_batch_dims={0,1},
             rhs_batch_dims={0,1}, lhs_contracting_dims={},
             rhs_contracting_dims={}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));

  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));
}

// Folding also works when both dot operands are transposes of constants.
TEST_F(TransposeFoldingTest, FoldDotTransposeConstant) {
  constexpr absl::string_view kHloString = R"(
HloModule FoldDotTransposeConstant

ENTRY entry_computation {
  constant = f32[2,1]{1,0} constant({ { 1 }, { 2 } })
  transpose = f32[1,2]{1,0} transpose(constant), dimensions={1,0}
  constant.1 = f32[3,2]{1,0} constant({ { 1, 2 }, { 3, 4 }, { 5, 6 } })
  transpose.1 = f32[2,3]{1,0} transpose(constant.1), dimensions={1,0}
  ROOT dot = f32[1,3]{1,0} dot(transpose, transpose.1),
             lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));

  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));

  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Dot(op::Constant(), op::Constant(),
                      0, 1));
}

// Sanity check of OutlineExpressionFromComputation: outlined arithmetic ends
// up in a callee with the constants passed as operands.
// NOTE(review): this test exercises outlining, not transpose folding —
// presumably kept here for historical reasons.
TEST_F(TransposeFoldingTest, FuseDotWithConstantOperands) {
  auto builder = HloComputation::Builder("entry");
  // (1.0 + 2.0) * (2.0 - 3.0)
  HloInstruction* const1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  HloInstruction* const2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  HloInstruction* const3 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
  HloInstruction* add = builder.AddInstruction(HloInstruction::CreateBinary(
      const1->shape(), HloOpcode::kAdd, const1, const2));
  HloInstruction* sub = builder.AddInstruction(HloInstruction::CreateBinary(
      const2->shape(), HloOpcode::kSubtract, const2, const3));
  HloInstruction* mul = builder.AddInstruction(HloInstruction::CreateBinary(
      add->shape(), HloOpcode::kMultiply, add, sub));

  auto module = CreateNewVerifiedModule("fuse_with_constant_operands");
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build(mul));
  HloInstruction* call = module->OutlineExpressionFromComputation(
      {add, sub, mul}, "entry", entry_computation);
  EXPECT_EQ(call, entry_computation->root_instruction());
  HloComputation* callee_computation = call->to_apply();
  // The arguments to the call should be const1, const2, and const3.
  EXPECT_THAT(call->operands(),
              ::testing::UnorderedElementsAre(const1, const2, const3));

  // The callee should contain 3 parameters and 3 binary operators.
  EXPECT_EQ(6, callee_computation->instruction_count());
}

// Folding applies inside non-entry (called) computations as well.
TEST_F(TransposeFoldingTest, FoldDotTransposeInCall) {
  constexpr absl::string_view kHloString = R"(
HloModule FoldDotTransposeInCall

callee {
  name.0 = f32[2,3]{1,0} parameter(0)
  name.1 = f32[2,3]{1,0} parameter(1)
  transpose.clone = f32[3,2]{1,0} transpose(name.0), dimensions={1,0}
  ROOT dot.clone = f32[2,2]{1,0} dot(name.1, transpose.clone),
       lhs_contracting_dims={1}, rhs_contracting_dims={0}
}

ENTRY entry_computation {
  y = f32[2,3]{1,0} parameter(1)
  x = f32[2,3]{1,0} parameter(0)
  ROOT call = f32[2,2]{1,0} call(y, x), to_apply=callee
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));

  const HloComputation* callee = module->GetComputationWithName("callee");
  ASSERT_NE(callee, nullptr);
  EXPECT_THAT(callee->root_instruction(),
              op::Dot(op::Parameter(1), op::Parameter(0),
                      1, 1));
}

// Test that a two dimension swap of the kernel gets folded into convolution.
TEST_F(TransposeFoldingTest, FoldConvDimSwapTransposeRhs) {
  auto builder = HloComputation::Builder("entry_computation");
  HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), "x"));
  HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
      1, ShapeUtil::MakeShape(F32, {3, 2, 1, 1}), "y"));
  HloInstruction* transpose_y =
      builder.AddInstruction(HloInstruction::CreateTranspose(
          ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), y, {1, 0, 2, 3}));
  auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers();
  Window window;
  for (int i = 0; i < 2; ++i) {
    WindowDimension* dim = window.add_dimensions();
    dim->set_padding_low(0);
    dim->set_padding_high(0);
    dim->set_base_dilation(1);
    dim->set_window_dilation(1);
    dim->set_stride(1);
    dim->set_size(
        transpose_y->shape().dimensions(dnums.kernel_spatial_dimensions(i)));
  }
  absl::StatusOr<Shape> conv_shape = ShapeInference::InferConvolveShape(
      x->shape(), transpose_y->shape(), 1,
      1, window, dnums, std::nullopt);
  EXPECT_IS_OK(conv_shape);
  HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      conv_shape.value(), x, transpose_y,
      1, 1, window, dnums,
      DefaultPrecisionConfig(2)));

  auto module = CreateNewVerifiedModule("test_module");
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build(conv));
  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));

  // Instructions after folding: x, y, and the convolution.
  absl::flat_hash_set<HloInstruction*> instruction_set(
      entry_computation->instructions().begin(),
      entry_computation->instructions().end());
  CHECK_EQ(1, instruction_set.erase(x)) << "x is not in entry_computation.";
  CHECK_EQ(1, instruction_set.erase(y)) << "y is not in entry_computation.";
  CHECK_EQ(1, instruction_set.size())
      << "entry_computation should contain exactly 3 instructions.";
  HloInstruction* new_conv = *instruction_set.begin();
  EXPECT_EQ(HloOpcode::kConvolution, new_conv->opcode());
  EXPECT_EQ(dnums.kernel_input_feature_dimension(),
            new_conv->convolution_dimension_numbers()
                .kernel_output_feature_dimension());
  EXPECT_EQ(dnums.kernel_output_feature_dimension(),
            new_conv->convolution_dimension_numbers()
                .kernel_input_feature_dimension());
}

// Test that a complex transpose of the kernel gets folded into convolution.
TEST_F(TransposeFoldingTest, FoldConvComplexTransposeRhs) {
  auto builder = HloComputation::Builder("entry_computation");
  HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), "x"));
  HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
      1, ShapeUtil::MakeShape(F32, {1, 2, 1, 3}), "y"));
  HloInstruction* transpose_y =
      builder.AddInstruction(HloInstruction::CreateTranspose(
          ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), y, {1, 3, 0, 2}));
  auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers();
  Window window;
  for (int i = 0; i < 2; ++i) {
    WindowDimension* dim = window.add_dimensions();
    dim->set_padding_low(0);
    dim->set_padding_high(0);
    dim->set_base_dilation(1);
    dim->set_window_dilation(1);
    dim->set_stride(1);
    dim->set_size(
        transpose_y->shape().dimensions(dnums.kernel_spatial_dimensions(i)));
  }
  absl::StatusOr<Shape> conv_shape = ShapeInference::InferConvolveShape(
      x->shape(), transpose_y->shape(), 1,
      1, window, dnums, std::nullopt);
  EXPECT_IS_OK(conv_shape);
  HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      conv_shape.value(), x, transpose_y,
      1, 1, window, dnums,
      DefaultPrecisionConfig(2)));

  auto module = CreateNewVerifiedModule("test_module");
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build(conv));
  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));

  // Instructions after folding: x, y, and the convolution.
  absl::flat_hash_set<HloInstruction*> instruction_set(
      entry_computation->instructions().begin(),
      entry_computation->instructions().end());
  CHECK_EQ(1, instruction_set.erase(x)) << "x is not in entry_computation.";
  CHECK_EQ(1, instruction_set.erase(y)) << "y is not in entry_computation.";
  CHECK_EQ(1, instruction_set.size())
      << "entry_computation should contain exactly 3 instructions.";
  HloInstruction* new_conv = *instruction_set.begin();
  EXPECT_EQ(HloOpcode::kConvolution, new_conv->opcode());
  // Each kernel dimension number must be remapped through {1,3,0,2}.
  EXPECT_EQ(dnums.kernel_input_feature_dimension(),
            new_conv->convolution_dimension_numbers()
                .kernel_output_feature_dimension());
  EXPECT_EQ(dnums.kernel_spatial_dimensions(1),
            new_conv->convolution_dimension_numbers()
                .kernel_input_feature_dimension());
  EXPECT_EQ(
      dnums.kernel_output_feature_dimension(),
      new_conv->convolution_dimension_numbers().kernel_spatial_dimensions(0));
  EXPECT_EQ(
      dnums.kernel_spatial_dimensions(0),
      new_conv->convolution_dimension_numbers().kernel_spatial_dimensions(1));
}

// Test that a transpose of the activations gets folded into convolution.
TEST_F(TransposeFoldingTest, FoldConvTransposeLhs) {
  auto builder = HloComputation::Builder("entry_computation");
  HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(F32, {3, 2, 1, 1}), "x"));
  HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
      1, ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), "y"));
  HloInstruction* transpose_x =
      builder.AddInstruction(HloInstruction::CreateTranspose(
          ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), x, {1, 0, 2, 3}));
  auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers();
  Window window;
  for (int i = 0; i < 2; ++i) {
    WindowDimension* dim = window.add_dimensions();
    dim->set_padding_low(0);
    dim->set_padding_high(0);
    dim->set_base_dilation(1);
    dim->set_window_dilation(1);
    dim->set_stride(1);
    dim->set_size(y->shape().dimensions(dnums.kernel_spatial_dimensions(i)));
  }
  absl::StatusOr<Shape> conv_shape = ShapeInference::InferConvolveShape(
      transpose_x->shape(), y->shape(), 1,
      1, window, dnums, std::nullopt);
  EXPECT_IS_OK(conv_shape);
  HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      conv_shape.value(), transpose_x, y,
      1, 1, window, dnums,
      DefaultPrecisionConfig(2)));

  auto module = CreateNewVerifiedModule("test_module");
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build(conv));
  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));

  // Instructions after folding: x, y, and the convolution.
  absl::flat_hash_set<HloInstruction*> instruction_set(
      entry_computation->instructions().begin(),
      entry_computation->instructions().end());
  EXPECT_EQ(1, instruction_set.erase(x)) << "x is not in entry_computation.";
  EXPECT_EQ(1, instruction_set.erase(y)) << "y is not in entry_computation.";
  EXPECT_EQ(1, instruction_set.size())
      << "entry_computation should contain exactly 3 instructions.";
  HloInstruction* new_conv = *instruction_set.begin();
  EXPECT_EQ(HloOpcode::kConvolution, new_conv->opcode());
  // Batch/feature swap; spatial dimensions untouched by {1,0,2,3}.
  EXPECT_EQ(dnums.input_feature_dimension(),
            new_conv->convolution_dimension_numbers().input_batch_dimension());
  EXPECT_EQ(
      dnums.input_batch_dimension(),
      new_conv->convolution_dimension_numbers().input_feature_dimension());
  EXPECT_EQ(
      dnums.input_spatial_dimensions(0),
      new_conv->convolution_dimension_numbers().input_spatial_dimensions(0));
  EXPECT_EQ(
      dnums.input_spatial_dimensions(1),
      new_conv->convolution_dimension_numbers().input_spatial_dimensions(1));
  EXPECT_EQ(
      dnums.output_spatial_dimensions(0),
      new_conv->convolution_dimension_numbers().output_spatial_dimensions(0));
  EXPECT_EQ(
      dnums.output_spatial_dimensions(1),
      new_conv->convolution_dimension_numbers().output_spatial_dimensions(1));
}

// Test that a complex transpose of the activations gets folded into
// convolution.
TEST_F(TransposeFoldingTest, FoldConvComplexTransposeLhs) {
  auto builder = HloComputation::Builder("entry_computation");
  HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(F32, {3, 2, 1, 1}), "x"));
  HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
      1, ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), "y"));
  HloInstruction* transpose_x =
      builder.AddInstruction(HloInstruction::CreateTranspose(
          ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), x, {1, 0, 3, 2}));
  auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers();
  Window window;
  for (int i = 0; i < 2; ++i) {
    WindowDimension* dim = window.add_dimensions();
    dim->set_padding_low(0);
    dim->set_padding_high(0);
    dim->set_base_dilation(1);
    dim->set_window_dilation(1);
    dim->set_stride(1);
    dim->set_size(y->shape().dimensions(dnums.kernel_spatial_dimensions(i)));
  }
  absl::StatusOr<Shape> conv_shape = ShapeInference::InferConvolveShape(
      transpose_x->shape(), y->shape(), 1,
      1, window, dnums, std::nullopt);
  EXPECT_IS_OK(conv_shape);
  HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      conv_shape.value(), transpose_x, y,
      1, 1, window, dnums,
      DefaultPrecisionConfig(2)));

  auto module = CreateNewVerifiedModule("test_module");
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build(conv));
  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));

  // Instructions after folding: x, y, and the convolution.
  absl::flat_hash_set<HloInstruction*> instruction_set(
      entry_computation->instructions().begin(),
      entry_computation->instructions().end());
  EXPECT_EQ(1, instruction_set.erase(x)) << "x is not in entry_computation.";
  EXPECT_EQ(1, instruction_set.erase(y)) << "y is not in entry_computation.";
  EXPECT_EQ(1, instruction_set.size())
      << "entry_computation should contain exactly 3 instructions.";
  HloInstruction* new_conv = *instruction_set.begin();
  EXPECT_EQ(HloOpcode::kConvolution, new_conv->opcode());
  // Batch/feature swap and spatial-dimension swap from {1,0,3,2}.
  EXPECT_EQ(dnums.input_feature_dimension(),
            new_conv->convolution_dimension_numbers().input_batch_dimension());
  EXPECT_EQ(
      dnums.input_batch_dimension(),
      new_conv->convolution_dimension_numbers().input_feature_dimension());
  EXPECT_EQ(
      dnums.input_spatial_dimensions(0),
      new_conv->convolution_dimension_numbers().input_spatial_dimensions(1));
  EXPECT_EQ(
      dnums.input_spatial_dimensions(1),
      new_conv->convolution_dimension_numbers().input_spatial_dimensions(0));
  EXPECT_EQ(
      dnums.output_spatial_dimensions(0),
      new_conv->convolution_dimension_numbers().output_spatial_dimensions(0));
  EXPECT_EQ(
      dnums.output_spatial_dimensions(1),
      new_conv->convolution_dimension_numbers().output_spatial_dimensions(1));
}

// Batched dot: a transpose of the two minor dims folds; batch dims fixed.
TEST_F(TransposeFoldingTest, FoldBatchDotTranspose) {
  constexpr absl::string_view kHloString = R"(
HloModule FoldBatchDotTranspose

ENTRY entry_computation {
  x = f32[7,7,2,3]{3,2,1,0} parameter(0)
  y = f32[7,7,2,3]{3,2,1,0} parameter(1)
  transpose = f32[7,7,3,2]{3,2,1,0} transpose(y), dimensions={0,1,3,2}
  ROOT dot = f32[7,7,2,2]{3,2,1,0} dot(x, transpose), lhs_contracting_dims={3},
             rhs_contracting_dims={2}, lhs_batch_dims={0,1},
             rhs_batch_dims={0,1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));

  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));

  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Dot(op::Parameter(0), op::Parameter(1),
                      3, 3));
}

// A transpose that permutes the batch dims is rejected by the default
// predicate.
TEST_F(TransposeFoldingTest, NoFoldBatchDotTransposeBatch) {
  constexpr absl::string_view kHloString = R"(
HloModule NoFoldBatchDotTransposeBatch

ENTRY entry_computation {
  x = f32[7,7,2,3]{3,2,1,0} parameter(0)
  y = f32[7,7,2,3]{3,2,1,0} parameter(1)
  transpose = f32[7,7,3,2]{3,2,1,0} transpose(y), dimensions={1,0,3,2}
  ROOT dot = f32[7,7,2,2]{3,2,1,0} dot(x, transpose), lhs_contracting_dims={3},
             rhs_contracting_dims={2}, lhs_batch_dims={0,1},
             rhs_batch_dims={0,1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));

  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));
}

// Batch dims need not be the leading dims for folding to apply.
TEST_F(TransposeFoldingTest, FoldBatchDotTransposeNonContiguousBatch) {
  constexpr absl::string_view kHloString = R"(
HloModule FoldBatchDotTransposeNonContiguousBatch

ENTRY entry_computation {
  x = f32[7,2,7,3]{3,2,1,0} parameter(0)
  y = f32[7,2,7,3]{3,2,1,0} parameter(1)
  transpose = f32[7,3,7,2]{3,2,1,0} transpose(y), dimensions={0,3,2,1}
  ROOT dot = f32[7,7,2,2]{3,2,1,0} dot(x, transpose), lhs_contracting_dims={3},
             rhs_contracting_dims={1}, lhs_batch_dims={0,2},
             rhs_batch_dims={0,2}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));

  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));

  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Dot(op::Parameter(0), op::Parameter(1),
                      3, 3));
}

// Identity transposes are never folded (nothing to gain).
TEST_F(TransposeFoldingTest, NoFoldBatchDotTransposeIdentity) {
  constexpr absl::string_view kHloString = R"(
HloModule NoFoldBatchDotTransposeIdentity

ENTRY entry_computation {
  x = f32[7,7,2,3]{3,2,1,0} parameter(0)
  y = f32[7,7,3,2]{3,2,1,0} parameter(1)
  transpose = f32[7,7,3,2]{3,2,1,0} transpose(y), dimensions={0,1,2,3}
  ROOT dot = f32[7,7,2,2]{3,2,1,0} dot(x, transpose), lhs_contracting_dims={3},
             rhs_contracting_dims={2}, lhs_batch_dims={0,1},
             rhs_batch_dims={0,1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));

  EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));
}

}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/transpose_folding.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/transpose_folding_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
cfec981d-552c-4da4-9a2f-8bcbd2723fba
cpp
tensorflow/tensorflow
reshape_mover
third_party/xla/xla/service/reshape_mover.cc
third_party/xla/xla/service/reshape_mover_test.cc
#include "xla/service/reshape_mover.h" #include <algorithm> #include <memory> #include <vector> #include "absl/algorithm/container.h" #include "xla/permutation_util.h" #include "xla/service/hlo_creation_utils.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/util.h" #include "tsl/platform/errors.h" namespace xla { namespace { bool IsRearrange(const HloInstruction* instruction) { return instruction->opcode() == HloOpcode::kReshape || instruction->opcode() == HloOpcode::kTranspose; } bool AreEquivalentRearranges(const HloInstruction* a, const HloInstruction* b) { if (a->opcode() != b->opcode() || !ShapeUtil::SameDimensions(a->shape(), b->shape())) { return false; } switch (a->opcode()) { case HloOpcode::kTranspose: return a->dimensions() == b->dimensions(); case HloOpcode::kReshape: return ShapeUtil::SameDimensions(a->operand(0)->shape(), b->operand(0)->shape()); default: return false; } } absl::InlinedVector<int64_t, 4> TransposedBcastDims( absl::Span<const int64_t> bcast_dims, absl::Span<const int64_t> transpose_dims) { auto inv_perm = InversePermutation(transpose_dims); absl::InlinedVector<int64_t, 4> new_bcast_dims; for (int64_t dim : bcast_dims) { new_bcast_dims.push_back(inv_perm[dim]); } return new_bcast_dims; } } bool ReshapeMover::CanTriviallyRearrange(const HloInstruction* instr, const HloInstruction* rearrange) { CHECK(IsRearrange(rearrange)) << rearrange->ToString(); if (rearrange->opcode() == HloOpcode::kReshape && ShapeUtil::Equal(rearrange->shape(), rearrange->operand(0)->shape())) { return true; } if (rearrange->opcode() == HloOpcode::kTranspose && IsIdentityPermutation(rearrange->dimensions())) { return true; } if (instr->opcode() == HloOpcode::kConstant) { return true; } if (instr->opcode() == HloOpcode::kRng && instr->user_count() == 1) { return true; } if (instr->opcode() == HloOpcode::kBroadcast) { if (!absl::c_is_sorted(instr->dimensions())) { return false; } if (rearrange->opcode() == HloOpcode::kReshape) { return 
ShapeUtil::IsScalar(instr->operand(0)->shape()) || (options_.reshape_of_1d_broadcast_is_cheap && ShapeUtil::TrueRank(instr->operand(0)->shape()) <= 1) || (options_.reshape_of_1d_broadcast_is_cheap && ShapeUtil::ReshapeLeavesDimensionsUnmodified( rearrange->shape(), rearrange->operand(0)->shape(), instr->dimensions()) .has_value()); } if (rearrange->opcode() == HloOpcode::kTranspose) { return absl::c_is_sorted(TransposedBcastDims( instr->dimensions(), InversePermutation(rearrange->dimensions()))); } } return false; } const HloInstruction* ReshapeMover::FirstNontrivialRearrange( absl::Span<const HloInstruction* const> instrs) { auto rearrange_it = absl::c_find_if(instrs, [&](const HloInstruction* instr) { return IsRearrange(instr) && !CanTriviallyRearrange(instr->operand(0), instr); }); if (rearrange_it == instrs.end()) { return nullptr; } return *rearrange_it; } bool ReshapeMover::IsReshapeMoveCandidate(HloInstruction* instruction) { auto print_no_metadata = HloPrintOptions().set_print_metadata(false); VLOG(5) << "** Checking instruction: " << instruction->ToString(print_no_metadata); if (!instruction->IsElementwise()) { return false; } const HloInstruction* rearrange = FirstNontrivialRearrange(instruction->operands()); if (rearrange == nullptr) { return false; } return absl::c_all_of( instruction->operands(), [&](const HloInstruction* operand) { return (IsRearrange(operand) && AreEquivalentRearranges(operand, rearrange)) || (!IsRearrange(operand) && CanTriviallyRearrange(operand, rearrange)); }); } absl::StatusOr<HloInstruction*> ReshapeMover::ApplyInverseRearrange( const HloInstruction* rearrange, HloInstruction* operand) { switch (rearrange->opcode()) { case HloOpcode::kReshape: { Shape new_shape = ShapeUtil::ChangeElementType( rearrange->operand(0)->shape(), operand->shape().element_type()); if (operand->shape() != new_shape) { return MakeReshapeHlo(new_shape, operand); } else { return operand; } } case HloOpcode::kTranspose: { if 
(!IsIdentityPermutation(rearrange->dimensions())) { return MakeTransposeHlo(operand, InversePermutation(rearrange->dimensions())); } else { return operand; } } default: LOG(FATAL) << "Invalid rearrange op: " << rearrange->ToString(); } } absl::StatusOr<bool> ReshapeMover::SinkRearrangeOperands( HloInstruction* instruction) { auto print_no_metadata = HloPrintOptions().set_print_metadata(false); HloComputation* computation = instruction->parent(); const HloInstruction* rearrange = FirstNontrivialRearrange(instruction->operands()); CHECK(rearrange != nullptr); const Shape& new_operand_shape = rearrange->operand(0)->shape(); VLOG(3) << "** Sinking reshape or transpose: " << instruction->ToString(print_no_metadata) << "\n\tfirst rearrange operand: " << rearrange->ToString(print_no_metadata) << "\n\tnew operand shape: " << ShapeUtil::HumanString(new_operand_shape); auto operands = instruction->operands(); for (size_t i = 0; i < operands.size(); ++i) { VLOG(3) << "Updating operand #" << i << ": " << operands[i]->ToString(print_no_metadata); TF_ASSIGN_OR_RETURN(operands[i], ApplyInverseRearrange(rearrange, operands[i])); VLOG(3) << "Updated operand #" << i << " to: " << operands[i]->ToString(print_no_metadata); } HloInstruction* new_elementwise = computation->AddInstruction(instruction->CloneWithNewOperands( ShapeUtil::ChangeElementType(new_operand_shape, instruction->shape().element_type()), operands)); std::unique_ptr<HloInstruction> new_rearrange; switch (rearrange->opcode()) { case HloOpcode::kReshape: VLOG(3) << "Creating new reshape for new elementwise op: " << new_elementwise->ToString(print_no_metadata); new_rearrange = HloInstruction::CreateReshape(instruction->shape(), new_elementwise); break; case HloOpcode::kTranspose: new_rearrange = HloInstruction::CreateTranspose( instruction->shape(), new_elementwise, rearrange->dimensions()); break; default: LOG(FATAL) << "Bad opcode"; } if (instruction->has_sharding()) { new_elementwise->clear_sharding(); } 
TF_RETURN_IF_ERROR(computation->ReplaceWithNewInstruction( instruction, std::move(new_rearrange))); return true; } absl::StatusOr<bool> ReshapeMover::TryReshapeMoveOnCandidates( HloInstructionSet* candidates) { bool removed = true; while (!candidates->empty() && removed) { if (VLOG_IS_ON(5)) { for (const HloInstruction* instruction : *candidates) { VLOG(5) << "candidate " << instruction->ToString(); } } ConstHloInstructionSet rearrange_operands; for (const HloInstruction* instruction : *candidates) { for (const auto* operand : instruction->operands()) { if (IsRearrange(operand)) { rearrange_operands.insert(operand); } } } removed = false; for (auto operand : rearrange_operands) { if (absl::c_any_of(operand->users(), [&](HloInstruction* user) { return !candidates->count(user); })) { for (auto* user : operand->users()) { removed |= candidates->erase(user) > 0; } } } } if (candidates->empty()) { return false; } for (HloInstruction* instruction : *candidates) { if (!ConsumeFuel("reshape-mover", [&] { return absl::StrCat("instruction: ", instruction->ToString(), "\nFull module:\n", instruction->GetModule()->ToString()); })) { break; } TF_ASSIGN_OR_RETURN(bool did_change, SinkRearrangeOperands(instruction)); CHECK(did_change); } return true; } absl::StatusOr<bool> ReshapeMover::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; for (auto* comp : module->MakeNonfusionComputations(execution_threads)) { HloInstructionSet candidates; for (HloInstruction* instruction : comp->instructions()) { if (IsReshapeMoveCandidate(instruction)) { candidates.insert(instruction); } } TF_ASSIGN_OR_RETURN(bool did_change, TryReshapeMoveOnCandidates(&candidates)); changed |= did_change; } return changed; } }
#include "xla/service/reshape_mover.h" #include <memory> #include <string> #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/pass/hlo_pass_fix.h" #include "xla/service/algebraic_simplifier.h" #include "xla/service/hlo_verifier.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" namespace xla { namespace { namespace m = xla::match; class ReshapeMoverTest : public HloTestBase { protected: absl::Status RunPass(HloModule* module, bool change_expected, ReshapeMoverOptions options = ReshapeMoverOptions{}) { TF_ASSIGN_OR_RETURN(bool changed, RunHloPass(ReshapeMover(options), module)); SCOPED_TRACE(module->ToString()); EXPECT_EQ(changed, change_expected); TF_EXPECT_OK(RunHloPass(HloVerifier(HloVerifierOpts()), module).status()); TF_EXPECT_OK(RunHloPass(HloPassFix<AlgebraicSimplifier>( AlgebraicSimplifierOptions()), module) .status()); return absl::OkStatus(); } }; TEST_F(ReshapeMoverTest, ReshapesWithDifferentInputShapesNotMoved) { const std::string hlo_string = R"( HloModule test ENTRY test { reshape0 = f32[8,7] reshape(f32[1,8,1,7] parameter(0)) reshape1 = f32[8,7] reshape(f32[1,8,7,1] parameter(1)) ROOT add = add(reshape0, reshape1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK(RunPass(m.get(), false)); } TEST_F(ReshapeMoverTest, OneConstantAndOneReshapesOnRngNotMoved) { const std::string hlo_string = R"( HloModule test ENTRY test { rng = f32[1,8,1,7,1] rng(f32[] constant(0), f32[] constant(1)), distribution=rng_uniform ROOT add = add(f32[8,7] reshape(rng), f32[8,7] constant({...})) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK(RunPass(m.get(), false)); } TEST_F(ReshapeMoverTest, EquivalentReshapesMoved) { const std::string hlo_string = R"( HloModule test ENTRY test { 
reshape0 = f32[8,7] reshape(f32[1,8,1,7] parameter(0)) reshape1 = f32[8,7] reshape(f32[1,8,1,7] parameter(1)) ROOT add = f32[8,7] add(reshape0, reshape1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK(RunPass(m.get(), true)); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Reshape(m::Add(m::Parameter(0), m::Parameter(1))))); } TEST_F(ReshapeMoverTest, SinkReshapeBelowSelect) { const std::string hlo_string = R"( HloModule test ENTRY test { ROOT select = f32[2,3] select( pred[2,3] reshape(pred[6] parameter(0)), f32[2,3] reshape(f32[6] parameter(1)), f32[2,3] reshape(f32[6] parameter(2))) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK(RunPass(m.get(), true)); SCOPED_TRACE(m->ToString()); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Reshape(m::Select(m::Parameter(0), m::Parameter(1), m::Parameter(2))))); } TEST_F(ReshapeMoverTest, SinkReshapeBelowSelectWithConstant) { const std::string hlo_string = R"( HloModule test ENTRY test { ROOT select = f32[2,3] select( pred[2,3] reshape(pred[6] parameter(0)), f32[2,3] reshape(f32[6] parameter(1)), f32[2,3] constant({...})) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK(RunPass(m.get(), true)); SCOPED_TRACE(m->ToString()); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Reshape(m::Select(m::Parameter(0), m::Parameter(1), m::Reshape(m::Constant()))))); } TEST_F(ReshapeMoverTest, OneParameterAndOneReshapeNotMoved) { const std::string hlo_string = R"( HloModule test ENTRY test { reshape0 = f32[8,7] reshape(f32[1,8,1,7] parameter(0)) ROOT add = add(reshape0, f32[8,7] parameter(1)) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK(RunPass(m.get(), false)); } TEST_F(ReshapeMoverTest, DontSinkReshapesOfConstants) { const std::string hlo_string = R"( HloModule test ENTRY test { ROOT 
select = select( pred[3,2] parameter(0), f32[3,2] reshape(f32[2,3] constant({...})), f32[3,2] reshape(f32[2,3] constant({...}))) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK(RunPass(m.get(), false)); } TEST_F(ReshapeMoverTest, OneNontrivialReshapeMoved) { const std::string hlo_string = R"( HloModule test ENTRY test { ROOT add = add( f32[3,2] reshape(f32[2,3] parameter(0)), f32[3,2] constant({...})) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK(RunPass(m.get(), true)); SCOPED_TRACE(m->ToString()); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Reshape( m::Add(m::Parameter(0), m::Reshape(m::Constant()))))); } TEST_F(ReshapeMoverTest, MultipleReshapes) { const std::string hlo_string = R"( HloModule test ENTRY test { add0 = f32[8,7,1] add( f32[8,7,1] reshape(f32[1,8,1,7] parameter(0)), f32[8,7,1] reshape(f32[1,8,1,7] parameter(1))) ROOT add1 = f32[8,7] add( f32[8,7] reshape(add0), f32[8,7] reshape(f32[8,7,1] parameter(2))) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK(RunPass(m.get(), true)); SCOPED_TRACE(m->ToString()); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Reshape( m::Add(m::Reshape(m::Add(m::Parameter(0), m::Parameter(1))), m::Parameter(2))))); } TEST_F(ReshapeMoverTest, SinkTransposeAcrossBroadcastScalar) { const std::string hlo_string = R"( HloModule TransposeMulInversedTransposeModule ENTRY TransposeMulInversedTranspose { src0 = f32[20,8]{1,0} parameter(0) transpose0 = f32[8,20]{1,0} transpose(src0), dimensions={1,0} src1 = f32[] parameter(1) broadcast0 = f32[8,20]{1,0} broadcast(src1), dimensions={} ROOT multiply0 = f32[8,20]{1,0} multiply(transpose0, broadcast0) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK(RunPass(m.get(), true)); SCOPED_TRACE(m->ToString()); EXPECT_THAT(m->entry_computation()->root_instruction(), 
GmockMatch(m::Transpose(m::Multiply( m::Parameter(0), m::Broadcast(m::Parameter(1)))))); } TEST_F(ReshapeMoverTest, ReshapeWithUsersOutsideCandidatesNotSink) { const std::string hlo_string = R"( HloModule ReshapeWithUsersOutsideCandidates ENTRY ReshapeWithMultipleUsers { param0 = f32[20,8]{1,0} parameter(0) reshape0 = f32[8,20]{1,0} reshape(param0) param1 = f32[] parameter(1) broadcast0 = f32[8,20]{1,0} broadcast(param1), dimensions={} param2 = f32[20,8]{1,0} parameter(2) reshape1 = f32[8,20]{1,0} reshape(param2) param3 = f32[20,8]{1,0} parameter(3) reshape2 = f32[8,20]{1,0} reshape(param3) param4 = f32[8,20]{1,0} parameter(4) add0 = f32[8,20]{1,0} add(reshape0, broadcast0) add1 = f32[8,20]{1,0} add(reshape0, reshape1) add2 = f32[8,20]{1,0} add(reshape1, param4) ROOT tuple = (f32[8,20]{1,0},f32[8,20]{1,0}, f32[8,20]{1,0}) tuple(add0, add1, add2) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK(RunPass(m.get(), false)); } TEST_F(ReshapeMoverTest, ReshapeNoUsersOutsideCandidatesSink1) { const std::string hlo_string = R"( HloModule ReshapeNoUsersOutsideCandidates1 ENTRY ReshapeWithMultipleUsers1 { param0 = f32[20,8]{1,0} parameter(0) reshape0 = f32[8,20]{1,0} reshape(param0) param1 = f32[] parameter(1) broadcast0 = f32[8,20]{1,0} broadcast(param1), dimensions={} param2 = f32[20,8]{1,0} parameter(2) reshape1 = f32[8,20]{1,0} reshape(param2) param3 = f32[20,8]{1,0} parameter(3) reshape2 = f32[8,20]{1,0} reshape(param3) add0 = f32[8,20]{1,0} add(reshape0, broadcast0) add1 = f32[8,20]{1,0} add(reshape0, reshape1) add2 = f32[8,20]{1,0} add(reshape1, reshape2) ROOT tuple = (f32[8,20]{1,0},f32[8,20]{1,0}, f32[8,20]{1,0}) tuple(add0, add1, add2) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK(RunPass(m.get(), true)); SCOPED_TRACE(m->ToString()); EXPECT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::Reshape(m::Add(m::Parameter(0), 
m::Broadcast(m::Parameter(1)))), m::Reshape(m::Add(m::Parameter(0), m::Parameter(2))), m::Reshape(m::Add(m::Parameter(2), m::Parameter(3)))))); } TEST_F(ReshapeMoverTest, ReshapeNoUsersOutsideCandidatesSink2) { const std::string hlo_string = R"( HloModule ReshapeNoUsersOutsideCandidates2 ENTRY ReshapeWithMultipleUsers2 { param0 = f32[20,8]{1,0} parameter(0) reshape0 = f32[8,20]{1,0} reshape(param0) ROOT add0 = f32[8,20]{1,0} add(reshape0, reshape0) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK(RunPass(m.get(), true)); SCOPED_TRACE(m->ToString()); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Reshape(m::Add()))); } TEST_F(ReshapeMoverTest, ReshapeOfRank1BroadcastIsNotTrivial) { const std::string hlo_string = R"( HloModule test ENTRY test { a = f32[2,3] broadcast(f32[2] parameter(0)), dimensions={0} b = f32[2,3] reshape(f32[6] parameter(1)) ROOT add0 = add(a, b) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK(RunPass(m.get(), false)); } TEST_F(ReshapeMoverTest, ReshapeOfRank1BroadcastIsTrivial) { const std::string hlo_string = R"( HloModule test ENTRY test { a = f32[2,3] broadcast(f32[2] parameter(0)), dimensions={0} b = f32[2,3] reshape(f32[6] parameter(1)) ROOT add0 = add(a, b) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string)); ReshapeMoverOptions options; options.reshape_of_1d_broadcast_is_cheap = true; TF_ASSERT_OK(RunPass(m.get(), true, options)); SCOPED_TRACE(m->ToString()); EXPECT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::Reshape( m::Add(m::Reshape(m::Broadcast(m::Parameter(0))), m::Parameter(1))))); } TEST_F(ReshapeMoverTest, ReshapeOfRank2BroadcastIsAllowed) { const std::string hlo_string = R"( HloModule test ENTRY test { a = f32[2,3,35] broadcast(f32[2,3] parameter(0)), dimensions={0,1} b = f32[2,3,35] reshape(f32[2,3,5,7] parameter(1)) ROOT add0 = add(a, b) } )"; 
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string)); ReshapeMoverOptions options; options.reshape_of_1d_broadcast_is_cheap = true; TF_ASSERT_OK(RunPass(m.get(), true, options)); SCOPED_TRACE(m->ToString()); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Reshape( m::Add(m::Broadcast(m::Parameter(0)), m::Parameter(1))))); } TEST_F(ReshapeMoverTest, SinkDisallowedIfReshapeChangesBroadcastDims) { const std::string hlo_string = R"( HloModule test ENTRY test { a = f32[2,3,35] broadcast(f32[2,3] parameter(0)), dimensions={0,1} b = f32[2,3,35] reshape(f32[6,5,7] parameter(1)) ROOT add0 = add(a, b) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK(RunPass(m.get(), false)); } TEST_F(ReshapeMoverTest, TransposeOfBroadcastIsAllowed) { const std::string hlo_string = R"( HloModule test ENTRY test { a = f32[2,3] broadcast(f32[2] parameter(0)), dimensions={0} b = f32[2,3] transpose(f32[3,2] parameter(1)), dimensions={1,0} ROOT add0 = add(a, b) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK(RunPass(m.get(), true)); SCOPED_TRACE(m->ToString()); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Transpose( m::Add(m::Broadcast(m::Parameter(0)), m::Parameter(1))))); } TEST_F(ReshapeMoverTest, TransposeReordersBroadcastDims) { const std::string hlo_string = R"( HloModule test ENTRY test { a = f32[2,3,5] broadcast(f32[2,3] parameter(0)), dimensions={0,1} b = f32[2,3,5] transpose(f32[3,2,5] parameter(1)), dimensions={1,0,2} ROOT add0 = add(a, b) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK(RunPass(m.get(), false)); } TEST_F(ReshapeMoverTest, ShardingConsistencyPreservation) { const std::string hlo_string = R"( HloModule module ENTRY entry { copy.2424 = bf16[3,16,128]{2,1,0} parameter(0), sharding={replicated} dot.987 = bf16[3,16,128,4096]{3,2,1,0} parameter(1), 
sharding={devices=[1,8,1,1]0,1,2,3,4,5,6,7} reshape.5843 = bf16[3,16,128,1,4096]{4,3,2,1,0} reshape(dot.987), sharding={devices=[1,8,1,1,1]0,1,2,3,4,5,6,7} transpose.21172 = bf16[3,1,4096,16,128]{2,1,4,3,0} transpose(reshape.5843), dimensions={0,3,4,1,2}, sharding={devices=[1,1,1,8,1]0,1,2,3,4,5,6,7} reshape.291 = bf16[3,16,128]{2,1,0} reshape(copy.2424), sharding={devices=[1,8,1]0,1,2,3,4,5,6,7} broadcast.21176 = bf16[3,1,4096,16,128]{4,3,2,1,0} broadcast(reshape.291), dimensions={0,3,4}, sharding={devices=[1,1,1,8,1]0,1,2,3,4,5,6,7} multiply.21177 = bf16[3,1,4096,16,128]{2,1,4,3,0} multiply(transpose.21172, broadcast.21176), sharding={devices=[1,1,1,8,1]0,1,2,3,4,5,6,7} ROOT slice.21180 = bf16[1,1,4096,16,128]{4,3,2,1,0} slice(multiply.21177), slice={[1:2], [0:1], [0:4096], [0:16], [0:128]}, sharding={devices=[1,1,1,8,1]0,1,2,3,4,5,6,7} } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK(RunPass(m.get(), true)); auto elementwise_op = FindInstruction(m.get(), HloOpcode::kMultiply); EXPECT_FALSE(elementwise_op->has_sharding()); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reshape_mover.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reshape_mover_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
cabe3974-40bd-4f00-8029-9f686f21bb85
cpp
tensorflow/tensorflow
hlo_phi_graph
third_party/xla/xla/service/hlo_phi_graph.cc
third_party/xla/xla/service/hlo_phi_graph_test.cc
#include "xla/service/hlo_phi_graph.h" #include <queue> namespace xla { HloValue::Id PhiGraph::GetOptimizedId(const HloValue& value) { Node* node = value_id_to_node_[value.id()]; CHECK(!node->mark_as_dead); return node->value_id; } bool PhiGraph::InputsEqualTo(const HloValue& value, absl::Span<const HloValue* const> inputs) { auto iter = value_id_to_node_.find(value.id()); CHECK(iter != value_id_to_node_.end()); absl::flat_hash_set<HloValue::Id> existing_set; for (Node* operand : iter->second->operands) { existing_set.insert(operand->value_id); } absl::flat_hash_set<HloValue::Id> new_set; for (const HloValue* input : inputs) { new_set.insert(input->id()); } return existing_set == new_set; } HloValue::Id PhiGraph::FindOptimizedValue(const HloValue::Id id) { auto iter = value_id_to_node_.find(id); CHECK(iter != value_id_to_node_.end()); CHECK(!iter->second->mark_as_dead); return iter->second->value_id; } PhiGraph::Node* PhiGraph::CreateOrReuseNode(const HloValue& value) { auto iter = value_id_to_node_.find(value.id()); if (iter == value_id_to_node_.end()) { node_storage_.emplace_back(std::make_unique<Node>()); Node* node = node_storage_.back().get(); node->value_id = value.id(); value_id_to_node_[value.id()] = node; node_to_value_id_[node].push_back(value.id()); return node; } else { CHECK_NE(iter->second, nullptr); CHECK_EQ(iter->second->value_id, value.id()); return iter->second; } } void PhiGraph::ReplaceNodeWith(PhiGraph::Node* node, PhiGraph::Node* replace) { CHECK(node->is_phi); if (node->mark_as_dead) { return; } if (replace->mark_as_dead) { auto iter = value_id_to_node_.find(replace->value_id); CHECK(iter != value_id_to_node_.end()); return ReplaceNodeWith(node, iter->second); } CHECK(!replace->mark_as_dead); for (Node* user : node->users) { absl::c_replace(user->operands, node, replace); } for (Node* operand : node->operands) { absl::c_replace(operand->users, node, replace); } for (HloValue::Id value_id : node_to_value_id_[node]) { 
CHECK(value_id_to_node_.contains(value_id)); value_id_to_node_[value_id] = replace; } absl::c_copy(node_to_value_id_[node], std::back_inserter(node_to_value_id_[replace])); node_to_value_id_[node].clear(); node->mark_as_dead = true; } void PhiGraph::RegisterPhi(const HloValue& value, absl::Span<const HloValue* const> inputs) { Node* node = CreateOrReuseNode(value); CHECK(value.is_phi()); node->is_phi = true; node->operands.clear(); for (auto input : inputs) { CHECK(input != nullptr); Node* input_node = CreateOrReuseNode(*input); node->operands.push_back(input_node); } } std::string PhiGraph::ToString() { std::string out = "PhiGraph: \n"; for (auto& node : node_storage_) { absl::StrAppend(&out, node->value_id); if (node->is_phi) { absl::StrAppend(&out, ", phi"); } if (node->mark_as_dead) { absl::StrAppend(&out, ", dead", ":\n"); } for (Node* input : node->operands) { absl::StrAppend(&out, " ", input->value_id, "\n"); } } return out; } void PhiGraph::Optimize() { VLOG(2) << "Optimizing phi graph:"; XLA_VLOG_LINES(2, ToString()); for (auto& node : node_storage_) { for (Node* input : node->operands) { input->users.push_back(node.get()); } } bool changed = true; while (changed) { changed = false; absl::flat_hash_set<Node*> checked_for_closure; for (auto& node : node_storage_) { if (!node->is_phi) { continue; } if (node->mark_as_dead) { continue; } Node* node_ptr = node.get(); VLOG(2) << "Optimizing: " << node_ptr->value_id; CHECK_GE(node_ptr->operands.size(), 1); auto it = absl::c_find(node_ptr->operands, node_ptr); while (it != node_ptr->operands.end()) { node_ptr->operands.erase(it); it = absl::c_find(node_ptr->operands, node_ptr); } it = absl::c_find(node_ptr->users, node_ptr); while (it != node_ptr->users.end()) { node_ptr->users.erase(it); it = absl::c_find(node_ptr->users, node_ptr); } CHECK_GE(node_ptr->operands.size(), 1); bool all_inputs_are_same = absl::c_all_of( node_ptr->operands, [&](Node* elem) { return elem == node_ptr->operands[0]; }); if 
(all_inputs_are_same) { VLOG(1) << "All inputs to node " << node_ptr->value_id << " are the same, replacing it with " << node_ptr->operands[0]->value_id; ReplaceNodeWith(node_ptr, node_ptr->operands[0]); changed = true; continue; } if (checked_for_closure.contains(node_ptr)) { continue; } absl::flat_hash_set<Node*> workset; std::queue<Node*> worklist; Node* non_phi = nullptr; worklist.push(node_ptr); while (!worklist.empty()) { Node* todo = worklist.front(); worklist.pop(); if (workset.contains(todo)) { continue; } checked_for_closure.insert(todo); workset.insert(todo); for (Node* operand : todo->operands) { worklist.push(operand); } if (!todo->is_phi) { if (non_phi != nullptr && non_phi != todo) { non_phi = nullptr; break; } else { non_phi = todo; } } } if (non_phi != nullptr) { for (Node* node : workset) { if (!node->is_phi) { CHECK_EQ(node, non_phi); continue; } VLOG(1) << "Replace node " << node->value_id << " in the closure with node " << non_phi->value_id; ReplaceNodeWith(node, non_phi); changed = true; } } } } } }
#include "xla/service/hlo_phi_graph.h" #include "xla/literal_util.h" #include "tsl/platform/test.h" namespace xla { namespace { class PhiGraphTest : public ::testing::Test { protected: HloValue NewHloValue(bool is_phi) { static int64_t id = 0; return HloValue(id++, dummy_inst_.get(), {}, is_phi); } void SetUp() override { dummy_inst_ = HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0f)); } std::unique_ptr<HloInstruction> dummy_inst_; }; TEST_F(PhiGraphTest, SelfReferencingPhi) { PhiGraph phi_graph; HloValue A = NewHloValue(false); HloValue B = NewHloValue(true); phi_graph.RegisterPhi(B, {&A, &B}); phi_graph.Optimize(); EXPECT_EQ(A.id(), phi_graph.FindOptimizedValue(B.id())); } TEST_F(PhiGraphTest, PhiWithSameInputs) { PhiGraph phi_graph; HloValue A = NewHloValue(false); HloValue B = NewHloValue(true); phi_graph.RegisterPhi(B, {&A, &A}); phi_graph.Optimize(); EXPECT_EQ(A.id(), phi_graph.FindOptimizedValue(B.id())); } TEST_F(PhiGraphTest, CircularPhi) { PhiGraph phi_graph; HloValue A = NewHloValue(true); HloValue B = NewHloValue(true); HloValue C = NewHloValue(true); HloValue D = NewHloValue(false); phi_graph.RegisterPhi(A, {&B, &C}); phi_graph.RegisterPhi(B, {&D, &C}); phi_graph.RegisterPhi(C, {&A, &B}); phi_graph.Optimize(); EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(A.id())); EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(B.id())); EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(C.id())); } TEST_F(PhiGraphTest, NestedPhiReduction) { PhiGraph phi_graph; HloValue A = NewHloValue(true); HloValue B = NewHloValue(true); HloValue C = NewHloValue(true); HloValue D = NewHloValue(false); HloValue E = NewHloValue(true); phi_graph.RegisterPhi(A, {&B, &C}); phi_graph.RegisterPhi(B, {&E, &C}); phi_graph.RegisterPhi(C, {&A, &B}); phi_graph.RegisterPhi(E, {&D, &D}); phi_graph.Optimize(); EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(A.id())); EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(B.id())); EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(C.id())); 
EXPECT_EQ(D.id(), phi_graph.FindOptimizedValue(E.id())); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_phi_graph.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_phi_graph_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
eb0c36ad-0dad-4007-b2bb-20fde4559604
cpp
tensorflow/tensorflow
tuple_util
third_party/xla/xla/service/tuple_util.cc
third_party/xla/xla/service/tuple_util_test.cc
#include "xla/service/tuple_util.h" #include <cstdint> #include <string> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/inlined_vector.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/hlo_value.h" #include "xla/shape.h" #include "xla/shape_tree.h" #include "xla/shape_util.h" #include "tsl/platform/statusor.h" namespace xla { HloInstruction* TupleUtil::ExtractPrefix(HloInstruction* input_tuple, int64_t elements, absl::string_view name) { CHECK(input_tuple->shape().IsTuple()); HloComputation* computation = input_tuple->parent(); const Shape& input_shape = input_tuple->shape(); std::vector<HloInstruction*> tuple_elements; tuple_elements.reserve(elements); for (int i = 0; i < elements; i++) { std::string element_name; if (!name.empty()) { element_name = absl::StrCat(name, ".element.", i); } tuple_elements.push_back(computation->AddInstruction( HloInstruction::CreateGetTupleElement(input_shape.tuple_shapes(i), input_tuple, i), element_name)); } return computation->AddInstruction( HloInstruction::CreateTuple(tuple_elements), name); } HloInstruction* TupleUtil::AppendSuffix( HloInstruction* input_tuple, absl::Span<HloInstruction* const> trailing_values) { CHECK(input_tuple->shape().IsTuple()); HloComputation* computation = input_tuple->parent(); const Shape& input_shape = input_tuple->shape(); std::vector<HloInstruction*> tuple_elements; tuple_elements.reserve(input_shape.tuple_shapes_size()); for (int i = 0; i < input_shape.tuple_shapes_size(); i++) { tuple_elements.push_back( computation->AddInstruction(HloInstruction::CreateGetTupleElement( input_shape.tuple_shapes(i), input_tuple, i))); } 
tuple_elements.insert(tuple_elements.end(), trailing_values.begin(), trailing_values.end()); return computation->AddInstruction( HloInstruction::CreateTuple(tuple_elements)); } absl::StatusOr<HloInstruction*> TupleUtil::ReplaceTupleWith( HloInstruction* new_instruction, HloInstruction* tuple, ShapeIndex shape_index, bool insert_bitcast_if_different_shape) { const Shape& tuple_shape = tuple->shape(); CHECK(tuple->shape().IsTuple()) << "ReplaceTupleWith was called for a non-tuple. Tuple = " << tuple->ToString() << ", new_instruction = " << new_instruction->ToString() << ", shape_index = " << shape_index.ToString(); const HloInstruction* instruction = new_instruction; bool equivalent = true; for (int i = shape_index.size() - 1; i >= 0; --i) { int index = shape_index[i]; if (instruction->opcode() != HloOpcode::kGetTupleElement || instruction->tuple_index() != index) { equivalent = false; break; } instruction = instruction->operand(0); } if (equivalent && instruction == tuple) { VLOG(4) << "Instruction " << new_instruction->ToShortString() << " already exists at index " << shape_index.ToString() << " of " << tuple->ToShortString(); return tuple; } HloComputation* computation = new_instruction->parent(); std::vector<HloInstruction*> tuple_args(tuple_shape.tuple_shapes_size()); CHECK_GE(tuple_shape.tuple_shapes_size(), shape_index[0]); for (int i = 0; i < tuple_shape.tuple_shapes_size(); ++i) { const Shape& subshape = tuple_shape.tuple_shapes(i); auto get_operand = [&]() { if (tuple->opcode() == HloOpcode::kTuple) { return tuple->mutable_operand(i); } else { return computation->AddInstruction( HloInstruction::CreateGetTupleElement(subshape, tuple, i)); } }; if (i == shape_index[0]) { if (subshape.IsTuple()) { TF_ASSIGN_OR_RETURN(tuple_args[i], ReplaceTupleWith(new_instruction, get_operand(), ShapeIndex(shape_index.begin() + 1, shape_index.end()))); } else { if (subshape != new_instruction->shape() && insert_bitcast_if_different_shape) { VLOG(4) << "Old shape = " << 
subshape.ToString() << ", new shape = " << new_instruction->shape().ToString() << "; inserting a bitcast."; new_instruction = computation->AddInstruction( HloInstruction::CreateBitcast(subshape, new_instruction)); } else if (tuple->opcode() == HloOpcode::kTuple && tuple->operand(i) == new_instruction) { VLOG(4) << "Tuple already contains the new instruction = " << new_instruction->ToShortString() << " tuple = " << tuple->ToShortString(); return tuple; } tuple_args[i] = new_instruction; } } else { tuple_args[i] = get_operand(); } } if (shape_index[0] == tuple_shape.tuple_shapes_size()) { tuple_args.push_back(new_instruction); } return computation->AddInstruction(HloInstruction::CreateTuple(tuple_args)); } HloInstruction* TupleUtil::AddGetTupleElements( const HloPosition& position) { HloInstruction* instruction = position.instruction; HloComputation* computation = instruction->parent(); for (int64_t index : position.index) { auto gte_it = absl::c_find_if( instruction->users(), [index](const HloInstruction* use) { return use != use->parent()->root_instruction() && use->opcode() == HloOpcode::kGetTupleElement && use->tuple_index() == index; }); if (gte_it != instruction->users().end()) { instruction = *gte_it; } else { instruction = computation->AddInstruction(HloInstruction::CreateGetTupleElement( instruction->shape().tuple_shapes(index), instruction, index)); } } return instruction; } ShapeTree<HloInstruction*> TupleUtil::DisassembleTupleInstruction( HloInstruction* tuple) { const Shape& shape = tuple->shape(); ShapeTree<HloInstruction*> result(shape); result.ForEachMutableElement([&](ShapeIndexView index, HloInstruction** element) { if (index.empty()) { *element = tuple; } else { ShapeIndexView parent_index = index.subspan(0, index.size() - 1); HloInstruction* parent = result.element(parent_index); std::string name = absl::StrCat(tuple->name(), ".disassembled.", absl::StrJoin(index, ".")); *element = tuple->parent()->AddInstruction( 
HloInstruction::CreateGetTupleElement(parent, index.back()), name); } }); return result; } HloInstruction* TupleUtil::AssembleTupleInstruction( HloComputation* computation, ShapeTree<HloInstruction*> elements, absl::string_view name) { elements.ForEachMutableElementPostOrder( [&](const ShapeIndex& index, HloInstruction** element) { const Shape& subshape = ShapeUtil::GetSubshape(elements.shape(), index); if (subshape.IsTuple()) { absl::InlinedVector<HloInstruction*, 2> children; ShapeIndex child_index = index; for (int i = 0; i < subshape.tuple_shapes_size(); ++i) { child_index.push_back(i); children.push_back(elements.element(child_index)); child_index.pop_back(); } std::string new_name; if (!name.empty()) { if (index.empty()) { new_name = std::string(name); } else { new_name = absl::StrCat(name, ".assembled.", absl::StrJoin(index, ".")); } } *element = computation->AddInstruction( HloInstruction::CreateTuple(children), new_name); } }); return elements.element({}); } }
#include "xla/service/tuple_util.h" #include <memory> #include <string> #include "xla/hlo/utils/hlo_matchers.h" #include "xla/service/hlo_module_config.h" #include "xla/service/hlo_parser.h" #include "xla/shape_util.h" #include "xla/test.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/verified_hlo_module.h" namespace xla { namespace { namespace op = ::xla::testing::opcode_matchers; using TupleUtilTest = HloTestBase; TEST_F(TupleUtilTest, ExtractPrefix) { const std::string hlo_string = R"( HloModule Module ENTRY entry { p0 = (f32[32,32]{1,0},f32[32,32]{1,0},f32[32,32]{1,0}) parameter(0) ROOT p1 = f32[32,32]{1,0} parameter(1) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); HloInstruction* param0 = module->entry_computation()->parameter_instruction(0); HloInstruction* prefix = TupleUtil::ExtractPrefix(param0, 2); EXPECT_THAT(prefix, op::Tuple(op::GetTupleElement(op::Parameter(0), 0), op::GetTupleElement(op::Parameter(0), 1))); } TEST_F(TupleUtilTest, AppendSuffix) { const std::string hlo_string = R"( HloModule Module ENTRY entry { p0 = (f32[32,32]{1,0},f32[32,32]{1,0},f32[32,32]{1,0}) parameter(0) ROOT p1 = f32[32,32]{1,0} parameter(1) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); HloInstruction* param0 = module->entry_computation()->parameter_instruction(0); HloInstruction* param1 = module->entry_computation()->parameter_instruction(1); HloInstruction* with_suffix = TupleUtil::AppendSuffix(param0, {param1, param1}); EXPECT_THAT(with_suffix, op::Tuple(op::GetTupleElement(op::Parameter(0), 0), op::GetTupleElement(op::Parameter(0), 1), op::GetTupleElement(op::Parameter(0), 2), op::Parameter(1), op::Parameter(1))); } TEST_F(TupleUtilTest, ReplaceTupleWithTupleInst) { const std::string hlo_string = R"( HloModule Module ENTRY entry { p0 = f32[32,32]{1,0} parameter(0) p1 = f32[32,32]{1,0} parameter(1) ROOT tuple = (f32[32,32]{1,0}, 
f32[32,32]{1,0}) tuple(p0, p1) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); HloInstruction* p0 = FindInstruction(module.get(), "p0"); HloInstruction* tuple = FindInstruction(module.get(), "tuple"); TF_ASSERT_OK_AND_ASSIGN(HloInstruction * new_tuple, TupleUtil::ReplaceTupleWith(p0, tuple, {1})); EXPECT_THAT(new_tuple, op::Tuple(op::Parameter(0), op::Parameter(0))); } TEST_F(TupleUtilTest, ReplaceTupleWithNonTupleInst) { const std::string hlo_string = R"( HloModule Module ENTRY entry { ROOT p0 = (f32[32,32]{1,0}, f32[32,32]{1,0}) parameter(0) p1 = f32[32,32]{1,0} parameter(1) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); HloInstruction* p0 = FindInstruction(module.get(), "p0"); HloInstruction* p1 = FindInstruction(module.get(), "p1"); TF_ASSERT_OK_AND_ASSIGN(HloInstruction * new_tuple, TupleUtil::ReplaceTupleWith(p1, p0, {0})); EXPECT_THAT(new_tuple, op::Tuple(op::Parameter(1), op::GetTupleElement(op::Parameter(0), 1))); } TEST_F(TupleUtilTest, ReplaceTupleWithNonTupleInstNested) { const std::string hlo_string = R"( HloModule Module ENTRY entry { ROOT p0 = (f32[32,32]{1,0}, (f32[32,32]{1,0}, f32[32,32]{1,0})) parameter(0) p1 = f32[32,32]{1,0} parameter(1) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); HloInstruction* p0 = FindInstruction(module.get(), "p0"); HloInstruction* p1 = FindInstruction(module.get(), "p1"); TF_ASSERT_OK_AND_ASSIGN(HloInstruction * new_tuple, TupleUtil::ReplaceTupleWith(p1, p0, {1, 0})); EXPECT_THAT( new_tuple, op::Tuple(op::GetTupleElement(op::Parameter(0), 0), op::Tuple(op::Parameter(1), op::GetTupleElement( op::GetTupleElement(op::Parameter(0), 1), 1)))); } TEST_F(TupleUtilTest, AddGetTupleElements) { const std::string hlo_string = R"( HloModule Module ENTRY entry { p0 = (f32[32,32]{1,0}, (f32[32,32]{1,0}, f32[32,32]{1,0})) parameter(0) gte = 
(f32[32,32]{1,0}, f32[32,32]{1,0}) get-tuple-element(p0), index=1 ROOT root = f32[32,32]{1,0} get-tuple-element(gte), index=1 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); HloInstruction* p0 = FindInstruction(module.get(), "p0"); HloInstruction* existing_gte = FindInstruction(module.get(), "gte"); HloInstruction* new_gte = TupleUtil::AddGetTupleElements({p0, {1, 0}}); EXPECT_THAT(new_gte, op::GetTupleElement(existing_gte, 0)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/tuple_util.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/tuple_util_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
0c1e0df2-8f32-4754-b90e-7b6a758a3abc
cpp
tensorflow/tensorflow
all_gather_decomposer
third_party/xla/xla/service/all_gather_decomposer.cc
third_party/xla/xla/service/all_gather_decomposer_test.cc
#include "xla/service/all_gather_decomposer.h" #include <cstdint> #include <optional> #include <vector> #include "absl/container/flat_hash_set.h" #include "absl/status/status.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/literal_util.h" #include "xla/service/collective_decomposer_utils.h" #include "xla/service/collective_ops_utils.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" namespace xla { namespace { HloComputation* MakeBinaryAdd(PrimitiveType type, HloModule* module) { HloComputation::Builder sum_b("add"); auto x = sum_b.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(type, {}), "x")); auto y = sum_b.AddInstruction(HloInstruction::CreateParameter( 1, ShapeUtil::MakeShape(type, {}), "y")); if (type == PRED) { sum_b.AddInstruction(HloInstruction::CreateBinary( ShapeUtil::MakeShape(type, {}), HloOpcode::kOr, x, y)); } else { sum_b.AddInstruction(HloInstruction::CreateBinary( ShapeUtil::MakeShape(type, {}), HloOpcode::kAdd, x, y)); } HloComputation* reduction = module->AddEmbeddedComputation(sum_b.Build()); return reduction; } } HloInstruction* AllGatherDecomposer::TranslateAllGatherToAllReducePerOperand( CollectiveOpGroupMode group_mode, const HloAllGatherInstruction& ag, const Shape& output_shape, HloInstruction* operand, HloComputation* comp, int64_t ag_dim) { std::vector<HloInstruction*> start_indices = CreateStartIndicesForCollectiveDecomposition( group_mode, ag.replica_groups(), operand->shape(), ag_dim, comp) .value(); auto zero = comp->AddInstruction(HloInstruction::CreateConstant( LiteralUtil::Zero(output_shape.element_type()))); zero = comp->AddInstruction( 
HloInstruction::CreateBroadcast(output_shape, zero, {})); auto dus = comp->AddInstruction(HloInstruction::CreateDynamicUpdateSlice( zero->shape(), zero, operand, start_indices)); auto ar = comp->AddInstruction(HloInstruction::CreateAllReduce( dus->shape(), {dus}, MakeBinaryAdd(dus->shape().element_type(), comp->parent()), ag.device_list(), ag.constrain_layout(), ag.channel_id(), ag.use_global_device_ids())); return ar; } absl::Status AllGatherDecomposer::DecomposeAllGather( HloAllGatherInstruction* ag, HloComputation* comp) { TF_ASSIGN_OR_RETURN(CollectiveOpGroupMode group_mode, GetCollectiveOpGroupMode(ag->channel_id().has_value(), ag->use_global_device_ids())); if (ag->operand_count() > 1) { std::vector<HloInstruction*> tuple_inputs; for (int i = 0; i < ag->operand_count(); ++i) { auto* input_operand = ag->mutable_operand(i); const auto& output_shape = ag->shape().tuple_shapes(i); auto* ar = TranslateAllGatherToAllReducePerOperand( group_mode, *ag, output_shape, input_operand, comp, ag->all_gather_dimension()); tuple_inputs.push_back(ar); } auto tup = comp->AddInstruction(HloInstruction::CreateTuple(tuple_inputs)); TF_RETURN_IF_ERROR(ag->ReplaceAllUsesWith(tup)); } else { auto* ar = TranslateAllGatherToAllReducePerOperand( group_mode, *ag, ag->shape(), ag->mutable_operand(0), comp, ag->all_gather_dimension()); TF_RETURN_IF_ERROR(ag->ReplaceAllUsesWith(ar)); } TF_RETURN_IF_ERROR(comp->RemoveInstructionAndUnusedOperands(ag)); return absl::OkStatus(); } absl::StatusOr<bool> AllGatherDecomposer::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; for (auto comp : module->MakeNonfusionComputations(execution_threads)) { for (auto hlo : comp->MakeInstructionPostOrder()) { if (hlo->opcode() != HloOpcode::kAllGather) { continue; } auto ag = Cast<HloAllGatherInstruction>(hlo); if (ShouldDecompose(*ag)) { TF_RETURN_IF_ERROR(DecomposeAllGather(ag, comp)); changed = true; } } } return changed; } }
#include "xla/service/all_gather_decomposer.h" #include <memory> #include <string> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/service/hlo_parser.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" namespace xla { namespace { using ::testing::AllOf; namespace op = xla::testing::opcode_matchers; using AllGatherDecomposerTest = HloTestBase; TEST_F(AllGatherDecomposerTest, CrossReplicaAllGather) { const std::string module_str = R"( HloModule module ENTRY entry { param0 = f32[10,20] parameter(0) ROOT ag = f32[10,80] all-gather(param0), replica_groups={}, dimensions={1} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnUnverifiedModule((module_str))); AllGatherDecomposer decomposer; TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get())); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), op::AllReduce(op::DynamicUpdateSlice( op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(), op::Multiply(op::ReplicaId(), op::Constant())))); } TEST_F(AllGatherDecomposerTest, CrossReplicaAndPartitionAllGather) { const std::string module_str = R"( HloModule module ENTRY entry { param0 = f32[10,20] parameter(0) ROOT ag = f32[10,80] all-gather(param0), replica_groups={{0}}, channel_id=1, dimensions={1} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnUnverifiedModule((module_str))); AllGatherDecomposer decomposer; TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get())); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), op::AllReduce(op::DynamicUpdateSlice( op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(), op::Multiply(op::PartitionId(), op::Constant())))); } TEST_F(AllGatherDecomposerTest, 
CrossReplicaAllGatherWithTrivialGroup) { const std::string module_str = R"( HloModule module ENTRY entry { param0 = f32[10,20] parameter(0) ROOT ag = f32[10,80] all-gather(param0), replica_groups={{0,1,2,3}}, dimensions={1} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnUnverifiedModule((module_str))); AllGatherDecomposer decomposer; TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get())); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), op::AllReduce(op::DynamicUpdateSlice( op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(), op::Multiply(op::ReplicaId(), op::Constant())))); } TEST_F(AllGatherDecomposerTest, CrossReplicaAllGatherWithSubgroups) { const std::string module_str = R"( HloModule module ENTRY entry { param0 = f32[10,20] parameter(0) ROOT ag = f32[10,80] all-gather(param0), replica_groups={{2,1,0,3}, {4,6,7,5}}, dimensions={1} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnUnverifiedModule((module_str))); AllGatherDecomposer decomposer; TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get())); EXPECT_TRUE(changed); auto id = AllOf(op::Shape("u32[]"), op::Reshape(op::DynamicSlice(op::Constant(), op::ReplicaId()))); EXPECT_THAT(module->entry_computation()->root_instruction(), op::AllReduce(op::DynamicUpdateSlice( op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(), op::Multiply(id, op::Constant())))); } TEST_F(AllGatherDecomposerTest, CrossReplicaAllGatherWithSubgroupsGlobalIds) { const std::string module_str = R"( HloModule module ENTRY entry { param0 = f32[10,20] parameter(0) ROOT ag = f32[10,80] all-gather(param0), replica_groups={{2,1,0,3}, {4,6,7,5}}, dimensions={1}, channel_id=1, use_global_device_ids=true } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnUnverifiedModule((module_str))); AllGatherDecomposer decomposer; TF_ASSERT_OK_AND_ASSIGN(bool changed, 
decomposer.Run(module.get())); EXPECT_TRUE(changed); auto global_id = op::Add(op::Multiply(op::ReplicaId(), op::Constant()), op::PartitionId()); auto id = AllOf(op::Shape("u32[]"), op::Reshape(op::DynamicSlice(op::Constant(), global_id))); EXPECT_THAT(module->entry_computation()->root_instruction(), op::AllReduce(op::DynamicUpdateSlice( op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(), op::Multiply(id, op::Constant())))); } TEST_F(AllGatherDecomposerTest, CrossReplicaAllGatherWithTuple) { const std::string module_str = R"( HloModule module ENTRY entry { param0 = f32[10,20] parameter(0) param1 = f32[10,16] parameter(1) ROOT ag = (f32[10,80], f32[10,64]) all-gather(param0, param1), replica_groups={}, dimensions={1} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnUnverifiedModule((module_str))); AllGatherDecomposer decomposer; TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get())); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Tuple( op::AllReduce(op::DynamicUpdateSlice( op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(), op::Multiply(op::ReplicaId(), op::Constant()))), op::AllReduce(op::DynamicUpdateSlice( op::Broadcast(op::Constant()), op::Parameter(1), op::Constant(), op::Multiply(op::ReplicaId(), op::Constant()))))); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_gather_decomposer.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_gather_decomposer_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
88b9adff-15bb-48a6-920b-e030f186c9d9
cpp
tensorflow/tensorflow
all_reduce_folder
third_party/xla/xla/service/all_reduce_folder.cc
third_party/xla/xla/service/all_reduce_folder_test.cc
#include "xla/service/all_reduce_folder.h" #include <algorithm> #include <cstdint> #include <optional> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/log.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/collective_device_list.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/service/all_reduce_key.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" namespace xla { namespace { std::optional<std::vector<ReplicaGroup>> FoldReplicaGroups( absl::Span<const ReplicaGroup> replica_groups0, absl::Span<const ReplicaGroup> replica_groups1) { int64_t num_replicas = 0; for (const ReplicaGroup &rg : replica_groups0) { for (int64_t id : rg.replica_ids()) { num_replicas = std::max(num_replicas, id); } } num_replicas++; std::vector<int> replica_group_no(num_replicas, -1); for (int group_no = 0; group_no < replica_groups0.size(); ++group_no) { for (int64_t id : replica_groups0[group_no].replica_ids()) { replica_group_no[id] = group_no; } } absl::flat_hash_map<std::vector<bool>, int64_t> contributor_set_id; std::vector<int64_t> contributing_replicas_set_id(num_replicas, 0); int64_t next_id = 1; for (const ReplicaGroup &rg : replica_groups1) { std::vector<bool> contributors(num_replicas, false); for (int64_t id : rg.replica_ids()) { int64_t group_no = replica_group_no[id]; for (int64_t contrib : replica_groups0[group_no].replica_ids()) { if (contributors[contrib]) { return std::nullopt; } contributors[contrib] = true; } } int64_t set_id; auto it = contributor_set_id.find(contributors); if (it != contributor_set_id.end()) { set_id = it->second; } else { set_id = next_id++; 
contributor_set_id[contributors] = set_id; } for (int64_t id : rg.replica_ids()) { contributing_replicas_set_id[id] = set_id; } } std::vector<ReplicaGroup> new_replica_groups; new_replica_groups.reserve(contributor_set_id.size()); for (const auto &it : contributor_set_id) { const std::vector<bool> &contributors = it.first; const int64_t set_id = it.second; new_replica_groups.emplace_back(); ReplicaGroup &group = new_replica_groups.back(); for (int64_t replica = 0; replica < num_replicas; ++replica) { if (contributors[replica]) { if (contributing_replicas_set_id[replica] != set_id) { return std::nullopt; } group.add_replica_ids(replica); } } } absl::c_sort(new_replica_groups, [](const ReplicaGroup &a, const ReplicaGroup &b) { return a.replica_ids(0) < b.replica_ids(0); }); return new_replica_groups; } } absl::StatusOr<bool> AllReduceFolder::Run( HloModule *module, const absl::flat_hash_set<absl::string_view> &execution_threads) { if (hlo_query::ContainsLayoutConstrainedAllReduce(*module)) { VLOG(1) << "Skip AllReduceFolder because the module contains all-reduce " "with constrained layouts"; return false; } int64_t next_channel_id = hlo_query::NextChannelId(*module); bool changed = false; for (auto computation : module->computations(execution_threads)) { for (HloInstruction *inst : computation->MakeInstructionPostOrder()) { if (inst->opcode() != HloOpcode::kAllReduce || inst->operand(0)->opcode() != HloOpcode::kAllReduce) { continue; } auto *ar0 = Cast<HloAllReduceInstruction>(inst->mutable_operand(0)); auto *ar1 = Cast<HloAllReduceInstruction>(inst); if (ar0->user_count() != 1) { continue; } std::optional<AllReduceKey> key0 = GetAllReduceKey( ar0, nullptr, true); std::optional<AllReduceKey> key1 = GetAllReduceKey( ar1, nullptr, true); if (!key0 || !key1 || *key0 != *key1 || ar0->replica_groups().empty() || ar1->replica_groups().empty()) { continue; } std::optional<std::vector<ReplicaGroup>> new_replica_groups = FoldReplicaGroups(ar0->replica_groups(), 
ar1->replica_groups()); if (!new_replica_groups) { continue; } std::optional<int64_t> channel_id; if (ar0->channel_id()) { channel_id = next_channel_id++; } HloInstruction *new_ar = computation->AddInstruction(HloInstruction::CreateAllReduce( ar0->shape(), ar0->operands(), ar0->to_apply(), CollectiveDeviceList(*new_replica_groups), false, channel_id, ar0->use_global_device_ids())); TF_RETURN_IF_ERROR(ar1->ReplaceAllUsesWith(new_ar)); TF_RETURN_IF_ERROR(computation->RemoveInstruction(ar1)); TF_RETURN_IF_ERROR(computation->RemoveInstruction(ar0)); changed = true; } } return changed; } }
#include "xla/service/all_reduce_folder.h" #include <cstddef> #include <initializer_list> #include <memory> #include "absl/algorithm/container.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/test.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/statusor.h" namespace xla { namespace { namespace matcher = xla::testing::opcode_matchers; using ::testing::HasSubstr; class AllReduceFolderTest : public HloTestBase {}; const char *k2AllReduce = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) ar0 = f32[8] all-reduce(p0), replica_groups=$group_0, to_apply=sum ROOT ar1 = f32[8] all-reduce(ar0), replica_groups=$group_1, to_apply=sum } )"; size_t AllReduceCount(HloModule *module) { return absl::c_count_if(module->entry_computation()->instructions(), HloPredicateIsOp<HloOpcode::kAllReduce>); } void ExpectOneAllReduce(HloModule *module, absl::string_view target_replica_groups) { EXPECT_EQ(AllReduceCount(module), 1); HloInstruction *root = module->entry_computation()->root_instruction(); EXPECT_THAT(root, matcher::AllReduce(matcher::Parameter(0))); EXPECT_THAT(root->ToString(), HasSubstr(target_replica_groups)); } TEST_F(AllReduceFolderTest, Simple) { TF_ASSERT_OK_AND_ASSIGN( auto module, RunAndCheckHloRewrite(k2AllReduce, AllReduceFolder(), true, {{"$group_0", "{{0,1},{2,3}}"}, {"$group_1", "{{0,2},{1,3}}"}})); ExpectOneAllReduce(module.get(), "replica_groups={{0,1,2,3}}"); } TEST_F(AllReduceFolderTest, SimpleSwap) { TF_ASSERT_OK_AND_ASSIGN( auto module, RunAndCheckHloRewrite(k2AllReduce, AllReduceFolder(), true, {{"$group_1", "{{0,1},{2,3}}"}, {"$group_0", "{{0,2},{1,3}}"}})); ExpectOneAllReduce(module.get(), "replica_groups={{0,1,2,3}}"); } TEST_F(AllReduceFolderTest, 
BothEmptyReplicaGroups_NotTransformed) { TF_ASSERT_OK(RunAndCheckHloRewrite(k2AllReduce, AllReduceFolder(), false, {{"$group_0", "{}"}, {"$group_1", "{}"}})); } TEST_F(AllReduceFolderTest, EmptyReplicaGroups_NotTransformed) { TF_ASSERT_OK(RunAndCheckHloRewrite( k2AllReduce, AllReduceFolder(), false, {{"$group_0", "{}"}, {"$group_1", "{{0,2},{1,3}}"}})); } TEST_F(AllReduceFolderTest, MismatchOtherProperties0_NotTransformed) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) ar0 = f32[8] all-reduce(p0), replica_groups={{0,1},{2,3}}, channel_id=1, to_apply=sum ROOT ar1 = f32[8] all-reduce(ar0), replica_groups={{0,2},{1,3}}, to_apply=sum } )"; TF_ASSERT_OK(RunAndCheckHloRewrite(hlo_string, AllReduceFolder(), false)); } TEST_F(AllReduceFolderTest, MismatchOtherProperties1_NotTransformed) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } mul { a = f32[] parameter(0) b = f32[] parameter(1) ROOT mul = f32[] multiply(a, b) } ENTRY main { p0 = f32[8] parameter(0) ar0 = f32[8] all-reduce(p0), replica_groups={{0,1},{2,3}}, to_apply=sum ROOT ar1 = f32[8] all-reduce(ar0), replica_groups={{0,2},{1,3}}, to_apply=mul } )"; TF_ASSERT_OK(RunAndCheckHloRewrite(hlo_string, AllReduceFolder(), false)); } TEST_F(AllReduceFolderTest, NotFoldable_NotTransformed) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) ar0 = f32[8] all-reduce(p0), replica_groups={{0,1},{2,3}}, to_apply=sum ROOT ar1 = f32[8] all-reduce(ar0), replica_groups={{0,1},{2,3}}, to_apply=sum } )"; TF_ASSERT_OK(RunAndCheckHloRewrite(hlo_string, AllReduceFolder(), false)); } TEST_F(AllReduceFolderTest, Foldable0) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] 
parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) ar0 = f32[8] all-reduce(p0), replica_groups={{0,4},{1,5},{2,3},{6,7}}, to_apply=sum ROOT ar1 = f32[8] all-reduce(ar0), replica_groups={{0,5},{4,1},{2,7},{3,6}}, to_apply=sum } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunAndCheckHloRewrite(hlo_string, AllReduceFolder())); ExpectOneAllReduce(module.get(), "replica_groups={{0,1,4,5},{2,3,6,7}}"); } TEST_F(AllReduceFolderTest, FoldableChain) { absl::string_view hlo_string = R"( HloModule m sum { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add.2 = f32[] add(a, b) } ENTRY main { p0 = f32[8] parameter(0) ar0 = f32[8] all-reduce(p0), replica_groups={{0,1},{2,3},{4,5},{6,7}}, to_apply=sum ar1 = f32[8] all-reduce(ar0), replica_groups={{0,2},{1,3},{4,6},{5,7}}, to_apply=sum ROOT ar2 = f32[8] all-reduce(ar1), replica_groups={{0,4},{1,5},{2,6},{3,7}}, to_apply=sum } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunAndCheckHloRewrite(hlo_string, AllReduceFolder())); ExpectOneAllReduce(module.get(), "replica_groups={{0,1,2,3,4,5,6,7}}"); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_folder.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_folder_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
1adb9aee-06f6-4921-bc77-f195f56319b6
cpp
tensorflow/tensorflow
sharding_remover
third_party/xla/xla/service/sharding_remover.cc
third_party/xla/xla/service/sharding_remover_test.cc
#include "xla/service/sharding_remover.h" #include <memory> #include <optional> #include <string> #include <utility> #include <vector> #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/spmd/shardy/constants.h" #include "tsl/platform/errors.h" namespace xla { absl::StatusOr<bool> ShardingRemover::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; const absl::flat_hash_set<absl::string_view> to_remove_sharding_ops = { "Sharding", "SPMDShardToFullShape", "SPMDFullToShardShape", sdy::kFuncResultShardingTargetName}; for (HloComputation* computation : module->computations(execution_threads)) { auto instructions = computation->MakeInstructionPostOrder(); std::reverse(instructions.begin(), instructions.end()); for (HloInstruction* instruction : instructions) { if (instruction->opcode() != HloOpcode::kCustomCall) { continue; } if (!to_remove_sharding_ops.contains(instruction->custom_call_target())) { continue; } CHECK(instruction->operand_count() == 1) << "Sharding instruction must have exactly one operand"; TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith( instruction->mutable_operand(0), name())); changed = true; if (instruction->custom_call_target() == "Sharding" || instruction->custom_call_target() == sdy::kFuncResultShardingTargetName) { auto copy = computation->AddInstruction( HloInstruction::CreateUnary(instruction->shape(), HloOpcode::kCopy, instruction->mutable_operand(0))); TF_RETURN_IF_ERROR(computation->ReplaceInstruction(instruction, copy)); instruction = copy; } } } return changed; } }
#include "xla/service/sharding_remover.h" #include <gtest/gtest.h> #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/service/hlo_parser.h" #include "xla/status_macros.h" #include "xla/tests/hlo_test_base.h" namespace op = xla::testing::opcode_matchers; namespace xla { namespace { using ShardingRemoverTest = HloTestBase; TEST_F(ShardingRemoverTest, RemoveSharding) { const char* const hlo_string = R"( HloModule module ENTRY entry { %parameter.3379 = f32[1,1]{1,0} parameter(0) %custom-call.3380 = f32[1,1]{1,0} custom-call(f32[1,1]{1,0} %parameter.3379), custom_call_target="Sharding", sharding={replicated} ROOT %reshape.6032 = f32[] reshape(f32[1,1]{1,0} %custom-call.3380) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardingRemover().Run(module.get())); EXPECT_TRUE(changed); auto root = module->entry_computation()->root_instruction(); EXPECT_THAT(root, op::Reshape(op::Parameter())); auto parameter = root->operand(0); EXPECT_EQ(parameter->user_count(), 2); bool replaced = false; for (HloInstruction* user : parameter->users()) { if (user->opcode() == HloOpcode::kCopy) { replaced = true; EXPECT_THAT(user, op::Copy(op::Parameter())); break; } } EXPECT_TRUE(replaced); } TEST_F(ShardingRemoverTest, RemoveSPMDShardingToFullShape) { const char* const hlo_string = R"( HloModule module ENTRY entry { %parameter.3379 = f32[1,1]{1,0} parameter(0) %custom-call.3380 = f32[1,1]{1,0} custom-call(f32[1,1]{1,0} %parameter.3379), custom_call_target="SPMDShardToFullShape", sharding={replicated} ROOT %reshape.6032 = f32[] reshape(f32[1,1]{1,0} %custom-call.3380) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardingRemover().Run(module.get())); EXPECT_TRUE(changed); auto root = module->entry_computation()->root_instruction(); EXPECT_THAT(root, 
op::Reshape(op::Parameter())); } TEST_F(ShardingRemoverTest, RemoveSPMDFullToShardShape) { const char* const hlo_string = R"( HloModule module ENTRY entry { %parameter.3379 = f32[1,1]{1,0} parameter(0) %custom-call.3380 = f32[1,1]{1,0} custom-call(f32[1,1]{1,0} %parameter.3379), custom_call_target="SPMDFullToShardShape", sharding={replicated} ROOT %reshape.6032 = f32[] reshape(f32[1,1]{1,0} %custom-call.3380) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardingRemover().Run(module.get())); EXPECT_TRUE(changed); auto root = module->entry_computation()->root_instruction(); EXPECT_THAT(root, op::Reshape(op::Parameter())); } TEST_F(ShardingRemoverTest, NoChangeForOtherCustomCall) { const char* const hlo_string = R"( HloModule cluster_2013453984438090939__.47 ENTRY %cluster_2013453984438090939__.47 (arg_tuple.1: ()) -> (bf16[2,2000], s32[2,2000]) { %arg_tuple.1 = bf16[2,209664] parameter(0) %custom-call = (bf16[2,2000]{1,0}, s32[2,2000]{1,0}) custom-call(bf16[2,209664]{1,0} %arg_tuple.1), custom_call_target="TopK" %get-tuple-element = bf16[2,2000]{1,0} get-tuple-element((bf16[2,2000]{1,0}, s32[2,2000]{1,0}) %custom-call), index=0 %get-tuple-element.1 = s32[2,2000]{1,0} get-tuple-element((bf16[2,2000]{1,0}, s32[2,2000]{1,0}) %custom-call), index=1, sharding={replicated} ROOT %tuple.46 = (bf16[2,2000]{1,0}, s32[2,2000]{1,0}) tuple(bf16[2,2000]{1,0} %get-tuple-element, s32[2,2000]{1,0} %get-tuple-element.1), metadata={op_name="XLA_Retvals"} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardingRemover().Run(module.get())); EXPECT_FALSE(changed); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/sharding_remover.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/sharding_remover_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
de45acd3-74d9-4eaa-9c40-ea4acfaf2a07
cpp
tensorflow/tensorflow
tuple_points_to_analysis
third_party/xla/xla/service/tuple_points_to_analysis.cc
third_party/xla/xla/service/tuple_points_to_analysis_test.cc
#include "xla/service/tuple_points_to_analysis.h" #include <memory> #include <ostream> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/map_util.h" #include "xla/service/hlo_dataflow_analysis.h" #include "xla/shape_util.h" #include "xla/types.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" namespace xla { std::string BufferAlias::ToString() const { return absl::StrCat("BufferAlias(", instruction_->name(), "[", absl::StrJoin(index_, ","), "])"); } std::ostream& operator<<(std::ostream& out, const BufferAlias& buffer_alias) { out << buffer_alias.ToString(); return out; } bool PointsToSet::IsAmbiguous() const { bool ambiguous = false; ForEachElement( [&ambiguous](const ShapeIndex& , const BufferList& points_to) { ambiguous |= points_to.size() > 1; }); return ambiguous; } bool PointsToSet::IsDistinct() const { bool distinct = true; absl::flat_hash_set<const LogicalBuffer*> all_points_to; ForEachElement([&](const ShapeIndex& , const BufferList& points_to) { for (auto& buffer : points_to) { if (all_points_to.contains(buffer)) { distinct = false; } all_points_to.insert(buffer); } }); return distinct; } size_t PointsToSet::size() const { return CreateFlattenedSet().size(); } PointsToSet::BufferSet PointsToSet::CreateFlattenedSet() const { BufferSet flat_set; ForEachElement( [&flat_set](const ShapeIndex& , const BufferList& buffers) { flat_set.insert(buffers.begin(), buffers.end()); }); return flat_set; } bool PointsToSet::ContainsBuffer(const LogicalBuffer& buffer) const { bool found = false; ForEachElement([&found, &buffer](const ShapeIndex& , const BufferList& pointed_to_buffers) { if (!found && 
absl::c_linear_search(pointed_to_buffers, &buffer)) { found = true; } }); return found; } bool PointsToSet::ContainsBufferAtIndex(const LogicalBuffer& buffer, const ShapeIndex& index) const { const auto& pointed_to_buffers = element(index); return absl::c_linear_search(pointed_to_buffers, &buffer); } void PointsToSet::AddPointedToBuffer(const LogicalBuffer& buffer, const ShapeIndex& index) { if (ContainsBufferAtIndex(buffer, index)) { return; } mutable_element(index)->push_back(&buffer); } const PointsToSet::SourceSet& PointsToSet::tuple_sources( const ShapeIndex& index) const { return tree_.element(index).tuple_sources; } void PointsToSet::add_tuple_source(const ShapeIndex& index, HloInstruction* tuple) { tree_.mutable_element(index)->tuple_sources.insert(tuple); } namespace { void GatherFusionInstructions( HloInstruction* instruction, std::vector<HloInstruction*>* fusion_instructions) { CHECK_EQ(HloOpcode::kFusion, instruction->opcode()); for (auto* fused : instruction->fused_instructions()) { if (fused->opcode() == HloOpcode::kFusion) { GatherFusionInstructions(fused, fusion_instructions); } } fusion_instructions->push_back(instruction); } } absl::StatusOr<std::unique_ptr<TuplePointsToAnalysis>> TuplePointsToAnalysis::Run(const HloModule* module) { auto logical_buffer_analysis = LogicalBufferAnalysis::Run(module); std::unique_ptr<TuplePointsToAnalysis> analysis(new TuplePointsToAnalysis( module, std::move(logical_buffer_analysis).value())); TF_RETURN_IF_ERROR(analysis->Analyze()); return std::move(analysis); } absl::Status TuplePointsToAnalysis::Analyze() { per_instruction_.clear(); per_instruction_.reserve(module_->instruction_count()); logical_buffer_aliases_.clear(); logical_buffer_aliases_.resize( logical_buffer_analysis_->num_logical_buffers()); std::vector<HloInstruction*> fusion_instructions; for (auto* computation : module_->MakeNonfusionComputations()) { TF_RETURN_IF_ERROR(computation->Accept(this)); TF_RETURN_IF_ERROR( 
PopulateDefinedBuffersAndAliases(computation->instructions())); for (auto* instruction : computation->instructions()) { if (instruction->opcode() == HloOpcode::kFusion) { GatherFusionInstructions(instruction, &fusion_instructions); } } } for (auto* instruction : fusion_instructions) { TF_RETURN_IF_ERROR(instruction->fused_expression_root()->Accept(this)); TF_RETURN_IF_ERROR( PopulateDefinedBuffersAndAliases(instruction->fused_instructions())); } XLA_VLOG_LINES(3, ToString()); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::PopulateDefinedBuffersAndAliases( const decltype(std::declval<HloComputation>() .instructions())& instructions) { for (auto* instruction : instructions) { PerInstruction* pi = PerInst(instruction); TF_RETURN_IF_ERROR(GatherBuffersDefinedByInstruction( instruction, &pi->instruction_defined_buffers)); const PointsToSet& points_to_set = GetPointsToSet(instruction); points_to_set.ForEachElement( [this, &instruction]( const ShapeIndex& index, const PointsToSet::BufferList& pointed_to_buffers) { for (const LogicalBuffer* buffer : pointed_to_buffers) { logical_buffer_aliases_[buffer->id()].emplace_back(instruction, index); } }); } return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::DefaultAction( HloInstruction* hlo_instruction) { PointsToSet& points_to_set = CreateEmptyPointsToSet(hlo_instruction); points_to_set.ForEachMutableElement( [this, hlo_instruction](const ShapeIndex& index, PointsToSet::BufferList* buffers) { buffers->push_back( &logical_buffer_analysis_->GetBuffer(hlo_instruction, index)); }); if (hlo_instruction->shape().IsTuple()) { points_to_set.add_tuple_source({}, hlo_instruction); } return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleGetTupleElement( HloInstruction* get_tuple_element) { int64_t element_index = get_tuple_element->tuple_index(); PointsToSet& points_to_set = CreateEmptyPointsToSet(get_tuple_element); const PointsToSet& operand_points_to_set = 
*PerInst(get_tuple_element->operand(0))->points_to_set; points_to_set.ForEachMutableElement( [&](const ShapeIndex& target_index, PointsToSet::BufferList* points_to) { ShapeIndex src_index; src_index.push_back(element_index); for (auto element : target_index) { src_index.push_back(element); } *points_to = operand_points_to_set.element(src_index); for (HloInstruction* tuple : operand_points_to_set.tuple_sources(src_index)) { points_to_set.add_tuple_source(target_index, tuple); } }); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleCopy(HloInstruction* copy) { PointsToSet& points_to_set = CreateCopiedPointsToSet(copy, copy->operand(0)); points_to_set.mutable_element({})->clear(); points_to_set.AddPointedToBuffer( logical_buffer_analysis_->GetBuffer(copy, {}), {}); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleBitcast(HloInstruction* bitcast) { CreateCopiedPointsToSet(bitcast, bitcast->operand(0)); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleDomain(HloInstruction* domain) { CreateCopiedPointsToSet(domain, domain->operand(0)); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleAddDependency( HloInstruction* add_dependency) { CreateCopiedPointsToSet(add_dependency, add_dependency->operand(0)); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleRecvDone(HloInstruction* recv_done) { PointsToSet& points_to_set = CreateEmptyPointsToSet(recv_done); points_to_set.AddPointedToBuffer( logical_buffer_analysis_->GetBuffer(recv_done, {}), {}); points_to_set.AddPointedToBuffer( logical_buffer_analysis_->GetBuffer(recv_done, {1}), {1}); const PointsToSet& operand_points_to_set = GetPointsToSet(recv_done->operand(0)); points_to_set.ForEachMutableElement( [&points_to_set, &operand_points_to_set]( const ShapeIndex& index, PointsToSet::BufferList* buffers) { if (index.empty() || index[0] != 0) { return; } *buffers = operand_points_to_set.element(index); for (auto& tuple_source : 
operand_points_to_set.tuple_sources(index)) { points_to_set.add_tuple_source(index, tuple_source); } }); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleAsyncStart( HloInstruction* async_start) { PointsToSet& points_to_set = CreateEmptyPointsToSet(async_start); points_to_set.ForEachMutableElement( [&](const ShapeIndex& target_index, PointsToSet::BufferList* buffers) { if (target_index.size() >= 2 && target_index.front() == 0) { const PointsToSet& operand_points_to_set = GetPointsToSet(async_start->operand(target_index[1])); ShapeIndex source_index(target_index.begin() + 2, target_index.end()); *buffers = operand_points_to_set.element(source_index); for (HloInstruction* tuple : operand_points_to_set.tuple_sources(source_index)) { points_to_set.add_tuple_source(target_index, tuple); } } else { buffers->push_back( &logical_buffer_analysis_->GetBuffer(async_start, target_index)); } }); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleAsyncUpdate( HloInstruction* async_update) { PointsToSet& points_to_set = CreateEmptyPointsToSet(async_update); const PointsToSet& operand_points_to_set = GetPointsToSet(async_update->operand(0)); CHECK_EQ(async_update->shape(), async_update->operand(0)->shape()); points_to_set.ForEachMutableElement([&](const ShapeIndex& index, PointsToSet::BufferList* buffers) { *buffers = operand_points_to_set.element(index); for (HloInstruction* tuple : operand_points_to_set.tuple_sources(index)) { points_to_set.add_tuple_source(index, tuple); } }); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleAsyncDone( HloInstruction* async_done) { PointsToSet& points_to_set = CreateEmptyPointsToSet(async_done); const PointsToSet& operand_points_to_set = GetPointsToSet(async_done->operand(0)); operand_points_to_set.ForEachElement( [&points_to_set, &operand_points_to_set]( const ShapeIndex& src_index, const PointsToSet::BufferList& points_to) { if (!src_index.empty() && src_index.front() == 1) { const 
ShapeIndex target_index(src_index.begin() + 1, src_index.end()); *points_to_set.mutable_element(target_index) = points_to; for (HloInstruction* tuple : operand_points_to_set.tuple_sources(src_index)) { points_to_set.add_tuple_source(target_index, tuple); } } }); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleCopyStart( HloInstruction* copy_start) { PointsToSet& points_to_set = CreateEmptyPointsToSet(copy_start); const PointsToSet& operand_points_to_set = GetPointsToSet(copy_start->operand(0)); points_to_set.ForEachMutableElement( [&](const ShapeIndex& target_index, PointsToSet::BufferList* buffers) { if (target_index == ShapeIndex({1})) { *buffers = operand_points_to_set.element({}); } else { buffers->push_back( &logical_buffer_analysis_->GetBuffer(copy_start, target_index)); } }); for (HloInstruction* tuple : operand_points_to_set.tuple_sources({})) { points_to_set.add_tuple_source({1}, tuple); } return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleCopyDone(HloInstruction* copy_done) { PointsToSet& points_to_set = CreateEmptyPointsToSet(copy_done); const PointsToSet& operand_points_to_set = GetPointsToSet(copy_done->operand(0)); operand_points_to_set.ForEachElement( [&points_to_set, &operand_points_to_set]( const ShapeIndex& src_index, const PointsToSet::BufferList& points_to) { if (src_index == ShapeIndex({0})) { const ShapeIndex target_index = {}; *points_to_set.mutable_element(target_index) = points_to; for (HloInstruction* tuple : operand_points_to_set.tuple_sources(src_index)) { points_to_set.add_tuple_source(target_index, tuple); } } }); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleSend(HloInstruction* send) { PointsToSet& points_to_set = CreateEmptyPointsToSet(send); auto top_buffer = points_to_set.mutable_element(ShapeIndex({})); top_buffer->push_back( &logical_buffer_analysis_->GetBuffer(send, ShapeIndex({}))); points_to_set.add_tuple_source({}, send); auto context_buffer = 
points_to_set.mutable_element(ShapeIndex({1})); context_buffer->push_back( &logical_buffer_analysis_->GetBuffer(send, ShapeIndex({1}))); auto token_buffer = points_to_set.mutable_element(ShapeIndex({2})); token_buffer->push_back( &logical_buffer_analysis_->GetBuffer(send, ShapeIndex({2}))); const PointsToSet& operand_points_to_set = GetPointsToSet(send->operand(0)); operand_points_to_set.ForEachElement( [&points_to_set, &operand_points_to_set]( const ShapeIndex& src_index, const PointsToSet::BufferList& points_to) { ShapeIndex target_index({0}); for (auto element : src_index) { target_index.push_back(element); } *points_to_set.mutable_element(target_index) = points_to; for (HloInstruction* tuple : operand_points_to_set.tuple_sources(src_index)) { points_to_set.add_tuple_source(target_index, tuple); } }); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleTuple(HloInstruction* tuple) { absl::Span<HloInstruction* const> operands(tuple->operands()); PointsToSet& points_to_set = CreateEmptyPointsToSet(tuple); points_to_set.AddPointedToBuffer( logical_buffer_analysis_->GetBuffer(tuple, {}), {}); for (int64_t i = 0; i < operands.size(); ++i) { const PointsToSet& operand_points_to_set = *PerInst(operands[i])->points_to_set; operand_points_to_set.ForEachElement( [&points_to_set, &operand_points_to_set, i]( const ShapeIndex& src_index, const PointsToSet::BufferList& points_to) { ShapeIndex target_index; target_index.push_back(i); for (auto element : src_index) { target_index.push_back(element); } *points_to_set.mutable_element(target_index) = points_to; for (HloInstruction* tuple : operand_points_to_set.tuple_sources(src_index)) { points_to_set.add_tuple_source(target_index, tuple); } }); } points_to_set.add_tuple_source({}, tuple); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleCustomCall( HloInstruction* custom_call) { auto ccall = Cast<HloCustomCallInstruction>(custom_call); PointsToSet& points_to_set = 
CreateEmptyPointsToSet(custom_call); absl::flat_hash_map<ShapeIndex, std::pair<int64_t, ShapeIndex>> aliased_outputs; for (const auto& pair : ccall->output_to_operand_aliasing()) { aliased_outputs.emplace(pair.first, pair.second); } points_to_set.ForEachMutableElement([&](const ShapeIndex& index, PointsToSet::BufferList* buffers) { auto it = aliased_outputs.find(index); if (it == aliased_outputs.end() || !alias_buffer_across_dataflow_) { points_to_set.AddPointedToBuffer( logical_buffer_analysis_->GetBuffer(custom_call, index), index); } else { const PointsToSet& input_set = *PerInst(ccall->operand(it->second.first))->points_to_set; for (const LogicalBuffer* input_buffer : input_set.element(it->second.second)) { points_to_set.AddPointedToBuffer(*input_buffer, index); } for (HloInstruction* tuple : input_set.tuple_sources(it->second.second)) { points_to_set.add_tuple_source(index, tuple); } } }); points_to_set.add_tuple_source({}, custom_call); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleFusion(HloInstruction* fusion) { auto cfusion = Cast<HloFusionInstruction>(fusion); PointsToSet& points_to_set = CreateEmptyPointsToSet(fusion); absl::flat_hash_map<ShapeIndex, std::pair<int64_t, ShapeIndex>> aliased_outputs; for (const auto& pair : cfusion->output_to_operand_aliasing()) { aliased_outputs.emplace(pair.first, pair.second); } points_to_set.ForEachMutableElement([&](const ShapeIndex& index, PointsToSet::BufferList* buffers) { auto it = aliased_outputs.find(index); if (it == aliased_outputs.end()) { points_to_set.AddPointedToBuffer( logical_buffer_analysis_->GetBuffer(fusion, index), index); } else { const PointsToSet& input_set = *PerInst(cfusion->operand(it->second.first))->points_to_set; for (const LogicalBuffer* input_buffer : input_set.element(it->second.second)) { points_to_set.AddPointedToBuffer(*input_buffer, index); } for (HloInstruction* tuple : input_set.tuple_sources(it->second.second)) { points_to_set.add_tuple_source(index, tuple); 
} } }); points_to_set.add_tuple_source({}, fusion); return absl::OkStatus(); } absl::Status TuplePointsToAnalysis::HandleOptimizationBarrier( HloInstruction* barrier) { CreateCopiedPointsToSet(barrier, barrier->operand(0)); return absl::OkStatus(); } const PointsToSet& TuplePointsToAnalysis::GetPointsToSet( const HloInstruction* hlo_instruction) const { return *PerInst(hlo_instruction)->points_to_set; } PointsToSet& TuplePointsToAnalysis::CreateEmptyPointsToSet( const HloInstruction* instruction) { PerInstruction* pi = PerInst(instruction); CHECK(pi->points_to_set == nullptr) << "instruction should not have been present in the map."; auto set = std::make_unique<PointsToSet>(&instruction->shape()); pi->points_to_set = std::move(set); return *pi->points_to_set; } bool TuplePointsToAnalysis::InstructionDefinesBufferAtIndex( const HloInstruction* instruction, const ShapeIndex& index) const { const auto& buffers = GetPointsToSet(instruction).element(index); return (buffers.size() == 1 && buffers[0]->instruction() == instruction); } absl::Status TuplePointsToAnalysis::VerifyBuffer( const LogicalBuffer& buffer) const { if (!InstructionDefinesBufferAtIndex(buffer.instruction(), buffer.index())) { return FailedPrecondition( "LogicalBuffer %s is ill-defined: instruction %s does not define a " "buffer at that index", buffer.ToString(), buffer.instruction()->name()); } if (buffer.id() < 0 || buffer.id() >= logical_buffer_analysis_->num_logical_buffers()) { return FailedPrecondition("LogicalBuffer %s is ill-defined: invalid id %d", buffer.ToString(), buffer.id()); } if (GetBuffer(buffer.id()).instruction() != buffer.instruction() || GetBuffer(buffer.id()).index() != buffer.index()) { return FailedPrecondition( "LogicalBuffer %s is ill-defined: buffer with same id differs: %s", buffer.ToString(), GetBuffer(buffer.id()).ToString()); } return absl::OkStatus(); } const LogicalBuffer& TuplePointsToAnalysis::GetBuffer( LogicalBuffer::Id id) const { CHECK_GE(id, 0); CHECK_LT(id, 
logical_buffer_analysis_->num_logical_buffers()); return logical_buffer_analysis_->GetBuffer(id); } absl::StatusOr<const LogicalBuffer*> TuplePointsToAnalysis::GetBufferDefinedAt( const HloInstruction* instruction, const ShapeIndex& index) const { const auto& buffers = GetPointsToSet(instruction).element(index); if (buffers.size() != 1 || buffers[0]->instruction() != instruction) { return FailedPrecondition( "instruction %s does not define buffer at index {%s}", instruction->name(), absl::StrJoin(index, ",")); } return buffers[0]; } const TuplePointsToAnalysis::BufferAliasVector& TuplePointsToAnalysis::GetBufferAliases(const LogicalBuffer& buffer) const { return logical_buffer_aliases_[buffer.id()]; } const TuplePointsToAnalysis::BufferDefinitionVector& TuplePointsToAnalysis::GetBuffersDefinedByInstruction( const HloInstruction* instruction) const { return PerInst(instruction)->instruction_defined_buffers; } absl::Status TuplePointsToAnalysis::GatherBuffersDefinedByInstruction( const HloInstruction* instruction, TuplePointsToAnalysis::BufferDefinitionVector* buffers) { GetPointsToSet(instruction) .ForEachElement([buffers, instruction]( const ShapeIndex& index, const PointsToSet::BufferList& source_buffers) { CHECK(!source_buffers.empty()); if (source_buffers.size() == 1 && source_buffers[0]->instruction() == instruction) { DCHECK(source_buffers[0]->index() == index); buffers->push_back(source_buffers[0]); } else { for (const LogicalBuffer* source_buffer : source_buffers) { DCHECK(source_buffer->instruction() != instruction); } } }); return absl::OkStatus(); } PointsToSet& TuplePointsToAnalysis::CreateCopiedPointsToSet( const HloInstruction* instruction, const HloInstruction* src) { PointsToSet& dst_points_to_set = CreateEmptyPointsToSet(instruction); const PointsToSet& src_points_to_set = GetPointsToSet(src); dst_points_to_set.ForEachMutableElement( [&dst_points_to_set, &src_points_to_set]( const ShapeIndex& index, PointsToSet::BufferList* buffers) { *buffers = 
src_points_to_set.element(index); for (auto& tuple_source : src_points_to_set.tuple_sources(index)) { dst_points_to_set.add_tuple_source(index, tuple_source); } }); return *PerInst(instruction)->points_to_set; } std::string TuplePointsToAnalysis::ToString() const { std::string output = absl::StrFormat("TuplePointsToSet for module %s:\n", module_->name()); for (const auto* computation : module_->MakeNonfusionComputations()) { const char* entry = computation == module_->entry_computation() ? "entry " : ""; absl::StrAppend(&output, entry, "computation ", computation->name(), ":\n"); for (const HloInstruction* instruction : computation->MakeInstructionPostOrder()) { InstructionToString(instruction, &output); if (instruction->opcode() == HloOpcode::kFusion) { for (auto* fused : instruction->fused_instructions()) { InstructionToString(fused, &output); } } } } absl::StrAppend(&output, "LogicalBuffers:\n"); for (const auto& b : logical_buffer_analysis_->logical_buffers()) { absl::StrAppend(&output, " buffer ", b->ToString(), ":\n"); for (const BufferAlias& alias : logical_buffer_aliases_[b->id()]) { absl::StrAppend(&output, " alias ", alias.ToString(), "\n"); } } return output; } void TuplePointsToAnalysis::InstructionToString( const HloInstruction* instruction, std::string* output) const { const std::string prefix = instruction->IsFused() ? 
" " : ""; absl::StrAppend(output, prefix, " instruction ", instruction->ToShortString(), ":\n"); const PointsToSet& points_to_set = GetPointsToSet(instruction); points_to_set.ForEachElement( [&prefix, &output](const ShapeIndex& index, const PointsToSet::BufferList& points_to) { absl::StrAppend( output, prefix, " {", absl::StrJoin(index, ","), "}: ", absl::StrJoin(points_to, ", ", [](std::string* out, const LogicalBuffer* source) { out->append(source->ToString()); }), "\n"); }); } bool TuplePointsToAnalysis::DoesNotUseOperandBuffer( const HloInstruction* operand, const ShapeIndex& index, const HloInstruction* user) const { CHECK(user->IsUserOf(operand)) << "user: " << user->ToString() << " operand: " << operand->ToString(); if (user->opcode() == HloOpcode::kGetTupleElement && !index.empty()) { return true; } else if (user->IsLoopFusion()) { auto it = absl::c_find_if( user->fused_parameters(), [&](HloInstruction* fused_param) { return user->operand(fused_param->parameter_number()) == operand; }); CHECK(it != user->fused_parameters().end()); const LogicalBuffer* buffer = GetBufferDefinedAt(*it, index).value(); for (const BufferAlias& alias : GetBufferAliases(*buffer)) { for (HloInstruction* alias_user : alias.instruction()->users()) { if (DoesNotUseOperandBuffer(alias.instruction(), alias.index(), alias_user)) { continue; } return false; } } return true; } return false; } std::vector<std::pair<HloInstruction*, int64_t>> TuplePointsToAnalysis::GetAllUsesOfInstructionAtIndex( HloInstruction* instruction, const ShapeIndex& index) const { std::vector<std::pair<HloInstruction*, int64_t>> uses; const PointsToSet::BufferList& points_to = GetPointsToSet(instruction).element(index); for (const LogicalBuffer* buffer : points_to) { for (const BufferAlias& alias : GetBufferAliases(*buffer)) { for (HloInstruction* alias_user : alias.instruction()->users()) { if (DoesNotUseOperandBuffer(alias.instruction(), alias.index(), alias_user)) { continue; } for (int64_t op_idx : 
alias_user->OperandIndices(alias.instruction())) { uses.emplace_back(alias_user, op_idx); } } } } return uses; } bool TuplePointsToAnalysis::HasUniqueFusedUseOfOperandAt( HloInstruction* operand, const ShapeIndex& operand_index, HloInstruction* fusion, const int64_t use_operand_index) const { CHECK_EQ(HloOpcode::kFusion, fusion->opcode()); if (fusion->OperandIndices(operand).size() > 1) { return false; } const auto& fused_params = fusion->fused_parameters(); auto fused_param_it = absl::c_find_if(fused_params, [&](HloInstruction* fused_param) { return fusion->operand(fused_param->parameter_number()) == operand; }); if (fused_param_it == fused_params.end()) { return false; } auto* fused_param = *fused_param_it; auto fused_param_uses = GetAllUsesOfInstructionAtIndex(fused_param, operand_index); return fused_param_uses.size() == 1 && fused_param_uses[0].first == fusion->fused_expression_root() && fused_param_uses[0].second == use_operand_index; } }
#include "xla/service/tuple_points_to_analysis.h" #include <cstdint> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/literal_util.h" #include "xla/service/logical_buffer.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/test.h" #include "xla/test_helpers.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" namespace xla { namespace { using ::testing::UnorderedElementsAre; using ::testing::UnorderedElementsAreArray; class TuplePointsToAnalysisTest : public HloTestBase { protected: void BuildModuleAndRunAnalysis(std::unique_ptr<HloComputation> computation) { BuildModule(std::move(computation)); RunAnalysis(); } void BuildModule(std::unique_ptr<HloComputation> computation) { module_ = CreateNewVerifiedModule(); module_->AddEntryComputation(std::move(computation)); } void RunAnalysis() { CHECK_NOTNULL(module_.get()); points_to_analysis_ = TuplePointsToAnalysis::Run(module_.get()).value(); } const LogicalBuffer* GetBuffer(const HloInstruction* instruction, const ShapeIndex& index) { const auto& pointed_to = points_to_analysis_->GetPointsToSet(instruction).element(index); CHECK_EQ(1, pointed_to.size()); CHECK_EQ(instruction, pointed_to[0]->instruction()); CHECK(index == pointed_to[0]->index()); return pointed_to[0]; } void ExpectHasBuffers(const PointsToSet::BufferList& points_to_set, absl::Span<const LogicalBuffer* const> buffers) { std::vector<const LogicalBuffer*> vec(buffers.begin(), buffers.end()); EXPECT_THAT(points_to_set, UnorderedElementsAreArray(vec)); } void ExpectHasTopLevelBuffers( const PointsToSet::BufferList& points_to_set, 
absl::Span<HloInstruction* const> instructions) { PointsToSet::BufferList buffers; for (auto instruction : instructions) { buffers.push_back(GetBuffer(instruction, {})); } ExpectHasBuffers(points_to_set, buffers); } void ExpectHasTopLevelBuffers( const PointsToSet::BufferSet& points_to_set, absl::Span<HloInstruction* const> instructions) { ExpectHasTopLevelBuffers( PointsToSet::BufferList(points_to_set.begin(), points_to_set.end()), instructions); } void ExpectHasBufferAliases( const HloInstruction* instruction, const ShapeIndex& index, absl::Span<const std::pair<HloInstruction*, ShapeIndex>> expected) { const LogicalBuffer* buffer = points_to_analysis_->GetBufferDefinedAt(instruction, index).value(); std::vector<BufferAlias> expected_aliases; expected_aliases.reserve(expected.size()); for (auto& pair : expected) { expected_aliases.push_back(BufferAlias(pair.first, pair.second)); } EXPECT_THAT(points_to_analysis_->GetBufferAliases(*buffer), UnorderedElementsAreArray(expected_aliases)); } std::unique_ptr<HloModule> module_; std::unique_ptr<TuplePointsToAnalysis> points_to_analysis_; }; TEST_F(TuplePointsToAnalysisTest, SimpleTuple) { auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); BuildModuleAndRunAnalysis(builder.Build()); EXPECT_EQ(1, points_to_analysis_->GetPointsToSet(constant1).size()); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(constant1).element({}), {constant1}); EXPECT_TRUE( points_to_analysis_->GetPointsToSet(constant1).tuple_sources({}).empty()); EXPECT_TRUE(points_to_analysis_->GetPointsToSet(tuple).IsDistinct()); EXPECT_EQ(1, points_to_analysis_->GetPointsToSet(constant2).size()); ExpectHasTopLevelBuffers( 
points_to_analysis_->GetPointsToSet(constant2).element({}), {constant2}); EXPECT_TRUE( points_to_analysis_->GetPointsToSet(constant2).tuple_sources({}).empty()); EXPECT_EQ(3, points_to_analysis_->GetPointsToSet(tuple).size()); EXPECT_FALSE(points_to_analysis_->GetPointsToSet(tuple).IsAmbiguous()); EXPECT_THAT(points_to_analysis_->GetPointsToSet(tuple).tuple_sources({}), UnorderedElementsAre(tuple)); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(tuple).CreateFlattenedSet(), {constant1, constant2, tuple}); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(tuple).element({}), {tuple}); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(tuple).element({0}), {constant1}); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(tuple).element({1}), {constant2}); const PointsToSet& tuple_points_to_set = points_to_analysis_->GetPointsToSet(tuple); EXPECT_TRUE(tuple_points_to_set.ContainsBufferAtIndex( *GetBuffer(constant1, {}), {0})); EXPECT_TRUE(tuple_points_to_set.ContainsBufferAtIndex( *GetBuffer(constant2, {}), {1})); EXPECT_FALSE(tuple_points_to_set.ContainsBufferAtIndex( *GetBuffer(constant2, {}), {0})); EXPECT_TRUE(tuple_points_to_set.ContainsBuffer(*GetBuffer(constant1, {}))); EXPECT_TRUE(tuple_points_to_set.ContainsBuffer(*GetBuffer(constant2, {}))); } TEST_F(TuplePointsToAnalysisTest, NestedTuple) { auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); auto inner_tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto constant3 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({inner_tuple, constant3})); BuildModuleAndRunAnalysis(builder.Build()); 
ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(constant1).element({}), {constant1}); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(constant2).element({}), {constant2}); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(constant3).element({}), {constant3}); EXPECT_EQ(3, points_to_analysis_->GetPointsToSet(inner_tuple).size()); EXPECT_FALSE(points_to_analysis_->GetPointsToSet(inner_tuple).IsAmbiguous()); EXPECT_TRUE(points_to_analysis_->GetPointsToSet(inner_tuple).IsDistinct()); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(inner_tuple).CreateFlattenedSet(), {constant1, constant2, inner_tuple}); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(inner_tuple).element({}), {inner_tuple}); EXPECT_THAT( points_to_analysis_->GetPointsToSet(inner_tuple).tuple_sources({}), UnorderedElementsAre(inner_tuple)); EXPECT_EQ(5, points_to_analysis_->GetPointsToSet(tuple).size()); EXPECT_FALSE(points_to_analysis_->GetPointsToSet(tuple).IsAmbiguous()); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(tuple).CreateFlattenedSet(), {constant1, constant2, constant3, inner_tuple, tuple}); EXPECT_THAT(points_to_analysis_->GetPointsToSet(tuple).tuple_sources({}), UnorderedElementsAre(tuple)); EXPECT_THAT(points_to_analysis_->GetPointsToSet(tuple).tuple_sources({0}), UnorderedElementsAre(inner_tuple)); EXPECT_TRUE( points_to_analysis_->GetPointsToSet(tuple).tuple_sources({1}).empty()); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(tuple).element({0}), {inner_tuple}); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(tuple).element({0, 0}), {constant1}); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(tuple).element({0, 1}), {constant2}); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(tuple).element({1}), {constant3}); } TEST_F(TuplePointsToAnalysisTest, GetTupleElement) { auto builder = HloComputation::Builder(TestName()); auto constant1 = 
builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); auto inner_tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto constant3 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({inner_tuple, constant3})); auto get_tuple_element = builder.AddInstruction( HloInstruction::CreateGetTupleElement(inner_tuple->shape(), tuple, 0)); BuildModuleAndRunAnalysis(builder.Build()); auto& points_to_set = points_to_analysis_->GetPointsToSet(get_tuple_element); EXPECT_EQ(3, points_to_set.size()); EXPECT_FALSE(points_to_set.IsAmbiguous()); EXPECT_TRUE(points_to_set.IsDistinct()); ExpectHasTopLevelBuffers(points_to_set.CreateFlattenedSet(), {constant1, constant2, inner_tuple}); ExpectHasTopLevelBuffers(points_to_set.element({}), {inner_tuple}); EXPECT_THAT(points_to_set.tuple_sources({}), UnorderedElementsAre(inner_tuple)); } TEST_F(TuplePointsToAnalysisTest, AddDependency) { auto builder = HloComputation::Builder(TestName()); auto constant = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto token = builder.AddInstruction(HloInstruction::CreateToken()); auto add_dependency = builder.AddInstruction( HloInstruction::CreateAddDependency(constant, token)); BuildModuleAndRunAnalysis(builder.Build()); auto& points_to_set = points_to_analysis_->GetPointsToSet(add_dependency); EXPECT_EQ(1, points_to_set.size()); EXPECT_FALSE(points_to_set.IsAmbiguous()); EXPECT_TRUE(points_to_set.IsDistinct()); ExpectHasTopLevelBuffers(points_to_set.CreateFlattenedSet(), {constant}); } TEST_F(TuplePointsToAnalysisTest, DuplicatedElement) { auto builder = HloComputation::Builder(TestName()); auto constant = builder.AddInstruction( 
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant, constant, constant})); BuildModuleAndRunAnalysis(builder.Build()); EXPECT_EQ(2, points_to_analysis_->GetPointsToSet(tuple).size()); EXPECT_FALSE(points_to_analysis_->GetPointsToSet(tuple).IsAmbiguous()); EXPECT_FALSE(points_to_analysis_->GetPointsToSet(tuple).IsDistinct()); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(tuple).element({}), {tuple}); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(tuple).CreateFlattenedSet(), {constant, tuple}); } TEST_F(TuplePointsToAnalysisTest, TupleCopy) { auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto copy = builder.AddInstruction( HloInstruction::CreateUnary(tuple->shape(), HloOpcode::kCopy, tuple)); BuildModuleAndRunAnalysis(builder.Build()); EXPECT_FALSE(points_to_analysis_->GetPointsToSet(copy).IsAmbiguous()); EXPECT_TRUE(points_to_analysis_->GetPointsToSet(copy).IsDistinct()); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(tuple).CreateFlattenedSet(), {constant1, constant2, tuple}); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(copy).element({}), {copy}); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(copy).CreateFlattenedSet(), {constant1, constant2, copy}); } TEST_F(TuplePointsToAnalysisTest, CopyStartAndCopyDone) { auto builder = HloComputation::Builder(TestName()); auto constant = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto copy_start = builder.AddInstruction(HloInstruction::CreateCopyStart( ShapeUtil::MakeTupleShape({constant->shape(), 
constant->shape(), ShapeUtil::MakeShape(U32, {})}), constant)); auto copy_done = builder.AddInstruction(HloInstruction::CreateUnary( constant->shape(), HloOpcode::kCopyDone, copy_start)); BuildModuleAndRunAnalysis(builder.Build()); EXPECT_FALSE(points_to_analysis_->GetPointsToSet(copy_start).IsAmbiguous()); EXPECT_TRUE(points_to_analysis_->GetPointsToSet(copy_start).IsDistinct()); EXPECT_FALSE(points_to_analysis_->GetPointsToSet(copy_done).IsAmbiguous()); EXPECT_TRUE(points_to_analysis_->GetPointsToSet(copy_done).IsDistinct()); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(copy_start).element({}), {copy_start}); ExpectHasBufferAliases(copy_start, {0}, {{copy_start, {0}}, {copy_done, {}}}); ExpectHasBufferAliases(constant, {}, {{constant, {}}, {copy_start, {1}}}); } TEST_F(TuplePointsToAnalysisTest, AsyncOps) { std::string hlo_str = R"( HloModule module ENTRY entry { p0 = f32[2,3] parameter(0) async-start = ((f32[2,3]), f32[2,3], u32[]) custom-call-start(p0), custom_call_target="foo" async-update = ((f32[2,3]), f32[2,3], u32[]) custom-call-update(async-start) ROOT async-done = f32[2,3] custom-call-done(async-update) } )"; TF_ASSERT_OK_AND_ASSIGN( module_, ParseAndReturnVerifiedModule(hlo_str, GetModuleConfigForTest())); HloInstruction* param = module_->entry_computation()->parameter_instruction(0); HloInstruction* async_start = FindInstruction(module_.get(), "async-start"); HloInstruction* async_update = FindInstruction(module_.get(), "async-update"); HloInstruction* async_done = FindInstruction(module_.get(), "async-done"); RunAnalysis(); EXPECT_FALSE(points_to_analysis_->GetPointsToSet(async_start).IsAmbiguous()); EXPECT_TRUE(points_to_analysis_->GetPointsToSet(async_start).IsDistinct()); EXPECT_FALSE(points_to_analysis_->GetPointsToSet(async_update).IsAmbiguous()); EXPECT_TRUE(points_to_analysis_->GetPointsToSet(async_update).IsDistinct()); EXPECT_FALSE(points_to_analysis_->GetPointsToSet(async_done).IsAmbiguous()); 
EXPECT_TRUE(points_to_analysis_->GetPointsToSet(async_done).IsDistinct()); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(async_start).element({}), {async_start}); ExpectHasBufferAliases( param, {}, {{param, {}}, {async_start, {0, 0}}, {async_update, {0, 0}}}); ExpectHasBufferAliases( async_start, {1}, {{async_start, {1}}, {async_update, {1}}, {async_done, {}}}); ExpectHasBufferAliases(async_start, {2}, {{async_start, {2}}, {async_update, {2}}}); } TEST_F(TuplePointsToAnalysisTest, SendAndSendDone) { auto builder = HloComputation::Builder(TestName()); auto constant = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto token = builder.AddInstruction(HloInstruction::CreateToken()); auto send = builder.AddInstruction( HloInstruction::CreateSend(constant, token, 0)); auto send_done = builder.AddInstruction(HloInstruction::CreateSendDone(send)); BuildModuleAndRunAnalysis(builder.Build()); EXPECT_FALSE(points_to_analysis_->GetPointsToSet(send).IsAmbiguous()); EXPECT_TRUE(points_to_analysis_->GetPointsToSet(send).IsDistinct()); EXPECT_FALSE(points_to_analysis_->GetPointsToSet(send_done).IsAmbiguous()); EXPECT_TRUE(points_to_analysis_->GetPointsToSet(send_done).IsDistinct()); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(send).element({}), {send}); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(send).element({0}), {constant}); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(send_done).CreateFlattenedSet(), {send_done}); ExpectHasBufferAliases(constant, {}, {{constant, {}}, {send, {0}}}); } TEST_F(TuplePointsToAnalysisTest, RecvAndRecvDone) { auto builder = HloComputation::Builder(TestName()); auto token = builder.AddInstruction(HloInstruction::CreateToken()); auto recv = builder.AddInstruction(HloInstruction::CreateRecv( ShapeUtil::MakeShape(F32, {1, 2, 3}), token, 0)); auto recv_done = builder.AddInstruction(HloInstruction::CreateRecvDone(recv)); 
BuildModuleAndRunAnalysis(builder.Build()); EXPECT_FALSE(points_to_analysis_->GetPointsToSet(recv).IsAmbiguous()); EXPECT_TRUE(points_to_analysis_->GetPointsToSet(recv).IsDistinct()); EXPECT_FALSE(points_to_analysis_->GetPointsToSet(recv_done).IsAmbiguous()); EXPECT_TRUE(points_to_analysis_->GetPointsToSet(recv_done).IsDistinct()); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(recv).element({}), {recv}); ExpectHasBufferAliases(recv, {0}, {{recv, {0}}, {recv_done, {0}}}); } TEST_F(TuplePointsToAnalysisTest, TupleWithBitcast) { auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); auto bitcast = builder.AddInstruction( HloInstruction::CreateBitcast(constant2->shape(), constant2)); auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({constant1, bitcast})); BuildModuleAndRunAnalysis(builder.Build()); EXPECT_EQ(1, points_to_analysis_->GetPointsToSet(bitcast).size()); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(bitcast).element({}), {constant2}); EXPECT_TRUE( points_to_analysis_->GetPointsToSet(bitcast).tuple_sources({}).empty()); EXPECT_EQ(3, points_to_analysis_->GetPointsToSet(tuple).size()); EXPECT_FALSE(points_to_analysis_->GetPointsToSet(tuple).IsAmbiguous()); EXPECT_THAT(points_to_analysis_->GetPointsToSet(tuple).tuple_sources({}), UnorderedElementsAre(tuple)); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(tuple).CreateFlattenedSet(), {constant1, constant2, tuple}); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(tuple).element({}), {tuple}); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(tuple).element({0}), {constant1}); ExpectHasTopLevelBuffers( points_to_analysis_->GetPointsToSet(tuple).element({1}), {constant2}); } TEST_F(TuplePointsToAnalysisTest, 
PointsToTupleConstantElements) { auto builder = HloComputation::Builder(TestName()); Literal elements[] = {LiteralUtil::CreateR2<float>({{1.0}, {2.0}}), LiteralUtil::CreateR1<float>({2.0, 42})}; auto tuple_constant = builder.AddInstruction(HloInstruction::CreateConstant( LiteralUtil::MakeTuple({&elements[0], &elements[1]}))); auto copy = builder.AddInstruction(HloInstruction::CreateUnary( tuple_constant->shape(), HloOpcode::kCopy, tuple_constant)); BuildModuleAndRunAnalysis(builder.Build()); auto& points_to_set = points_to_analysis_->GetPointsToSet(copy); ExpectHasBuffers(points_to_set.element({}), {GetBuffer(copy, {})}); ExpectHasBuffers(points_to_set.element({0}), {GetBuffer(tuple_constant, {0})}); ExpectHasBuffers(points_to_set.element({1}), {GetBuffer(tuple_constant, {1})}); } TEST_F(TuplePointsToAnalysisTest, BufferAliases) { auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); auto inner_tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({inner_tuple, constant2})); BuildModuleAndRunAnalysis(builder.Build()); ExpectHasBufferAliases( constant1, {}, {{constant1, {}}, {inner_tuple, {0}}, {tuple, {0, 0}}}); ExpectHasBufferAliases( constant2, {}, {{constant2, {}}, {inner_tuple, {1}}, {tuple, {0, 1}}, {tuple, {1}}}); ExpectHasBufferAliases(inner_tuple, {}, {{inner_tuple, {}}, {tuple, {0}}}); ExpectHasBufferAliases(tuple, {}, {{tuple, {}}}); } TEST_F(TuplePointsToAnalysisTest, DISABLED_CustomCall) { auto builder = HloComputation::Builder(TestName()); auto constant = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); Shape data_shape = ShapeUtil::MakeShape(F32, {}); auto ccall = 
builder.AddInstruction(HloInstruction::CreateCustomCall( ShapeUtil::MakeTupleShape({data_shape, data_shape}), {constant}, "TestOp")); Cast<HloCustomCallInstruction>(ccall)->set_output_to_operand_aliasing( {std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>{ ShapeIndex{1}, std::pair<int64_t, ShapeIndex>(0, {})}}); auto gte0 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(data_shape, ccall, 0)); auto gte1 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(data_shape, ccall, 1)); BuildModuleAndRunAnalysis(builder.Build()); ExpectHasBufferAliases(ccall, {0}, {{gte0, {}}, {ccall, {0}}}); ExpectHasBufferAliases(constant, {}, {{constant, {}}, {gte1, {}}, {ccall, {1}}}); } class FusionPointsToAnalysisTest : public TuplePointsToAnalysisTest { protected: void Run(const std::string& hlo_str, int64_t expected_num_users) { TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_str)); auto* fusion = module_->entry_computation()->root_instruction(); auto* tuple_param0 = fusion->operand(0); RunAnalysis(); auto* fusion_param = GetFusionParameterForOperand(fusion, tuple_param0); ExpectHasBuffers( points_to_analysis_->GetPointsToSet(fusion_param).element({}), {GetBuffer(fusion_param, {})}); ExpectHasBuffers( points_to_analysis_->GetPointsToSet(fusion_param).element({0}), {GetBuffer(fusion_param, {0})}); ExpectHasBuffers( points_to_analysis_->GetPointsToSet(fusion_param).element({1}), {GetBuffer(fusion_param, {1})}); auto fused_gte0 = GetUniqueFusionParameterUserAt(fusion_param, 0); ExpectHasBuffers( points_to_analysis_->GetPointsToSet(fused_gte0).element({}), {GetBuffer(fusion_param, {0})}); auto fused_gte1 = GetUniqueFusionParameterUserAt(fusion_param, 1); ExpectHasBuffers( points_to_analysis_->GetPointsToSet(fused_gte1).element({}), {GetBuffer(fusion_param, {1})}); ExpectHasBufferAliases(fusion_param, {0}, {{fusion_param, {0}}, {fused_gte0, {}}}); ExpectHasBufferAliases(fusion_param, {1}, {{fusion_param, {1}}, {fused_gte1, {}}}); 
ExpectNumUsersOfAliases(fusion_param, {0}, expected_num_users); } HloInstruction* GetFusionParameterForOperand(HloInstruction* fusion, const HloInstruction* operand) { const auto& fused_instructions = fusion->fused_instructions(); auto it = absl::c_find_if(fused_instructions, [&](const HloInstruction* fused) { return fused->opcode() == HloOpcode::kParameter && fusion->operand(fused->parameter_number()) == operand; }); CHECK(it != fusion->fused_instructions().end()); return *it; } std::vector<HloInstruction*> GetFusionParameterUsersAt( HloInstruction* fusion_param, int64_t tuple_index) { CHECK(fusion_param->shape().IsTuple()); std::vector<HloInstruction*> users_at_tuple_index; for (auto user : fusion_param->users()) { CHECK_EQ(HloOpcode::kGetTupleElement, user->opcode()); if (user->tuple_index() == tuple_index) { users_at_tuple_index.push_back(user); } } return users_at_tuple_index; } HloInstruction* GetUniqueFusionParameterUserAt(HloInstruction* fusion_param, int64_t tuple_index) { std::vector<HloInstruction*> users = GetFusionParameterUsersAt(fusion_param, tuple_index); CHECK_EQ(1, users.size()); return users[0]; } void ExpectNumUsersOfAliases(const HloInstruction* instruction, const ShapeIndex& index, const int64_t expected_num_users) { const auto* buffer = GetBuffer(instruction, index); int64_t num_users = 0; for (const auto& alias : points_to_analysis_->GetBufferAliases(*buffer)) { for (auto user : alias.instruction()->users()) { if (user->opcode() == HloOpcode::kGetTupleElement && !index.empty()) { continue; } ++num_users; } } EXPECT_EQ(expected_num_users, num_users); } }; TEST_F(FusionPointsToAnalysisTest, FusionParam0OneUser) { std::string hlo_str = R"( HloModule FusionParam0OneUser %fused_computation (param_1.2: (f32[8], f32[3])) -> f32[8] { %param_1.2 = (f32[8]{0}, f32[3]{0}) parameter(0) %get-tuple-element.1 = f32[8]{0} get-tuple-element((f32[8]{0}, f32[3]{0}) %param_1.2), index=0 %get-tuple-element.2 = f32[3]{0} get-tuple-element((f32[8]{0}, f32[3]{0}) 
%param_1.2), index=1 %constant.3 = f32[3]{0} constant({1, 1, 1}) %add.1 = f32[3]{0} add(f32[3]{0} %get-tuple-element.2, f32[3]{0} %constant.3) %constant.2 = s32[] constant(0) ROOT %dynamic-update-slice.1 = f32[8]{0} dynamic-update-slice(f32[8]{0} %get-tuple-element.1, f32[3]{0} %add.1, s32[] %constant.2) } ENTRY %FusionParam0OneUser (param0: (f32[8], f32[3])) -> f32[8] { %param0 = (f32[8]{0}, f32[3]{0}) parameter(0) ROOT %fusion = f32[8]{0} fusion((f32[8]{0}, f32[3]{0}) %param0), kind=kLoop, calls=%fused_computation } )"; Run(hlo_str, 1); } TEST_F(FusionPointsToAnalysisTest, FusionParam0TwoUsers) { std::string hlo_str = R"( HloModule FusionParam0TwoUsers %fused_computation (param_1.2: (f32[8], f32[3])) -> f32[8] { %param_1.2 = (f32[8]{0}, f32[3]{0}) parameter(0) %get-tuple-element.1 = f32[8]{0} get-tuple-element((f32[8]{0}, f32[3]{0}) %param_1.2), index=0 %get-tuple-element.2 = f32[3]{0} get-tuple-element((f32[8]{0}, f32[3]{0}) %param_1.2), index=1 %constant.3 = f32[3]{0} constant({1, 1, 1}) %add.1 = f32[3]{0} add(f32[3]{0} %get-tuple-element.2, f32[3]{0} %constant.3) %slice = f32[3]{0} slice(f32[8]{0} %get-tuple-element.1), slice={[0:3]} %add.2 = f32[3]{0} add(f32[3]{0} %add.1, f32[3]{0} %slice) %constant.2 = s32[] constant(0) ROOT %dynamic-update-slice.1 = f32[8]{0} dynamic-update-slice(f32[8]{0} %get-tuple-element.1, f32[3]{0} %add.2, s32[] %constant.2) } ENTRY %FusionParam0TwoUsers (param0: (f32[8], f32[3])) -> f32[8] { %param0 = (f32[8]{0}, f32[3]{0}) parameter(0) ROOT %fusion = f32[8]{0} fusion((f32[8]{0}, f32[3]{0}) %param0), kind=kLoop, calls=%fused_computation } )"; Run(hlo_str, 2); } class PointsToAnalysisTestBase : public HloTestBase { protected: void BuildModule(std::unique_ptr<HloComputation> computation) { module_ = CreateNewVerifiedModule(); computation_ = module_->AddEntryComputation(std::move(computation)); } void RunAnalysis() { CHECK_NOTNULL(module_.get()); points_to_analysis_ = TuplePointsToAnalysis::Run(module_.get()).value(); } void 
BuildModuleAndRunAnalysis(std::unique_ptr<HloComputation> computation) { BuildModule(std::move(computation)); RunAnalysis(); } std::unique_ptr<HloModule> module_; HloComputation* computation_ = nullptr; std::unique_ptr<TuplePointsToAnalysis> points_to_analysis_; }; class DoesNotUseOperandBufferTest : public PointsToAnalysisTestBase {}; TEST_F(DoesNotUseOperandBufferTest, GetTupleElement) { auto builder = HloComputation::Builder(TestName()); Shape elem_shape = ShapeUtil::MakeShape(F32, {8}); auto tuple = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeTupleShape({elem_shape, elem_shape}), "tuple")); auto gte0 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(elem_shape, tuple, 0)); auto gte1 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(elem_shape, tuple, 1)); builder.AddInstruction( HloInstruction::CreateBinary(elem_shape, HloOpcode::kAdd, gte0, gte1)); BuildModuleAndRunAnalysis(builder.Build()); EXPECT_TRUE(points_to_analysis_->DoesNotUseOperandBuffer(tuple, {0}, gte0)); EXPECT_TRUE(points_to_analysis_->DoesNotUseOperandBuffer(tuple, {1}, gte1)); EXPECT_FALSE(points_to_analysis_->DoesNotUseOperandBuffer(tuple, {}, gte0)); EXPECT_FALSE(points_to_analysis_->DoesNotUseOperandBuffer(tuple, {}, gte1)); } TEST_F(DoesNotUseOperandBufferTest, FusedDynamicUpdateSlice) { auto builder = HloComputation::Builder(TestName()); Shape data_shape = ShapeUtil::MakeShape(F32, {8}); auto tuple = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeTupleShape({data_shape, data_shape}), "tuple")); auto gte0 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(data_shape, tuple, 0)); auto gte1 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(data_shape, tuple, 1)); auto starts = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(2))); auto update = builder.AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f}))); 
auto dynamic_update_slice = builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice( data_shape, gte1, update, {starts})); builder.AddInstruction( HloInstruction::CreateTuple({gte0, dynamic_update_slice})); BuildModule(builder.Build()); auto fusion = computation_->CreateFusionInstruction( {dynamic_update_slice, starts, update, gte1}, HloInstruction::FusionKind::kLoop); RunAnalysis(); EXPECT_TRUE(points_to_analysis_->DoesNotUseOperandBuffer(tuple, {0}, fusion)); EXPECT_FALSE( points_to_analysis_->DoesNotUseOperandBuffer(tuple, {1}, fusion)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/tuple_points_to_analysis.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/tuple_points_to_analysis_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
c7d36866-411c-46ed-801c-d8994c2f600b
cpp
tensorflow/tensorflow
scatter_expander
third_party/xla/xla/service/gpu/transforms/scatter_expander.cc
third_party/xla/xla/service/scatter_expander_test.cc
#include "xla/service/gpu/transforms/scatter_expander.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/primitive_util.h" namespace xla { bool GpuScatterExpander::InstructionMatchesPattern(HloInstruction* inst) { return inst->opcode() == HloOpcode::kScatter && (inst->shape().IsTuple() || primitive_util::BitWidth(inst->shape().element_type()) > 64); } }
// Tests for ScatterExpander in its three modes: kEliminateAllScatters,
// kEliminateSimpleScatters, and kEliminateIndeterministicScatters.
//
// Fix: the test previously named
// `EliminateSimpleMultioutpuScattersSkipsNontrivialScatter` had a typo
// ("Multioutpu"); renamed to `EliminateSimpleMultioutputScatters...` for
// consistency with `EliminateSimpleMultioutputScattersRewritesTrivialScatter`.
#include "xla/service/scatter_expander.h"

#include <memory>
#include <utility>

#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"

namespace xla {
namespace {

class ScatterExpanderTest : public HloTestBase {
 protected:
  // Strips the layout from the named instruction so the expander must cope
  // with layout-less operands.
  void ClearInstructionLayout(HloModule* module, absl::string_view inst_name) {
    HloInstruction* inst = FindInstruction(module, inst_name);
    inst->mutable_shape()->clear_layout();
  }
};

// kEliminateAllScatters expands a scatter whose operand has no layout.
TEST_F(ScatterExpanderTest, ScatterOperandWithoutLayout) {
  const char* kModuleStr = R"(
    HloModule scatter_expander

    scatter_computation {
      parameter0 = s32[] parameter(0)
      ROOT parameter1 = s32[] parameter(1)
    }

    ENTRY kernel_entry {
      operand = s32[5] iota(), iota_dimension=0
      indices = s32[1] parameter(0)
      update = s32[] constant(0)
      ROOT scatter = s32[5]{0} scatter(operand, indices, update),
          update_window_dims={}, inserted_window_dims={0},
          scatter_dims_to_operand_dims={0}, index_vector_dim=0,
          to_apply=scatter_computation
    })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));

  // The scatter operand has no layout; the expansion must still succeed.
  ClearInstructionLayout(module.get(), "operand");

  ScatterExpander scatter_expander(ScatterExpander::kEliminateAllScatters);
  TF_ASSERT_OK_AND_ASSIGN(bool result,
                          RunHloPass(&scatter_expander, module.get()));
  EXPECT_TRUE(result);
}

// Same as above, for a variadic (multi-output) scatter.
TEST_F(ScatterExpanderTest, ScatterMultipleOperandsWithoutLayout) {
  const char* kModuleStr = R"(
    HloModule scatter_expander

    scatter_computation {
      p0 = s32[] parameter(0)
      p1 = f32[] parameter(1)
      p2 = s32[] parameter(2)
      p3 = f32[] parameter(3)
      ROOT tuple = tuple(p2, p3)
    }

    ENTRY kernel_entry {
      operand0 = s32[5] iota(), iota_dimension=0
      operand1 = f32[5] constant({2,4,6,8,10})
      indices = s32[1] parameter(0)
      update0 = s32[] constant(0)
      update1 = f32[] constant(1)
      ROOT scatter = (s32[5]{0}, f32[5]{0}) scatter(operand0, operand1, indices, update0, update1),
          update_window_dims={}, inserted_window_dims={0},
          scatter_dims_to_operand_dims={0}, index_vector_dim=0,
          to_apply=scatter_computation
    })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));

  ClearInstructionLayout(module.get(), "operand0");
  ClearInstructionLayout(module.get(), "operand1");

  ScatterExpander scatter_expander(ScatterExpander::kEliminateAllScatters);
  TF_ASSERT_OK_AND_ASSIGN(bool result,
                          RunHloPass(&scatter_expander, module.get()));
  EXPECT_TRUE(result);
}

// kEliminateSimpleScatters leaves a scatter with >1 scatter index untouched.
TEST_F(ScatterExpanderTest, EliminateSimpleScattersSkipsNontrivialScatter) {
  const char* kModuleStr = R"(
    HloModule scatter_expander

    scatter_computation {
      parameter0 = s32[] parameter(0)
      ROOT parameter1 = s32[] parameter(1)
    }

    ENTRY kernel_entry {
      operand = s32[3,3] parameter(0)
      indices = s32[2] parameter(1)
      updates = s32[2,3] parameter(2)
      ROOT scatter = s32[3,3] scatter(operand, indices, updates),
          to_apply=scatter_computation,
          update_window_dims={1}, inserted_window_dims={0},
          scatter_dims_to_operand_dims={0}, index_vector_dim=1
    })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));

  ClearInstructionLayout(module.get(), "operand");

  ScatterExpander scatter_expander(ScatterExpander::kEliminateSimpleScatters);
  TF_ASSERT_OK_AND_ASSIGN(bool result,
                          RunHloPass(&scatter_expander, module.get()));
  EXPECT_FALSE(result);
}

// Variadic version of the "skips nontrivial scatter" test.
// (Renamed from the misspelled "...Multioutpu..." for consistency.)
TEST_F(ScatterExpanderTest,
       EliminateSimpleMultioutputScattersSkipsNontrivialScatter) {
  const char* kModuleStr = R"(
    HloModule scatter_expander

    scatter_computation {
      p0 = s32[] parameter(0)
      p1 = f32[] parameter(1)
      p2 = s32[] parameter(2)
      p3 = f32[] parameter(3)
      ROOT tuple = tuple(p2, p3)
    }

    ENTRY kernel_entry {
      operand0 = s32[3,3] parameter(0)
      operand1 = bf16[3,3] parameter(1)
      indices = s32[2] parameter(2)
      update0 = s32[2,3] parameter(3)
      update1 = bf16[2,3] parameter(4)
      ROOT scatter = (s32[3,3], bf16[3,3]) scatter(operand0, operand1, indices, update0, update1),
          to_apply=scatter_computation,
          update_window_dims={1}, inserted_window_dims={0},
          scatter_dims_to_operand_dims={0}, index_vector_dim=1
    })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));

  ClearInstructionLayout(module.get(), "operand0");
  ClearInstructionLayout(module.get(), "operand1");

  ScatterExpander scatter_expander(ScatterExpander::kEliminateSimpleScatters);
  TF_ASSERT_OK_AND_ASSIGN(bool result,
                          RunHloPass(&scatter_expander, module.get()));
  EXPECT_FALSE(result);
}

// A scatter with a single index is "simple" and gets rewritten.
TEST_F(ScatterExpanderTest, EliminateSimpleScattersRewritesTrivialScatter) {
  const char* kModuleStr = R"(
    HloModule scatter_expander

    scatter_computation {
      parameter0 = s32[] parameter(0)
      ROOT parameter1 = s32[] parameter(1)
    }

    ENTRY kernel_entry {
      operand = s32[5] iota(), iota_dimension=0
      indices = s32[1] parameter(0)
      update = s32[] constant(0)
      ROOT scatter = s32[5]{0} scatter(operand, indices, update),
          update_window_dims={}, inserted_window_dims={0},
          scatter_dims_to_operand_dims={0}, index_vector_dim=0,
          to_apply=scatter_computation
    })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));

  ClearInstructionLayout(module.get(), "operand");

  ScatterExpander scatter_expander(ScatterExpander::kEliminateSimpleScatters);
  TF_ASSERT_OK_AND_ASSIGN(bool result,
                          RunHloPass(&scatter_expander, module.get()));
  EXPECT_TRUE(result);
}

// Variadic version of the "rewrites trivial scatter" test.
TEST_F(ScatterExpanderTest,
       EliminateSimpleMultioutputScattersRewritesTrivialScatter) {
  const char* kModuleStr = R"(
    HloModule scatter_expander

    scatter_computation {
      p0 = s32[] parameter(0)
      p1 = f32[] parameter(1)
      p2 = s32[] parameter(2)
      p3 = f32[] parameter(3)
      ROOT tuple = tuple(p2, p3)
    }

    ENTRY kernel_entry {
      operand0 = s32[5] iota(), iota_dimension=0
      operand1 = f32[5] iota(), iota_dimension=0
      indices = s32[1] parameter(0)
      update0 = s32[] constant(0)
      update1 = f32[] constant(0)
      ROOT scatter = (s32[5]{0}, f32[5]{0}) scatter(operand0, operand1, indices, update0, update1),
          update_window_dims={}, inserted_window_dims={0},
          scatter_dims_to_operand_dims={0}, index_vector_dim=0,
          to_apply=scatter_computation
    })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));

  ClearInstructionLayout(module.get(), "operand0");
  ClearInstructionLayout(module.get(), "operand1");

  ScatterExpander scatter_expander(ScatterExpander::kEliminateSimpleScatters);
  TF_ASSERT_OK_AND_ASSIGN(bool result,
                          RunHloPass(&scatter_expander, module.get()));
  EXPECT_TRUE(result);
}

// Integer addition is associative, so the scatter is deterministic and the
// kEliminateIndeterministicScatters mode leaves it alone.
TEST_F(ScatterExpanderTest, DoNotEliminateScatterWithAssociativeCombiner) {
  const char* const kModuleStr = R"(
    HloModule scatter_expander

    scatter_computation {
      arg1.173 = s32[] parameter(1)
      arg0.172 = s32[] parameter(0)
      ROOT add.48 = s32[] add(arg0.172, arg1.173)
    }

    ENTRY fused_computation {
      bitcast.2335 = s32[1,4096] parameter(0)
      pad.96 = s32[4096,2] parameter(1)
      bitcast.2748 = s32[4096,1,1] parameter(2)
      ROOT scatter.48 = s32[1,4096] scatter(bitcast.2335, pad.96, bitcast.2748),
          update_window_dims={1,2}, inserted_window_dims={},
          scatter_dims_to_operand_dims={0,1}, index_vector_dim=1,
          to_apply=scatter_computation
    })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));

  ScatterExpander scatter_expander(
      ScatterExpander::kEliminateIndeterministicScatters);
  TF_ASSERT_OK_AND_ASSIGN(bool result,
                          RunHloPass(&scatter_expander, module.get()));
  EXPECT_FALSE(result);
}

// Floating-point addition is not associative, so the scatter result depends
// on update order; kEliminateIndeterministicScatters expands it.
TEST_F(ScatterExpanderTest, EliminateScatterWithNonAssociativeCombiner) {
  const char* const kModuleStr = R"(
    HloModule scatter_expander

    scatter_computation {
      arg1.173 = f32[] parameter(1)
      arg0.172 = f32[] parameter(0)
      ROOT add.48 = f32[] add(arg0.172, arg1.173)
    }

    ENTRY fused_computation {
      bitcast.2335 = f32[1,4096] parameter(0)
      pad.96 = s32[4096,2] parameter(1)
      bitcast.2748 = f32[4096,1,1] parameter(2)
      ROOT scatter.48 = f32[1,4096] scatter(bitcast.2335, pad.96, bitcast.2748),
          update_window_dims={1,2}, inserted_window_dims={},
          scatter_dims_to_operand_dims={0,1}, index_vector_dim=1,
          to_apply=scatter_computation
    })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));

  ScatterExpander scatter_expander(
      ScatterExpander::kEliminateIndeterministicScatters);
  TF_ASSERT_OK_AND_ASSIGN(bool result,
                          RunHloPass(&scatter_expander, module.get()));
  EXPECT_TRUE(result);
}

// fp32 maximum is associative (unlike fp32 add), so the scatter is kept.
TEST_F(ScatterExpanderTest, DoNotEliminateScatterWithAssociativeFp32Combiner) {
  const char* const kModuleStr = R"(
    HloModule scatter_expander

    scatter_computation {
      arg1.173 = f32[] parameter(1)
      arg0.172 = f32[] parameter(0)
      ROOT max.48 = f32[] maximum(arg0.172, arg1.173)
    }

    ENTRY fused_computation {
      bitcast.2335 = f32[1,4096] parameter(0)
      pad.96 = s32[4096,2] parameter(1)
      bitcast.2748 = f32[4096,1,1] parameter(2)
      ROOT scatter.48 = f32[1,4096] scatter(bitcast.2335, pad.96, bitcast.2748),
          update_window_dims={1,2}, inserted_window_dims={},
          scatter_dims_to_operand_dims={0,1}, index_vector_dim=1,
          to_apply=scatter_computation
    })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));

  ScatterExpander scatter_expander(
      ScatterExpander::kEliminateIndeterministicScatters);
  TF_ASSERT_OK_AND_ASSIGN(bool result,
                          RunHloPass(&scatter_expander, module.get()));
  EXPECT_FALSE(result);
}

}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/scatter_expander.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/scatter_expander_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
931a8bc3-ba94-4243-bc39-9a972cf91eb2
cpp
tensorflow/tensorflow
dynamic_dimension_inference
third_party/xla/xla/service/dynamic_dimension_inference.cc
third_party/xla/xla/service/dynamic_dimension_inference_test.cc
#include "xla/service/dynamic_dimension_inference.h" #include <cstdint> #include <functional> #include <memory> #include <string> #include <tuple> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/functional/function_ref.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/match.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/comparison_util.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/dynamic_parameter_binding.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/literal.h" #include "xla/literal_util.h" #include "xla/service/call_inliner.h" #include "xla/service/dynamic_window_utils.h" #include "xla/service/hlo_creation_utils.h" #include "xla/service/hlo_dataflow_analysis.h" #include "xla/service/hlo_value.h" #include "xla/service/tuple_util.h" #include "xla/service/while_util.h" #include "xla/shape.h" #include "xla/shape_tree.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/util.h" #include "xla/window_util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" namespace xla { namespace { absl::StatusOr<std::pair<HloComputation*, CallInliner::InlinedInstructionMap>> WidenComputation(HloComputation* narrow_comp, const Shape& wide_shape) { TF_RET_CHECK(wide_shape.IsTuple()); const Shape& narrow_shape = 
narrow_comp->parameter_instruction(0)->shape(); if (Shape::Equal()(wide_shape, narrow_shape)) { return std::make_pair(narrow_comp, CallInliner::InlinedInstructionMap()); } HloComputation* wide_comp = [&]() { HloComputation::Builder builder(absl::StrCat("wide.", narrow_comp->name())); builder.AddInstruction(HloInstruction::CreateParameter( 0, wide_shape, absl::StrCat("wide.", narrow_comp->parameter_instruction(0)->name()))); return narrow_comp->parent()->AddEmbeddedComputation(builder.Build()); }(); HloInstruction* wide_parameter = wide_comp->parameter_instruction(0); HloInstruction* truncated_parameter = TupleUtil::ExtractPrefix( wide_parameter, narrow_shape.tuple_shapes_size(), absl::StrCat("renarrowed.", narrow_comp->parameter_instruction(0)->name())); HloInstruction* call_narrow_comp = wide_comp->AddInstruction( HloInstruction::CreateCall(narrow_comp->root_instruction()->shape(), {truncated_parameter}, narrow_comp)); wide_comp->set_root_instruction(call_narrow_comp, true); TF_ASSIGN_OR_RETURN(auto inline_map, CallInliner::Inline(call_narrow_comp)); return std::make_pair(wide_comp, std::move(inline_map)); } } class DynamicDimensionInferenceVisitor : public DfsHloRewriteVisitor { public: explicit DynamicDimensionInferenceVisitor( const DynamicParameterBinding& param_bindings, HloDataflowAnalysis& dataflow_analysis, DynamicDimensionInference* parent, DynamicDimensionInference::CustomCallInferenceHandler custom_call_handler, DynamicDimensionInference::ShapeCheckMode shape_check_mode, DynamicDimensionInference::AssertionGenerator assertion_generator) : param_bindings_(param_bindings), dataflow_analysis_(dataflow_analysis), parent_(parent), custom_call_handler_(std::move(custom_call_handler)), shape_check_mode_(shape_check_mode), assertion_generator_(assertion_generator) {} absl::Status DefaultAction(HloInstruction* hlo) override; static absl::StatusOr<bool> Run( HloComputation* computation, HloDataflowAnalysis& dataflow_analysis, const DynamicParameterBinding& 
param_bindings, DynamicDimensionInference* parent, DynamicDimensionInference::CustomCallInferenceHandler custom_call_handler = nullptr, DynamicDimensionInference::ShapeCheckMode shape_check_mode = DynamicDimensionInference::ShapeCheckMode::kIgnore, const DynamicDimensionInference::AssertionGenerator& assertion_generator = nullptr) { if (!HloInstruction::IsThreadIncluded(computation->execution_thread(), parent->execution_threads_)) { return false; } DynamicDimensionInferenceVisitor visitor( param_bindings, dataflow_analysis, parent, std::move(custom_call_handler), shape_check_mode, assertion_generator); TF_RETURN_IF_ERROR(computation->Accept(&visitor)); if (visitor.shape_assertion_ != nullptr) { CHECK(assertion_generator); assertion_generator(visitor.shape_assertion_); } return visitor.changed(); } absl::Status HandleParameter(HloInstruction* hlo) override; absl::Status HandleInfeed(HloInstruction* hlo) override; absl::Status HandleConstant(HloInstruction* hlo) override; absl::Status HandleReduce(HloInstruction* hlo) override; absl::Status HandleDot(HloInstruction* hlo) override; absl::Status HandleTuple(HloInstruction* hlo) override; absl::Status HandleTranspose(HloInstruction* hlo) override; absl::Status HandleDynamicReshape(HloInstruction* hlo) override; absl::Status HandleReshape(HloInstruction* hlo) override; absl::Status HandleSort(HloInstruction* hlo) override; absl::Status HandlePad(HloInstruction* hlo) override; absl::Status HandleCustomCall(HloInstruction* hlo) override; absl::Status HandleBroadcast(HloInstruction* hlo) override; absl::Status HandleGetDimensionSize(HloInstruction* hlo) override; absl::Status HandleSetDimensionSize(HloInstruction* hlo) override; absl::Status HandleSelect(HloInstruction* hlo) override; absl::Status HandleConvolution(HloInstruction* hlo) override; absl::Status HandleConcatenate(HloInstruction* hlo) override; absl::Status HandleReduceWindow(HloInstruction* hlo) override; absl::Status HandleReverse(HloInstruction* hlo) 
override; absl::Status HandleSelectAndScatter(HloInstruction* hlo) override; absl::Status HandleGetTupleElement(HloInstruction* hlo) override; absl::Status HandleElementwiseUnary(HloInstruction* hlo) override; absl::Status HandleElementwiseNary(HloInstruction* hlo); absl::Status HandleElementwiseBinary(HloInstruction* hlo) override; absl::Status HandleClamp(HloInstruction* hlo) override; absl::Status HandleConditional(HloInstruction* hlo) override; absl::Status HandleWhile(HloInstruction* hlo) override; absl::Status HandleSlice(HloInstruction* hlo) override; absl::Status HandleDynamicSlice(HloInstruction* hlo) override; absl::Status HandleDynamicUpdateSlice(HloInstruction* hlo) override; absl::Status HandleGather(HloInstruction* hlo) override; absl::Status HandleScatter(HloInstruction* hlo) override; absl::Status HandleMap(HloInstruction* hlo) override; absl::Status HandleDomain(HloInstruction* hlo) override; absl::Status HandleAsyncStart(HloInstruction* hlo) override; absl::Status HandleAsyncDone(HloInstruction* hlo) override; private: using OperandDynamicDimensionFn = absl::FunctionRef<absl::Status( HloInstruction* operand, ShapeIndex index, int64_t dimension, int64_t operand_index, HloInstruction* dynamic_size)>; using DynamicDimensionFn = std::function<absl::Status( ShapeIndex index, int64_t dimension, HloInstruction* dynamic_size)>; void SetDynamicSize(HloInstruction* inst, const ShapeIndex& index, int64_t dim, HloInstruction* size, bool clear_dynamic_dimension = true); void SetDynamicSizes(HloInstruction* inst, const ShapeIndex& index, absl::Span<HloInstruction* const> sizes); absl::Status HandleDynamicConvolutionForward(HloInstruction* hlo, int64_t operand_index, int64_t dimension, HloInstruction* dynamic_size); absl::Status HandleDynamicConvolutionKernelGrad(HloInstruction* hlo, int64_t operand_index, int64_t dimension); absl::Status HandleDynamicConvolutionInputGrad(HloInstruction* hlo, int64_t operand_index, int64_t dimension); absl::Status 
HandleDynamicWindowSamePadding(HloInstruction* hlo, HloInstruction* dynamic_size, int64_t operand_index, int64_t dimension); absl::Status ForEachOperandDynamicDimension(HloInstruction* inst, OperandDynamicDimensionFn); absl::Status ForEachDynamicDimensionInOperand(HloInstruction* inst, int64_t operand_index, OperandDynamicDimensionFn); absl::Status ForEachDynamicDimension(HloInstruction* inst, const DynamicDimensionFn& fn); bool CanInfer(HloInstruction* hlo) { return parent_->CanInfer(hlo); } absl::StatusOr<bool> RequiresPadToStatic(HloInstruction* instr, ShapeIndex shape_index); absl::Status InsertPadToStaticOnInstruction(HloInstruction* inst); absl::Status InsertShapeCheck(HloInstruction* dim1, HloInstruction* dim2, bool support_implicit_broadcast); absl::Status PassThroughDynamicDimension(HloInstruction*); const DynamicParameterBinding& param_bindings_; HloDataflowAnalysis& dataflow_analysis_; DynamicDimensionInference* parent_; DynamicDimensionInference::CustomCallInferenceHandler custom_call_handler_; DynamicDimensionInference::ShapeCheckMode shape_check_mode_; HloInstruction* shape_assertion_ = nullptr; DynamicDimensionInference::AssertionGenerator assertion_generator_; }; void DynamicDimensionInferenceVisitor::SetDynamicSize( HloInstruction* inst, const ShapeIndex& index, int64_t dim, HloInstruction* size, bool clear_dynamic_dimension) { parent_->SetDynamicSize(inst, index, dim, size); if (clear_dynamic_dimension) { ShapeUtil::GetMutableSubshape(inst->mutable_shape(), index) ->set_dynamic_dimension(dim, false); } MarkAsChanged(); } void DynamicDimensionInferenceVisitor::SetDynamicSizes( HloInstruction* inst, const ShapeIndex& index, absl::Span<HloInstruction* const> sizes) { const Shape& subshape = ShapeUtil::GetSubshape(inst->shape(), index); CHECK(subshape.IsArray() && subshape.rank() == sizes.size()); for (int64_t dimension = 0; dimension < subshape.rank(); ++dimension) { if (sizes[dimension] != nullptr) { SetDynamicSize(inst, index, dimension, 
sizes[dimension]); } } } absl::Status DynamicDimensionInferenceVisitor::DefaultAction( HloInstruction* hlo) { return ForEachOperandDynamicDimension( hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension, int64_t operand_index, HloInstruction* dynamic_size) { return UnimplementedStrCat( "Asked to propagate a dynamic dimension from hlo ", operand->name(), "@", index.ToString(), "@", dimension, " to hlo ", hlo->ToString(), ", which is not implemented."); }); } absl::Status DynamicDimensionInferenceVisitor::HandleGetTupleElement( HloInstruction* hlo) { if (!CanInfer(hlo)) { return absl::OkStatus(); } return ForEachOperandDynamicDimension( hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension, int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status { if (hlo->tuple_index() != index[0]) { return absl::OkStatus(); } ShapeIndex new_index(ShapeIndexView(index).subspan(1)); SetDynamicSize(hlo, new_index, dimension, dynamic_size); return absl::OkStatus(); }); } absl::Status DynamicDimensionInferenceVisitor::HandleTuple( HloInstruction* hlo) { if (!CanInfer(hlo)) { return absl::OkStatus(); } TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension( hlo, [&](HloInstruction*, ShapeIndex index, int64_t dimension, int64_t operand_index, HloInstruction* dynamic_size) { index.push_front(operand_index); SetDynamicSize(hlo, index, dimension, dynamic_size); return absl::OkStatus(); })); return absl::OkStatus(); } absl::Status DynamicDimensionInferenceVisitor::HandleBroadcast( HloInstruction* hlo) { if (!CanInfer(hlo)) { return absl::OkStatus(); } return ForEachOperandDynamicDimension( hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension, int64_t operand_index, HloInstruction* dynamic_size) { int64_t broadcast_dim = hlo->dimensions(dimension); SetDynamicSize(hlo, {}, broadcast_dim, dynamic_size); return absl::OkStatus(); }); } absl::Status DynamicDimensionInferenceVisitor::HandleConstant( HloInstruction* hlo) { if 
(!hlo->shape().is_dynamic()) { return absl::OkStatus(); } auto* constant = Cast<HloConstantInstruction>(hlo); ShapeTree<bool> do_pad(constant->shape(), false); Shape padded_shape = constant->shape(); bool pad_any = false; TF_RETURN_IF_ERROR(ShapeUtil::ForEachMutableSubshapeWithStatus( &padded_shape, [&](Shape* subshape, const ShapeIndex& index) -> absl::Status { if (!subshape->IsArray()) { return absl::OkStatus(); } TF_ASSIGN_OR_RETURN(bool requires_pad, RequiresPadToStatic(hlo, index)); if (requires_pad) { pad_any = *do_pad.mutable_element(index) = true; *subshape = ShapeUtil::MakeStaticShape(*subshape); } return absl::OkStatus(); })); if (!pad_any) { return absl::OkStatus(); } Literal padded_literal(padded_shape); do_pad.ForEachElement([&](const ShapeIndex& index, bool requires_pad) { const Shape& subshape = ShapeUtil::GetSubshape(padded_shape, index); if (!subshape.IsArray()) { return absl::OkStatus(); } TF_RETURN_IF_ERROR(padded_literal.CopyFrom(constant->literal(), index, index, true)); if (!requires_pad) { for (int64_t dimension = 0; dimension < subshape.rank(); ++dimension) { if (subshape.is_dynamic_dimension(dimension)) { padded_literal.SetDynamicSize( dimension, index, constant->literal().GetDynamicSize(dimension, index)); } } } return absl::OkStatus(); }); auto* padded_constant = hlo->AddInstruction( HloInstruction::CreateConstant(std::move(padded_literal))); TF_RETURN_IF_ERROR(constant->ReplaceAllUsesWith(padded_constant)); SetVisited(*padded_constant); TF_RETURN_IF_ERROR(do_pad.ForEachElementWithStatus( [&](const ShapeIndex& index, bool requires_pad) -> absl::Status { if (!requires_pad) { return absl::OkStatus(); } const Shape& subshape = ShapeUtil::GetSubshape(constant->shape(), index); TF_RET_CHECK(subshape.IsArray()); for (int64_t dimension = 0; dimension < subshape.rank(); ++dimension) { if (!subshape.is_dynamic_dimension(dimension)) { continue; } HloInstruction* dynamic_size = hlo->AddInstruction( 
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>( constant->literal().GetDynamicSize(dimension, index)))); SetVisited(*dynamic_size); SetDynamicSize(padded_constant, index, dimension, dynamic_size); } return absl::OkStatus(); })); MarkAsChanged(); return absl::OkStatus(); } absl::Status DynamicDimensionInferenceVisitor::HandleCustomCall( HloInstruction* hlo) { if (hlo->custom_call_target() == "PadToStatic") { for (int64_t i = 0; i < hlo->operand(0)->shape().rank(); ++i) { if (hlo->operand(0)->shape().is_dynamic_dimension(i)) { HloInstruction* dynamic_size = hlo->parent()->AddInstruction(HloInstruction::CreateGetTupleElement( ShapeUtil::MakeScalarShape(S32), hlo, i + 1)); ShapeIndex data_output = {0}; SetDynamicSize(hlo, data_output, i, dynamic_size); } } return absl::OkStatus(); } if (!CanInfer(hlo)) { return absl::OkStatus(); } if (custom_call_handler_) { TF_RETURN_IF_ERROR(custom_call_handler_(hlo, parent_)); } else { TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension( hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension, int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status { if (hlo->custom_call_target() == "SliceToDynamic" || hlo->custom_call_target() == "Sharding" || (absl::StartsWith(hlo->custom_call_target(), "Resize") && (dimension == 0 || dimension == 3))) { SetDynamicSize(hlo, {}, dimension, dynamic_size); return absl::OkStatus(); } if (hlo->custom_call_target() == "DynamicReduceWindowSamePadding") { if (hlo->operand_count() > 2) { return Unimplemented( "DynamicReduceWindowSamePadding doesn't support variadic " "reduce window %s", hlo->ToString()); } return HandleDynamicWindowSamePadding(hlo, dynamic_size, operand_index, dimension); } if (hlo->custom_call_target() == "DynamicSelectAndScatterSamePadding") { if (operand_index == 1) { return absl::OkStatus(); } SetDynamicSize(hlo, {}, dimension, dynamic_size); return absl::OkStatus(); } if (hlo->custom_call_target() == "DynamicConvolutionInputGrad") { return 
HandleDynamicConvolutionInputGrad(hlo, operand_index, dimension); } if (hlo->custom_call_target() == "DynamicConvolutionKernelGrad") { return HandleDynamicConvolutionKernelGrad(hlo, operand_index, dimension); } if (hlo->custom_call_target() == "DynamicConvolutionForward") { return HandleDynamicConvolutionForward(hlo, operand_index, dimension, dynamic_size); } return Unimplemented( "CustomCall \"%s\" is not supported to have a dynamic dimension", hlo->custom_call_target()); })); } return InsertPadToStaticOnInstruction(hlo); } absl::Status DynamicDimensionInferenceVisitor::HandleSort(HloInstruction* hlo) { if (!CanInfer(hlo)) { return absl::OkStatus(); } return ForEachOperandDynamicDimension( hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dynamic_dimension, int64_t operand_index, HloInstruction* dynamic_size) { HloSortInstruction* sort = Cast<HloSortInstruction>(hlo); if (sort->values_count() == 0) { SetDynamicSize(hlo, {}, dynamic_dimension, dynamic_size); } else { SetDynamicSize(hlo, {operand_index}, dynamic_dimension, dynamic_size); } return absl::OkStatus(); }); } absl::Status DynamicDimensionInferenceVisitor::HandlePad(HloInstruction* hlo) { if (!CanInfer(hlo)) { return absl::OkStatus(); } return ForEachOperandDynamicDimension( hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension, int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status { if (operand_index != 0) { return Unimplemented( "Dynamic dimension on padding value is not supported"); } const PaddingConfig_PaddingConfigDimension& padding_config = hlo->padding_config().dimensions(dimension); HloInstruction* dynamic_size_adjusted = dynamic_size; if (padding_config.interior_padding() != 0) { HloInstruction* one = hlo->parent()->AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateR0<int32_t>(1))); HloInstruction* zero = hlo->parent()->AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateR0<int32_t>(0))); HloInstruction* interior_padding = 
hlo->parent()->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>( padding_config.interior_padding()))); dynamic_size_adjusted = hlo->parent()->AddInstruction(HloInstruction::CreateBinary( dynamic_size_adjusted->shape(), HloOpcode::kSubtract, dynamic_size_adjusted, one)); dynamic_size_adjusted = hlo->parent()->AddInstruction(HloInstruction::CreateBinary( dynamic_size_adjusted->shape(), HloOpcode::kMaximum, dynamic_size_adjusted, zero)); dynamic_size_adjusted = hlo->parent()->AddInstruction(HloInstruction::CreateBinary( dynamic_size_adjusted->shape(), HloOpcode::kMultiply, dynamic_size_adjusted, interior_padding)); dynamic_size_adjusted = hlo->parent()->AddInstruction(HloInstruction::CreateBinary( dynamic_size_adjusted->shape(), HloOpcode::kAdd, dynamic_size_adjusted, dynamic_size)); } HloInstruction* adjustment = hlo->parent()->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>( padding_config.edge_padding_low() + padding_config.edge_padding_high()))); dynamic_size_adjusted = hlo->parent()->AddInstruction(HloInstruction::CreateBinary( dynamic_size_adjusted->shape(), HloOpcode::kAdd, dynamic_size_adjusted, adjustment)); SetDynamicSize(hlo, {}, dimension, dynamic_size_adjusted); return absl::OkStatus(); }); } absl::Status DynamicDimensionInferenceVisitor::HandleReduce( HloInstruction* hlo) { if (!CanInfer(hlo)) { return absl::OkStatus(); } auto* reduce = Cast<HloReduceInstruction>(hlo); int64_t rank = -1; TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus( reduce->shape(), [&](const Shape& subshape, const ShapeIndex& index) -> absl::Status { if (!subshape.IsArray()) { return absl::OkStatus(); } if (rank < 0) { rank = subshape.rank(); } else { TF_RET_CHECK(rank == subshape.rank()); } return absl::OkStatus(); })); TF_RET_CHECK(rank >= 0); absl::InlinedVector<HloInstruction*, 4> dynamic_sizes(rank, nullptr); TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension( hlo, [&](HloInstruction* operand, ShapeIndex index, 
int64_t dimension, int64_t operand_index, HloInstruction* dynamic_size) { int64_t operand_count = reduce->operand_count(); CHECK_EQ(operand_count % 2, 0); if (operand_index >= reduce->input_count()) { return absl::OkStatus(); } if (absl::c_count(reduce->dimensions(), dimension) != 0) { return absl::OkStatus(); } int64_t dimensions_not_reduced_count = 0; for (int64_t i = 0; i < operand->shape().rank(); ++i) { if (dimension == i) { dynamic_sizes[dimensions_not_reduced_count] = dynamic_size; return absl::OkStatus(); } if (!absl::c_linear_search(reduce->dimensions(), i)) { dimensions_not_reduced_count++; } } return absl::OkStatus(); })); ShapeUtil::ForEachSubshape( reduce->shape(), [&](const Shape& subshape, ShapeIndex shape_index) { if (!subshape.IsArray()) { return; } SetDynamicSizes(reduce, shape_index, dynamic_sizes); }); return absl::OkStatus(); } absl::Status DynamicDimensionInferenceVisitor::HandleDot(HloInstruction* hlo) { if (!CanInfer(hlo)) { return absl::OkStatus(); } absl::InlinedVector<HloInstruction*, 4> dynamic_sizes(hlo->shape().rank(), nullptr); TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension( hlo, [&](HloInstruction* operand, ShapeIndex operand_shape_index, int64_t operand_dimension, int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status { HloInstruction* dot = hlo; const DotDimensionNumbers& dimension_numbers = dot->dot_dimension_numbers(); absl::flat_hash_map<int64_t, int64_t> result_dim_mapping; int64_t current_result_dims = 0; bool lhs = operand_index == 0; if (lhs) { for (int64_t i : dimension_numbers.lhs_batch_dimensions()) { result_dim_mapping[i] = current_result_dims++; } } else { for (int64_t i : dimension_numbers.rhs_batch_dimensions()) { result_dim_mapping[i] = current_result_dims++; } } for (int64_t i = 0; i < dot->operand(0)->shape().rank(); i++) { if (absl::c_linear_search( dimension_numbers.lhs_contracting_dimensions(), i)) { continue; } if (absl::c_linear_search(dimension_numbers.lhs_batch_dimensions(), i)) { continue; 
} if (lhs) { result_dim_mapping[i] = current_result_dims; } current_result_dims++; } for (int64_t i = 0; i < dot->operand(1)->shape().rank(); i++) { if (absl::c_linear_search( dimension_numbers.rhs_contracting_dimensions(), i)) { continue; } if (absl::c_linear_search(dimension_numbers.rhs_batch_dimensions(), i)) { continue; } if (!lhs) { result_dim_mapping[i] = current_result_dims; } current_result_dims++; } auto iter = result_dim_mapping.find(operand_dimension); if (iter != result_dim_mapping.end()) { dynamic_sizes[iter->second] = dynamic_size; } return absl::OkStatus(); })); SetDynamicSizes(hlo, {}, dynamic_sizes); return absl::OkStatus(); } absl::Status DynamicDimensionInferenceVisitor::HandleTranspose( HloInstruction* hlo) { if (!CanInfer(hlo)) { return absl::OkStatus(); } return ForEachOperandDynamicDimension( hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension, int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status { int64_t permuted_dim = -1; for (int64_t i = 0; i < hlo->dimensions().size(); ++i) { if (hlo->dimensions()[i] == dimension) { TF_RET_CHECK(permuted_dim == -1); permuted_dim = i; } } SetDynamicSize(hlo, {}, permuted_dim, dynamic_size); return absl::OkStatus(); }); } absl::Status DynamicDimensionInferenceVisitor::HandleConvolution( HloInstruction* hlo) { if (!CanInfer(hlo)) { return absl::OkStatus(); } return ForEachOperandDynamicDimension( hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension, int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status { HloInstruction* conv = hlo; const ConvolutionDimensionNumbers& dimension_numbers = conv->convolution_dimension_numbers(); if (operand_index == 0) { if (dimension == dimension_numbers.input_batch_dimension()) { SetDynamicSize(conv, {}, dimension_numbers.output_batch_dimension(), dynamic_size); return absl::OkStatus(); } if (dimension == dimension_numbers.input_feature_dimension()) { return absl::OkStatus(); } } else { if (dimension == 
              dimension_numbers.kernel_input_feature_dimension()) {
            return absl::OkStatus();
          }
        }
        return Unimplemented("Dynamic Spatial Convolution is not supported: %s",
                             conv->ToString());
      });
}

// Concatenate: dynamic sizes along the concat dimension are summed (static
// contributions folded into a single constant); dynamic sizes on all other
// dimensions pass through unchanged.
absl::Status DynamicDimensionInferenceVisitor::HandleConcatenate(
    HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  int64_t static_size = 0;
  std::vector<HloInstruction*> dynamic_concat_dims;
  for (int64_t i = 0; i < hlo->operand_count(); ++i) {
    HloInstruction* concat_dim_size = nullptr;
    for (int64_t dimension = 0; dimension < hlo->operand(i)->shape().rank();
         ++dimension) {
      if (dimension == hlo->concatenate_dimension()) {
        HloInstruction* dynamic_size =
            parent_->GetDynamicSize(hlo->mutable_operand(i), {}, dimension);
        concat_dim_size = dynamic_size;
      }
    }
    if (concat_dim_size == nullptr) {
      // Operand is static along the concat dimension; accumulate its extent.
      static_size +=
          hlo->operand(i)->shape().dimensions(hlo->concatenate_dimension());
    } else {
      dynamic_concat_dims.push_back(concat_dim_size);
    }
  }
  std::vector<HloInstruction*> dynamic_sizes(hlo->shape().rank(), nullptr);
  if (!dynamic_concat_dims.empty()) {
    // Total concat size = static constant + sum of dynamic contributions.
    HloInstruction* dim_size_total =
        hlo->parent()->AddInstruction(HloInstruction::CreateConstant(
            LiteralUtil::CreateR0<int32_t>(static_size)));
    for (HloInstruction* dynamic_dim : dynamic_concat_dims) {
      dim_size_total = hlo->parent()->AddInstruction(
          HloInstruction::CreateBinary(dim_size_total->shape(),
                                       HloOpcode::kAdd, dim_size_total,
                                       dynamic_dim));
    }
    dynamic_sizes[hlo->concatenate_dimension()] = dim_size_total;
  }
  TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
      hlo,
      [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
          int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
        TF_RET_CHECK(index.empty());
        int64_t concatenate_dimension = hlo->concatenate_dimension();
        if (concatenate_dimension == dimension) {
          // Already handled by the summation above.
          return absl::OkStatus();
        }
        dynamic_sizes[dimension] = dynamic_size;
        return absl::OkStatus();
      }));
  SetDynamicSizes(hlo, {}, dynamic_sizes);
  return absl::OkStatus();
}

absl::Status
// (continued) get-dimension-size: replace the instruction with the tracked
// dynamic size, or with a constant when the queried dimension is static.
DynamicDimensionInferenceVisitor::HandleGetDimensionSize(
    HloInstruction* gds) {
  int64_t dim = gds->dimension();
  TF_RET_CHECK(dim < gds->operand(0)->shape().rank()) << gds->ToString();
  HloInstruction* operand = gds->mutable_operand(0);
  TF_RET_CHECK(dim < operand->shape().rank());
  HloInstruction* replacement = parent_->GetDynamicSize(operand, {}, dim);
  HloComputation* computation = gds->parent();
  if (replacement == nullptr &&
      !gds->operand(0)->shape().is_dynamic_dimension(dim)) {
    // Static dimension with no tracked size: fold to a constant.
    TF_RET_CHECK(dim < gds->operand(0)->shape().rank());
    int32_t size = gds->operand(0)->shape().dimensions(dim);
    replacement = computation->AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(size)),
        gds->name());
  }
  if (replacement != nullptr) {
    TF_RETURN_IF_ERROR(gds->ReplaceAllUsesWith(replacement));
    // Keep the dynamic-size bookkeeping consistent with the replacement.
    parent_->ReplaceAllDynamicDimensionUsesWith(gds, replacement);
    MarkAsChanged();
  }
  return absl::OkStatus();
}

// set-dimension-size: record operand 1 as the dynamic size of the annotated
// dimension (unless it is provably the full static extent) and forward any
// other dynamic dimensions of the data operand.
absl::Status DynamicDimensionInferenceVisitor::HandleSetDimensionSize(
    HloInstruction* hlo) {
  bool dimension_is_static = false;
  const HloInstruction* size = hlo->operand(1);
  if (size->opcode() == HloOpcode::kConstant) {
    TF_RET_CHECK(size->shape().rank() == 0);
    if (size->literal().Get<int32_t>({}) ==
            hlo->shape().dimensions(hlo->dimension()) &&
        !hlo->shape().is_dynamic_dimension(hlo->dimension())) {
      // The "dynamic" size equals the static extent: nothing is dynamic.
      dimension_is_static = true;
    }
  }
  if (!dimension_is_static) {
    SetDynamicSize(hlo, {}, hlo->dimension(), hlo->mutable_operand(1), false);
  }
  TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
      hlo,
      [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
          int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
        TF_RET_CHECK(operand_index == 0);
        if (dimension != hlo->dimension()) {
          SetDynamicSize(hlo, index, dimension, dynamic_size, false);
        }
        return absl::OkStatus();
      }));
  return absl::OkStatus();
}

absl::Status DynamicDimensionInferenceVisitor::HandleDynamicConvolutionForward(
    HloInstruction* hlo, int64_t operand_index, int64_t dimension,
    HloInstruction* dynamic_size) {
  // (continued) Dynamic convolution forward: propagate a dynamic batch
  // dimension directly; derive dynamic spatial output sizes from the window.
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  TF_RET_CHECK(operand_index == 0);
  const ConvolutionDimensionNumbers& dimension_numbers =
      hlo->convolution_dimension_numbers();
  if (dimension == dimension_numbers.input_batch_dimension()) {
    SetDynamicSize(hlo, {}, dimension_numbers.output_batch_dimension(),
                   dynamic_size);
    return absl::OkStatus();
  }
  for (int64_t spatial_dim_index = 0;
       spatial_dim_index < dimension_numbers.input_spatial_dimensions_size();
       ++spatial_dim_index) {
    int64_t input_spatial_dim =
        dimension_numbers.input_spatial_dimensions(spatial_dim_index);
    int64_t output_spatial_dim =
        dimension_numbers.output_spatial_dimensions(spatial_dim_index);
    if (dimension == input_spatial_dim) {
      WindowDimension window_dim = hlo->window().dimensions(spatial_dim_index);
      // Compute the dynamic output extent from the dynamic input extent and
      // the window parameters.
      DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(
          dynamic_size, window_dim.size(), window_dim.window_dilation(),
          window_dim.stride(), hlo->padding_type());
      // Base dilation is not handled by this computation.
      TF_RET_CHECK(window_dim.base_dilation() == 1);
      SetDynamicSize(hlo, {}, output_spatial_dim,
                     dynamic_window_dims.output_size);
      return absl::OkStatus();
    }
  }
  return absl::OkStatus();
}

// Same-padding windowed op: non-trivial window dimensions get a recomputed
// dynamic output size; trivial ones pass the dynamic size straight through.
absl::Status DynamicDimensionInferenceVisitor::HandleDynamicWindowSamePadding(
    HloInstruction* hlo, HloInstruction* dynamic_size, int64_t operand_index,
    int64_t dimension) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  const Window& window = hlo->window();
  const WindowDimension& window_dim = window.dimensions(dimension);
  if (!window_util::IsTrivialWindowDimension(window_dim)) {
    DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(
        dynamic_size, window_dim.size(), window_dim.window_dilation(),
        window_dim.stride(), PaddingType::PADDING_SAME);
    SetDynamicSize(hlo, {}, dimension, dynamic_window_dims.output_size);
  } else {
    SetDynamicSize(hlo, {}, dimension, dynamic_size);
  }
  return absl::OkStatus();
}

absl::Status
DynamicDimensionInferenceVisitor::HandleDynamicConvolutionInputGrad(
    HloInstruction* hlo, int64_t operand_index,
    int64_t dimension) {
  // (continued) Input-grad of a dynamic convolution: the true input size is
  // carried in operand 0 (an S32 vector with one entry per output dimension);
  // slice out the entry for this dimension and use it as the dynamic size.
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  HloInstruction* input_sizes = hlo->mutable_operand(0);
  HloComputation* comp = hlo->parent();
  TF_RET_CHECK(input_sizes->shape().rank() == 1) << hlo->ToString();
  TF_RET_CHECK(input_sizes->shape().element_type() == S32) << hlo->ToString();
  TF_RET_CHECK(input_sizes->shape().dimensions(0) ==
               hlo->shape().dimensions_size())
      << hlo->ToString();
  // input_sizes[dimension], reshaped to an S32 scalar.
  HloInstruction* slice = comp->AddInstruction(
      HloInstruction::CreateSlice(ShapeUtil::MakeShape(S32, {1}), input_sizes,
                                  {dimension}, {dimension + 1}, {1}));
  HloInstruction* reshape = comp->AddInstruction(
      HloInstruction::CreateReshape(ShapeUtil::MakeScalarShape(S32), slice));
  SetDynamicSize(hlo, {}, dimension, reshape);
  return absl::OkStatus();
}

// Kernel-grad: nothing to propagate for a dynamic input dimension.
absl::Status
DynamicDimensionInferenceVisitor::HandleDynamicConvolutionKernelGrad(
    HloInstruction* hlo, int64_t operand_index, int64_t dimension) {
  return absl::OkStatus();
}

// Default propagation: copy each operand's dynamic dimension to the same
// (shape index, dimension) position of the result.
absl::Status DynamicDimensionInferenceVisitor::PassThroughDynamicDimension(
    HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  ShapeTree<absl::InlinedVector<HloInstruction*, 2>> dynamic_sizes(
      hlo->shape());
  TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
      hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
               int64_t operand_index, HloInstruction* dynamic_size) {
        const Shape& subshape = ShapeUtil::GetSubshape(hlo->shape(), index);
        auto* element = dynamic_sizes.mutable_element(index);
        // Lazily size each leaf vector to the subshape's rank.
        element->resize(subshape.rank(), nullptr);
        (*element)[dimension] = dynamic_size;
        return absl::OkStatus();
      }));
  dynamic_sizes.ForEachElement([&](const ShapeIndex& index, const auto& sizes) {
    if (sizes.empty()) {
      return;
    }
    SetDynamicSizes(hlo, index, sizes);
  });
  return absl::OkStatus();
}

absl::Status DynamicDimensionInferenceVisitor::HandleDomain(
    HloInstruction* hlo) {
  return PassThroughDynamicDimension(hlo);
}

absl::Status DynamicDimensionInferenceVisitor::HandleAsyncStart(
    HloInstruction* hlo) {
  if
  // (continued) Async-start on a thread excluded from this inference run is
  // left untouched.
  (!HloInstruction::IsThreadIncluded(hlo->async_execution_thread(),
                                     parent_->execution_threads_)) {
    return absl::OkStatus();
  }
  return DefaultAction(hlo);
}

// Async-done from an excluded thread: pad the result to static rather than
// propagating dynamism out of that thread's computation.
absl::Status DynamicDimensionInferenceVisitor::HandleAsyncDone(
    HloInstruction* hlo) {
  if (!HloInstruction::IsThreadIncluded(hlo->async_execution_thread(),
                                        parent_->execution_threads_)) {
    return InsertPadToStaticOnInstruction(hlo);
  }
  return DefaultAction(hlo);
}

absl::Status DynamicDimensionInferenceVisitor::HandleElementwiseUnary(
    HloInstruction* hlo) {
  return PassThroughDynamicDimension(hlo);
}

absl::Status DynamicDimensionInferenceVisitor::HandleSelect(
    HloInstruction* hlo) {
  return HandleElementwiseNary(hlo);
}

// N-ary elementwise: reconcile the per-operand dynamic sizes of each result
// dimension, emitting a runtime shape check plus select logic when operands
// disagree (to account for implicit broadcasting of size-1 operands).
absl::Status DynamicDimensionInferenceVisitor::HandleElementwiseNary(
    HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  HloComputation* comp = hlo->parent();
  // operand_sizes[dimension][operand_index] = dynamic size (or nullptr).
  absl::InlinedVector<absl::InlinedVector<HloInstruction*, 2>, 2> operand_sizes(
      hlo->shape().rank(),
      absl::InlinedVector<HloInstruction*, 2>(hlo->operand_count(), nullptr));
  TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
      hlo,
      [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
          int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
        TF_RET_CHECK(index.empty());
        operand_sizes[dimension][operand_index] = dynamic_size;
        return absl::OkStatus();
      }));
  absl::InlinedVector<HloInstruction*, 2> existing_sizes(hlo->shape().rank(),
                                                         nullptr);
  for (int operand_index = 0; operand_index < hlo->operand_count();
       ++operand_index) {
    for (int64_t dimension = 0; dimension < hlo->shape().rank(); ++dimension) {
      HloInstruction* dynamic_size = operand_sizes[dimension][operand_index];
      if (dynamic_size == nullptr) {
        continue;
      }
      HloInstruction* existing_size = existing_sizes[dimension];
      if (existing_size == nullptr) {
        existing_sizes[dimension] = dynamic_size;
      } else if (existing_sizes[dimension] != dynamic_size) {
        // Two operands disagree on this dimension's dynamic size: insert a
        // runtime shape check, then compute the effective output size.
        TF_RETURN_IF_ERROR(
            InsertShapeCheck(existing_size, dynamic_size, true));
        auto one = comp->AddInstruction(
            // (continued) Broadcast reconciliation: a side "needs broadcast"
            // iff its size is 1 and strictly smaller than the other side;
            // when either side broadcasts the result is the max, otherwise
            // the min.
            HloInstruction::CreateConstant(LiteralUtil::One(S32)));
        auto operand_needs_broadcast =
            comp->AddInstruction(HloInstruction::CreateCompare(
                ShapeUtil::MakeShape(PRED, {}), dynamic_size, existing_size,
                ComparisonDirection::kLt));
        auto is_one = comp->AddInstruction(HloInstruction::CreateCompare(
            ShapeUtil::MakeShape(PRED, {}), dynamic_size, one,
            ComparisonDirection::kEq));
        operand_needs_broadcast =
            comp->AddInstruction(HloInstruction::CreateBinary(
                ShapeUtil::MakeShape(PRED, {}), HloOpcode::kAnd, is_one,
                operand_needs_broadcast));
        auto existing_needs_broadcast =
            comp->AddInstruction(HloInstruction::CreateCompare(
                ShapeUtil::MakeShape(PRED, {}), existing_size, dynamic_size,
                ComparisonDirection::kLt));
        is_one = comp->AddInstruction(HloInstruction::CreateCompare(
            ShapeUtil::MakeShape(PRED, {}), existing_size, one,
            ComparisonDirection::kEq));
        existing_needs_broadcast =
            comp->AddInstruction(HloInstruction::CreateBinary(
                ShapeUtil::MakeShape(PRED, {}), HloOpcode::kAnd, is_one,
                existing_needs_broadcast));
        auto needs_broadcast =
            comp->AddInstruction(HloInstruction::CreateBinary(
                ShapeUtil::MakeShape(PRED, {}), HloOpcode::kOr,
                operand_needs_broadcast, existing_needs_broadcast));
        auto max_size = comp->AddInstruction(HloInstruction::CreateBinary(
            ShapeUtil::MakeScalarShape(S32), HloOpcode::kMaximum,
            dynamic_size, existing_size));
        auto min_size = comp->AddInstruction(HloInstruction::CreateBinary(
            ShapeUtil::MakeScalarShape(S32), HloOpcode::kMinimum,
            dynamic_size, existing_size));
        auto select_size = comp->AddInstruction(HloInstruction::CreateTernary(
            ShapeUtil::MakeScalarShape(S32), HloOpcode::kSelect,
            needs_broadcast, max_size, min_size));
        existing_sizes[dimension] = select_size;
      }
    }
  }
  SetDynamicSizes(hlo, {}, existing_sizes);
  return absl::OkStatus();
}

absl::Status DynamicDimensionInferenceVisitor::HandleElementwiseBinary(
    HloInstruction* hlo) {
  return HandleElementwiseNary(hlo);
}

absl::Status DynamicDimensionInferenceVisitor::HandleClamp(
    HloInstruction* hlo) {
  return
  // (continued) Clamp passes dynamic dimensions straight through.
  PassThroughDynamicDimension(hlo);
}

// dynamic-reshape: output dynamic sizes are supplied explicitly as operands.
absl::Status DynamicDimensionInferenceVisitor::HandleDynamicReshape(
    HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  HloDynamicReshapeInstruction* dynamic_reshape =
      Cast<HloDynamicReshapeInstruction>(hlo);
  for (int64_t i = 0; i < hlo->shape().rank(); ++i) {
    if (hlo->shape().is_dynamic_dimension(i)) {
      SetDynamicSize(hlo, {}, i, dynamic_reshape->dim_sizes(i));
    }
  }
  return absl::OkStatus();
}

// Reshape: map each dynamic input dimension to an output dimension using the
// common-factor decomposition of the input/output shapes. When a dynamic
// dimension is split across several output dimensions the mapping can be
// ambiguous; the heuristics below (inferred dimension, already-dynamic output
// dims, unique non-degenerate dim, reverse-reshape detection) try to resolve
// it; otherwise the reshape is decomposed via a flatten-unflatten pair or an
// error is returned.
absl::Status DynamicDimensionInferenceVisitor::HandleReshape(
    HloInstruction* const hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  VLOG(2) << "Handle reshape: " << hlo->ToString() << "\n";
  absl::InlinedVector<HloInstruction*, 2> dynamic_sizes(hlo->shape().rank(),
                                                        nullptr);
  // A ReshapeGroup is a [begin, end) dimension interval; a ReshapeGroupPair
  // pairs an input interval with the output interval it reshapes into.
  using ReshapeGroup = std::pair<int64_t, int64_t>;
  using ReshapeGroupPair = std::pair<ReshapeGroup, ReshapeGroup>;
  // True when op2's group pair is structurally the inverse of op1's
  // (i.e. op2 undoes op1's reshape on the paired dimension groups).
  auto is_reverse_reshape_group_pair =
      [&](const HloInstruction* op1, const ReshapeGroupPair& p1,
          const HloInstruction* op2, const ReshapeGroupPair& p2) -> bool {
    return ShapeUtil::EqualStructure(
               ShapeUtil::GetSubshape(
                   op1->operand(0)->shape(),
                   ShapeIndex(p1.first.first, p1.first.second)),
               ShapeUtil::GetSubshape(
                   op2->operand(0)->shape(),
                   ShapeIndex(p2.second.first, p2.second.second))) &&
           ShapeUtil::EqualStructure(
               ShapeUtil::GetSubshape(
                   op1->shape(),
                   ShapeIndex(p1.second.first, p1.second.second)),
               ShapeUtil::GetSubshape(
                   op2->operand(0)->shape(),
                   ShapeIndex(p2.first.first, p2.first.second)));
  };
  // Finds the common-factor group containing `input_dynamic_dimension`.
  auto find_reshape_group_pair = [](HloInstruction* reshape,
                                    int64_t input_dynamic_dimension) {
    VLOG(2) << "Find reshape pair: " << reshape->ToString() << "\n";
    auto common_factors =
        CommonFactors(reshape->operand(0)->shape().dimensions(),
                      reshape->shape().dimensions());
    ReshapeGroup input_dim = {-1, -1}, output_dim = {-1, -1};
    bool found = false;
    for (int64_t i = 0; i < common_factors.size() - 1; ++i) {
      auto start = common_factors[i];
      auto end = common_factors[i + 1];
      if (input_dynamic_dimension >= start.first &&
          input_dynamic_dimension < end.first) {
        input_dim.first = start.first;
        input_dim.second = end.first;
        output_dim.first = start.second;
        output_dim.second = end.second;
        VLOG(3) << "Found common_factor group pair: " << input_dim.first << ","
                << input_dim.second << "->" << output_dim.first << ","
                << output_dim.second << "\n";
        found = true;
        break;
      }
    }
    CHECK(found);
    return ReshapeGroupPair(input_dim, output_dim);
  };
  // A pair "needs flatten" when a multi-dimension input group maps to a
  // multi-dimension output group (many-to-many reshape).
  auto reshape_group_pair_needs_flatten =
      [](const ReshapeGroupPair& reshape_pair) {
        return reshape_pair.first.second - reshape_pair.first.first > 1 &&
               reshape_pair.second.second - reshape_pair.second.first > 1;
      };
  // Recursively searches the operand chain for a reshape that this reshape
  // reverses; when found, reuses that reshape's operand dynamic sizes.
  std::function<bool(HloInstruction*, const ReshapeGroupPair&, int64_t)>
      find_reverse_past_reshape = [&](HloInstruction* op,
                                      const ReshapeGroupPair reshape_pair,
                                      int64_t dynamic_dimension_size) {
        VLOG(2) << "Find reverse past reshape from " << op->ToString()
                << " for " << dynamic_dimension_size << "\n";
        absl::InlinedVector<int64_t, 4> found_dims;
        for (int op_dim_index = 0; op_dim_index < op->shape().rank();
             ++op_dim_index) {
          if (op->shape().dimensions(op_dim_index) == dynamic_dimension_size) {
            found_dims.push_back(op_dim_index);
          }
        }
        if (found_dims.empty()) {
          return false;
        }
        VLOG(3) << "Found " << found_dims.size() << "\n";
        if (op->opcode() == HloOpcode::kReshape) {
          for (auto op_dim_index : found_dims) {
            auto orig_reshape_pair = find_reshape_group_pair(op, op_dim_index);
            if (is_reverse_reshape_group_pair(op, orig_reshape_pair, hlo,
                                              reshape_pair)) {
              TF_CHECK_OK(ForEachOperandDynamicDimension(
                  op,
                  [&](HloInstruction* operand, ShapeIndex index,
                      int64_t op_dynamic_dimension, int64_t operand_index,
                      HloInstruction* operand_dynamic_size) -> absl::Status {
                    if (op_dynamic_dimension >= orig_reshape_pair.first.first &&
                        op_dynamic_dimension <
                            orig_reshape_pair.first.second) {
                      auto dynamic_size = parent_->GetDynamicSize(
                          op, {}, op_dynamic_dimension);
                      CHECK_NE(dynamic_size, nullptr);
                      auto hlo_dimension_index = op_dynamic_dimension -
                                                 orig_reshape_pair.first.first +
                                                 reshape_pair.second.first;
                      dynamic_sizes[hlo_dimension_index] = dynamic_size;
                    }
                    return absl::OkStatus();
                  }));
              return true;
            }
          }
        }
        for (auto operand : op->mutable_operands()) {
          if (find_reverse_past_reshape(operand, reshape_pair,
                                        dynamic_dimension_size)) {
            return true;
          }
          VLOG(3) << "Checking " << operand->ToString() << "\n";
        }
        return false;
      };
  // Compute the group pair for each dynamic input dimension and decide
  // whether the reshape must be decomposed into a flatten-unflatten pair.
  absl::flat_hash_map<int64_t, ReshapeGroupPair> reshape_group_pairs;
  bool need_flatten_unflatten =
      hlo->inferred_dimension() != -1 &&
      hlo->shape().dimensions(hlo->inferred_dimension()) == 1;
  TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
      hlo,
      [&](HloInstruction* operand, ShapeIndex index,
          int64_t input_dynamic_dimension, int64_t operand_index,
          HloInstruction* operand_dynamic_size) -> absl::Status {
        auto reshape_pair =
            find_reshape_group_pair(hlo, input_dynamic_dimension);
        reshape_group_pairs[input_dynamic_dimension] = reshape_pair;
        if (reshape_group_pair_needs_flatten(reshape_pair)) {
          need_flatten_unflatten = true;
        }
        return absl::OkStatus();
      }));
  if (need_flatten_unflatten) {
    if (hlo->inferred_dimension() != -1) {
      // Derive the inferred dimension's dynamic size as
      //   (product of dynamic extents * product of static extents) /
      //   (static output size without the inferred dimension).
      HloInstruction* operand = hlo->mutable_operand(0);
      HloComputation* comp = hlo->parent();
      HloInstruction* dynamic_size = comp->AddInstruction(
          HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
      int64_t static_size = 1;
      for (int64_t i = 0; i < operand->shape().rank(); i++) {
        HloInstruction* dynamic_dim_size =
            parent_->GetDynamicSize(operand, {}, i);
        if (dynamic_dim_size == nullptr) {
          static_size *= operand->shape().dimensions(i);
        } else {
          dynamic_size = comp->AddInstruction(HloInstruction::CreateBinary(
              dynamic_size->shape(), HloOpcode::kMultiply, dynamic_size,
              dynamic_dim_size));
        }
      }
      HloInstruction* static_size_hlo =
          comp->AddInstruction(HloInstruction::CreateConstant(
              LiteralUtil::CreateR0<int32_t>(static_size)));
      dynamic_size = comp->AddInstruction(HloInstruction::CreateBinary(
          dynamic_size->shape(), HloOpcode::kMultiply, dynamic_size,
          static_size_hlo));
      int64_t size_without_inferred_dim =
          ShapeUtil::ElementsIn(hlo->shape()) /
          hlo->shape().dimensions(hlo->inferred_dimension());
      HloInstruction* size_without_inferred_dim_hlo =
          comp->AddInstruction(HloInstruction::CreateConstant(
              LiteralUtil::CreateR0<int32_t>(size_without_inferred_dim)));
      dynamic_size = comp->AddInstruction(HloInstruction::CreateBinary(
          dynamic_size->shape(), HloOpcode::kDivide, dynamic_size,
          size_without_inferred_dim_hlo));
      dynamic_sizes[hlo->inferred_dimension()] = dynamic_size;
      VLOG(3)
          << "Need to decompose a dynamic reshape to flatten-unflatten pair. "
          << comp->parent()->ToString();
      SetDynamicSizes(hlo, {}, dynamic_sizes);
      return absl::OkStatus();
    }
    return Internal(
        "Need inferred dimension to be set to "
        "flatten-unflatten pair. %s",
        hlo->ToString());
  }
  TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
      hlo,
      [&](HloInstruction* operand, ShapeIndex index,
          int64_t input_dynamic_dimension, int64_t operand_index,
          HloInstruction* operand_dynamic_size) -> absl::Status {
        HloInstruction* const reshape = hlo;
        if (reshape->shape().rank() == 0) {
          VLOG(0) << "Reshaping a dynamic dimension into a scalar, which has "
                     "undefined behavior when input size is 0. The offending "
                     "instruction is: "
                  << reshape->ToString();
          return absl::OkStatus();
        }
        auto iter = reshape_group_pairs.find(input_dynamic_dimension);
        CHECK(iter != reshape_group_pairs.end());
        ReshapeGroupPair reshape_group_pair = iter->second;
        auto output_dim_start = reshape_group_pair.second.first,
             output_dim_end = reshape_group_pair.second.second;
        int64_t output_dynamic_dimension = -1;
        if (operand->shape().dimensions(input_dynamic_dimension) == 1) {
          // Dynamic degenerate (size-1) dimension: only most-major or
          // most-minor placement is supported.
          if (input_dynamic_dimension == 0) {
            output_dynamic_dimension = 0;
          } else if (input_dynamic_dimension == operand->shape().rank() - 1) {
            output_dynamic_dimension = reshape->shape().rank() - 1;
          }
          if (output_dynamic_dimension == -1) {
            return Unimplemented(
                "Dynamic degenerated dimension that's not most-minor nor "
                "most-major is not supported %s",
                reshape->ToString());
          }
        }
        if (output_dynamic_dimension == -1 &&
            output_dim_end - output_dim_start == 1) {
          // Single output dimension in the group: unambiguous mapping.
          output_dynamic_dimension = output_dim_start;
        }
        if (output_dynamic_dimension == -1 &&
            output_dim_end - output_dim_start > 1) {
          // One input dimension maps to several output dimensions: apply the
          // disambiguation heuristics in order.
          output_dynamic_dimension = reshape->inferred_dimension();
          if (output_dynamic_dimension == -1) {
            // Fall back to an output dimension already marked dynamic.
            for (int64_t i = output_dim_start; i < output_dim_end; ++i) {
              if (reshape->shape().is_dynamic_dimension(i)) {
                output_dynamic_dimension = i;
              }
            }
          }
          if (output_dynamic_dimension == -1) {
            // Fall back to the unique non-degenerate output dimension.
            std::vector<int64_t> output_non_degenerated;
            for (int64_t i = output_dim_start; i < output_dim_end; ++i) {
              if (reshape->shape().dimensions(i) != 1) {
                output_non_degenerated.push_back(i);
              }
            }
            if (output_non_degenerated.size() == 1) {
              output_dynamic_dimension = output_non_degenerated[0];
            }
          }
          // Last resort: detect a reshape this one reverses and copy its
          // operand's dynamic sizes (handled inside the helper).
          if (output_dynamic_dimension == -1 &&
              find_reverse_past_reshape(
                  hlo->mutable_operand(0), reshape_group_pair,
                  hlo->mutable_operand(0)->shape().dimensions(
                      input_dynamic_dimension))) {
            return absl::OkStatus();
          }
          if (output_dynamic_dimension == -1) {
            return InvalidArgument(
                "Reshape's input dynamic dimension is decomposed into "
                "multiple output dynamic dimensions, but the constraint is "
                "ambiguous and XLA can't infer the output dimension %s. ",
                hlo->ToString());
          }
        }
        CHECK_NE(output_dynamic_dimension, -1);
        const int64_t input_dim_size =
            operand->shape().dimensions(input_dynamic_dimension);
        const int64_t output_dim_size =
            reshape->shape().dimensions(output_dynamic_dimension);
        VLOG(2) << "input_dim_size: " << input_dim_size
                << " output_dim_size: " << output_dim_size;
        if (input_dim_size == output_dim_size) {
          // Same extent: forward the dynamic size unchanged.
          dynamic_sizes[output_dynamic_dimension] = operand_dynamic_size;
        }
        if (input_dim_size > output_dim_size) {
          // Input dimension is split: output dynamic size is the input
          // dynamic size divided by the static split factor.
          TF_RET_CHECK(input_dim_size % output_dim_size == 0)
              << reshape->ToString();
          const int64_t divisor = input_dim_size / output_dim_size;
          HloInstruction* divisor_hlo =
              hlo->parent()->AddInstruction(HloInstruction::CreateConstant(
                  LiteralUtil::CreateR0<int32_t>(divisor)));
          HloInstruction* new_dynamic_size =
              hlo->parent()->AddInstruction(HloInstruction::CreateBinary(
                  operand_dynamic_size->shape(), HloOpcode::kDivide,
                  operand_dynamic_size, divisor_hlo));
          dynamic_sizes[output_dynamic_dimension] = new_dynamic_size;
        }
        if (input_dim_size < output_dim_size) {
          // Input dimensions are combined:
          //   output_dynamic = output_size / input_static * input_dynamic.
          HloInstruction* output_dynamic_size =
              dynamic_sizes[output_dynamic_dimension];
          if (output_dynamic_size == nullptr) {
            output_dynamic_size =
                hlo->parent()->AddInstruction(HloInstruction::CreateConstant(
                    LiteralUtil::CreateR0<int32_t>(output_dim_size)));
          }
          HloInstruction* divisor_hlo = hlo->parent()->AddInstruction(
              HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
                  operand->shape().dimensions(input_dynamic_dimension))));
          HloInstruction* new_dynamic_size =
              hlo->parent()->AddInstruction(HloInstruction::CreateBinary(
                  output_dynamic_size->shape(), HloOpcode::kDivide,
                  output_dynamic_size, divisor_hlo));
          new_dynamic_size =
              hlo->parent()->AddInstruction(HloInstruction::CreateBinary(
                  output_dynamic_size->shape(), HloOpcode::kMultiply,
                  new_dynamic_size, operand_dynamic_size));
          dynamic_sizes[output_dynamic_dimension] = new_dynamic_size;
        }
        return absl::OkStatus();
      }));
  SetDynamicSizes(hlo, {},
                  dynamic_sizes);
  return absl::OkStatus();
}

// reduce-window: a dynamic dimension with a trivial window passes through;
// otherwise its dynamic size is recomputed from the window parameters with
// VALID padding. Sizes fan out to every leaf of a variadic (tuple) result.
absl::Status DynamicDimensionInferenceVisitor::HandleReduceWindow(
    HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  ShapeTree<absl::InlinedVector<HloInstruction*, 2>> dynamic_sizes(
      hlo->shape());
  TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
      hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
               int64_t operand_index, HloInstruction* dynamic_size) {
        auto* reduce_window = Cast<HloReduceWindowInstruction>(hlo);
        const WindowDimension& window_dim =
            reduce_window->window().dimensions(dimension);
        if (operand_index >= reduce_window->input_count()) {
          // Init-value operands carry no dynamic dimensions.
          return absl::OkStatus();
        }
        if (!window_util::IsTrivialWindowDimension(window_dim)) {
          DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(
              dynamic_size, window_dim.size(), window_dim.window_dilation(),
              window_dim.stride(), PaddingType::PADDING_VALID);
          dynamic_size = dynamic_window_dims.output_size;
        }
        // Record the size at each leaf of the (possibly tuple) result.
        ShapeUtil::ForEachSubshape(
            reduce_window->shape(),
            [&](const Shape& subshape, ShapeIndex reduce_window_result_index) {
              if (!ShapeUtil::IsLeafIndex(reduce_window->shape(),
                                          reduce_window_result_index)) {
                return;
              }
              auto* leaf_dynamic_sizes =
                  dynamic_sizes.mutable_element(reduce_window_result_index);
              leaf_dynamic_sizes->resize(subshape.rank(), nullptr);
              (*leaf_dynamic_sizes)[dimension] = dynamic_size;
            });
        return absl::OkStatus();
      }));
  dynamic_sizes.ForEachElement(
      [&](const ShapeIndex& shape_index,
          const absl::InlinedVector<HloInstruction*, 2> sizes) {
        if (sizes.empty()) {
          return;
        }
        SetDynamicSizes(hlo, shape_index, sizes);
      });
  return absl::OkStatus();
}

// select-and-scatter: dynamic dimensions of the data operand pass through;
// the operand at index 1 is skipped.
absl::Status DynamicDimensionInferenceVisitor::HandleSelectAndScatter(
    HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  return ForEachOperandDynamicDimension(
      hlo, [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
               int64_t operand_index, HloInstruction* dynamic_size) {
        if (operand_index == 1) {
          return absl::OkStatus();
        }
        SetDynamicSize(hlo, {},
                       dimension, dynamic_size);
        return absl::OkStatus();
      });
}

// Slice: adjust the dynamic size for the start offset and stride, i.e.
// size = ceil((dynamic_size - start) / stride).
absl::Status DynamicDimensionInferenceVisitor::HandleSlice(
    HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  return ForEachOperandDynamicDimension(
      hlo,
      [&](HloInstruction* operand, ShapeIndex /*index*/, int64_t dimension,
          int64_t /*operand_index*/,
          HloInstruction* dynamic_size) -> absl::Status {
        int64_t start = hlo->slice_starts(dimension);
        int64_t limit = hlo->slice_limits(dimension);
        int64_t stride = hlo->slice_strides(dimension);
        int64_t size = CeilOfRatio<int64_t>(limit - start, stride);
        if (size == 1) {
          // A size-1 slice result cannot stay dynamic.
          TF_RET_CHECK(!hlo->shape().is_dynamic_dimension(dimension));
          return absl::OkStatus();
        }
        TF_RET_CHECK(hlo->shape().is_dynamic_dimension(dimension));
        if (start != 0) {
          dynamic_size = hlo->AddInstruction(HloInstruction::CreateBinary(
              dynamic_size->shape(), HloOpcode::kSubtract, dynamic_size,
              hlo->AddInstruction(HloInstruction::CreateConstant(
                  LiteralUtil::CreateR0<int32_t>(start)))));
        }
        if (stride != 1) {
          // Ceiling division: (x + stride - 1) / stride.
          dynamic_size = hlo->AddInstruction(HloInstruction::CreateBinary(
              dynamic_size->shape(), HloOpcode::kAdd, dynamic_size,
              hlo->AddInstruction(HloInstruction::CreateConstant(
                  LiteralUtil::CreateR0<int32_t>(stride - 1)))));
          dynamic_size = hlo->AddInstruction(HloInstruction::CreateBinary(
              dynamic_size->shape(), HloOpcode::kDivide, dynamic_size,
              hlo->AddInstruction(HloInstruction::CreateConstant(
                  LiteralUtil::CreateR0<int32_t>(stride)))));
        }
        SetDynamicSize(hlo, {}, dimension, dynamic_size);
        return absl::OkStatus();
      });
}

// dynamic-slice: a dynamic dimension passes through only when the slice keeps
// the full extent of that dimension.
absl::Status DynamicDimensionInferenceVisitor::HandleDynamicSlice(
    HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  return ForEachOperandDynamicDimension(
      hlo,
      [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
          int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
        if (hlo->shape().dimensions(dimension) == 1) {
          // A single element is selected out of the dynamic dimension; the
          // result is static along it.
          return absl::OkStatus();
        }
        if (hlo->shape().dimensions(dimension) !=
            hlo->operand(0)->shape().dimensions(dimension)) {
          return
              Unimplemented(
                  "Dynamic dimension propagation on DynamicSlice where a partial "
                  "dimension is selected %s",
                  hlo->ToString());
        }
        TF_RET_CHECK(operand_index == 0);
        TF_RET_CHECK(index.empty());
        SetDynamicSize(hlo, {}, dimension, dynamic_size);
        return absl::OkStatus();
      });
}

// dynamic-update-slice: sizes pass through when the update covers the full
// base operand; an update smaller than the base clears the output's dynamic
// bit for that dimension instead.
absl::Status DynamicDimensionInferenceVisitor::HandleDynamicUpdateSlice(
    HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  absl::InlinedVector<HloInstruction*, 2> output_dynamic_sizes(
      hlo->shape().rank(), nullptr);
  TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
      hlo,
      [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
          int64_t operand_index, HloInstruction* dynamic_size) -> absl::Status {
        TF_RET_CHECK(index.empty());
        if (hlo->shape().dimensions(dimension) !=
            hlo->operand(0)->shape().dimensions(dimension)) {
          return Unimplemented(
              "Dynamic dimension propagation on DynamicUpdateSlice where a "
              "partial dimension is selected %s",
              hlo->ToString());
        }
        if (operand_index == 1 &&
            hlo->operand(1)->shape().dimensions(dimension) <
                hlo->operand(0)->shape().dimensions(dimension)) {
          // Update region is smaller than the base: keep the static extent.
          hlo->mutable_shape()->set_dynamic_dimension(dimension, false);
          return absl::OkStatus();
        }
        output_dynamic_sizes[dimension] = dynamic_size;
        return absl::OkStatus();
      }));
  SetDynamicSizes(hlo, {}, output_dynamic_sizes);
  return absl::OkStatus();
}

absl::Status DynamicDimensionInferenceVisitor::HandleReverse(
    HloInstruction* hlo) {
  return PassThroughDynamicDimension(hlo);
}

// Gather: supports a dynamic batch dimension of the indices operand, and a
// fully-gathered (pass-through) dynamic dimension of the data operand.
absl::Status DynamicDimensionInferenceVisitor::HandleGather(
    HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  absl::InlinedVector<HloInstruction*, 2> output_dynamic_sizes(
      hlo->shape().rank(), nullptr);
  TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
      hlo,
      [&](HloInstruction* operand, ShapeIndex /*index*/,
          int64_t input_dynamic_dimension, int64_t operand_index,
          HloInstruction* dynamic_size) -> absl::Status {
        const GatherDimensionNumbers& gather_dims =
            hlo->gather_dimension_numbers();
        if (operand_index == 0) {
          if
          // (continued) Data-operand cases for gather.
          (hlo->gather_slice_sizes()[input_dynamic_dimension] == 1) {
            // A single element is sliced along the dynamic dimension; the
            // dynamic size does not propagate.
            return absl::OkStatus();
          }
          if (hlo->gather_slice_sizes()[input_dynamic_dimension] ==
              operand->shape().dimensions(input_dynamic_dimension)) {
            // Full-dimension gather: map the operand dimension to its offset
            // dimension in the output, skipping collapsed slice dims.
            int64_t operand_dimension = 0;
            for (int64_t output_dimension : gather_dims.offset_dims()) {
              TF_RET_CHECK(output_dimension < hlo->shape().rank());
              while (operand_dimension < operand->shape().rank() &&
                     absl::c_linear_search(gather_dims.collapsed_slice_dims(),
                                           operand_dimension)) {
                ++operand_dimension;
              }
              TF_RET_CHECK(operand_dimension < operand->shape().rank());
              if (operand_dimension == input_dynamic_dimension) {
                output_dynamic_sizes[output_dimension] = dynamic_size;
                return absl::OkStatus();
              }
              ++operand_dimension;
            }
            return Internal("Invalid instruction: %s", hlo->ToString());
          }
          return Unimplemented(
              "Detects a dynamic dimension on the data input of gather, which "
              "is not supported: %s, %lld",
              hlo->ToString(), input_dynamic_dimension);
        }
        // Indices operand: map the dynamic batch dimension of the indices to
        // the corresponding non-offset output dimension.
        int64_t indices_rank = hlo->operand(1)->shape().rank();
        if (gather_dims.index_vector_dim() == indices_rank) {
          // Implicit trailing index-vector dimension.
          ++indices_rank;
        }
        int64_t output_rank = hlo->shape().rank();
        int64_t indices_dim = 0;
        for (int64_t output_dim = 0; output_dim < output_rank; ++output_dim) {
          if (!absl::c_linear_search(gather_dims.offset_dims(), output_dim)) {
            if (indices_dim == gather_dims.index_vector_dim()) {
              indices_dim++;
            }
            if (indices_dim++ == input_dynamic_dimension) {
              output_dynamic_sizes[output_dim] = dynamic_size;
              return absl::OkStatus();
            }
          }
        }
        CHECK(indices_dim == indices_rank);
        return Unimplemented(
            "Detects a non-batch dynamic dimension of gather, "
            "which is not supported: %s",
            hlo->ToString());
      }));
  SetDynamicSizes(hlo, {}, output_dynamic_sizes);
  return absl::OkStatus();
}

// NOTE(review): HandleConditional continues well beyond this chunk; only its
// opening statements are visible here.
absl::Status DynamicDimensionInferenceVisitor::HandleConditional(
    HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  std::vector<HloComputation*> new_branch_computations;
  std::vector<HloInstruction*> new_operands;
  ShapeTree<absl::flat_hash_map<int64_t,
int64_t>> dynamic_output_mapping( hlo->shape()); bool need_rewrite = false; for (int64_t branch_index = 0; branch_index < hlo->branch_count(); ++branch_index) { std::vector<HloInstruction*> operands_to_add; absl::flat_hash_map<HloInstruction*, int64_t> dynamic_size_to_operand_id_index_map; const int64_t operand_index = branch_index + 1; int operand_count = hlo->operand(operand_index)->shape().tuple_shapes_size(); TF_RETURN_IF_ERROR(ForEachDynamicDimensionInOperand( hlo, operand_index, [&](HloInstruction*, ShapeIndex, int64_t, int64_t, HloInstruction* dynamic_size) -> absl::Status { TF_RET_CHECK(hlo->operand(operand_index)->shape().IsTuple()) << "Only tuple typed inputs can have dynamic dimension. Please " "file a bug against XLA team."; const HloInstruction* tuple_operand = hlo->operand(operand_index); for (int64_t i = 0; i < tuple_operand->operand_count(); ++i) { if (dynamic_size == tuple_operand->operand(i)) { dynamic_size_to_operand_id_index_map[dynamic_size] = i; return absl::OkStatus(); } } auto iter = dynamic_size_to_operand_id_index_map.find(dynamic_size); if (iter == dynamic_size_to_operand_id_index_map.end()) { operands_to_add.push_back(dynamic_size); dynamic_size_to_operand_id_index_map[dynamic_size] = operand_count++; } return absl::OkStatus(); })); HloInstruction* original_input = hlo->mutable_operand(operand_index); HloComputation* branch_computation = hlo->branch_computation(branch_index); HloComputation* new_computation = branch_computation; CallInliner::InlinedInstructionMap inline_map; HloInstruction* new_operand = hlo->mutable_operand(operand_index); Shape new_param_shape = branch_computation->parameter_instruction(0)->shape(); if (!operands_to_add.empty()) { TF_RET_CHECK(original_input->shape().IsTuple()); need_rewrite = true; new_operand = TupleUtil::AppendSuffix(original_input, operands_to_add); for (HloInstruction* operand : operands_to_add) { ShapeUtil::AppendShapeToTuple(operand->shape(), &new_param_shape); } TF_ASSIGN_OR_RETURN( 
std::tie(new_computation, inline_map), WidenComputation(branch_computation, new_param_shape)); } DynamicParameterBinding dynamic_parameter_binding; TF_RETURN_IF_ERROR(ForEachDynamicDimensionInOperand( hlo, operand_index, [&](HloInstruction*, ShapeIndex index, int64_t dimension, int64_t operand_index, HloInstruction* dynamic_size) { DynamicParameterBinding::DynamicSizeParameter dynamic_parameter{ 0, {dynamic_size_to_operand_id_index_map[dynamic_size]}}; DynamicParameterBinding::DynamicDimension dynamic_dimension{ 0, {index}, dimension}; TF_RETURN_IF_ERROR(dynamic_parameter_binding.Bind(dynamic_parameter, dynamic_dimension)); return absl::OkStatus(); })); VLOG(2) << "dynamic_parameter_binding for conditional branch" << dynamic_parameter_binding; for (auto [old_inst, new_inst] : inline_map) { parent_->CopyMapping( old_inst, new_inst, &inline_map); } TF_ASSIGN_OR_RETURN( bool changed, DynamicDimensionInferenceVisitor::Run( new_computation, dataflow_analysis_, dynamic_parameter_binding, parent_, custom_call_handler_, shape_check_mode_, assertion_generator_)); if (changed) { MarkAsChanged(); } new_branch_computations.push_back(new_computation); new_operands.push_back(new_operand); } int tuple_count = hlo->shape().tuple_shapes_size(); ShapeUtil::ForEachSubshape( hlo->shape(), [&](const Shape& subshape, const ShapeIndex& index) { if (!subshape.IsArray()) { return; } for (int64_t i = 0; i < subshape.rank(); ++i) { for (int64_t j = 0; j < new_branch_computations.size(); ++j) { HloInstruction* dynamic_size = parent_->GetDynamicSize( new_branch_computations[j]->root_instruction(), index, i); if (dynamic_size) { if (dynamic_output_mapping.element(index).contains(i)) { continue; } dynamic_output_mapping.mutable_element(index)->emplace( i, tuple_count++); } } } }); for (int64_t branch_index = 0; branch_index < hlo->branch_count(); ++branch_index) { std::vector<HloInstruction*> hlos_to_add_in_root; ShapeUtil::ForEachSubshape( hlo->shape(), [&](const Shape& subshape, const 
ShapeIndex& index) { if (!subshape.IsArray()) { return; } for (int64_t i = 0; i < subshape.rank(); ++i) { if (dynamic_output_mapping.element(index).contains(i)) { HloInstruction* dynamic_size = parent_->GetDynamicSize( new_branch_computations[branch_index]->root_instruction(), index, i); if (dynamic_size) { hlos_to_add_in_root.push_back(dynamic_size); } else { HloInstruction* constant_size = new_branch_computations[branch_index]->AddInstruction( HloInstruction::CreateConstant( LiteralUtil::CreateR0<int32_t>( subshape.dimensions(i)))); hlos_to_add_in_root.push_back(constant_size); } } } }); VLOG(2) << "hlos_to_add_in_root:" << hlos_to_add_in_root.size(); if (!hlos_to_add_in_root.empty()) { need_rewrite = true; HloInstruction* new_branch_root = TupleUtil::AppendSuffix( new_branch_computations[branch_index]->root_instruction(), hlos_to_add_in_root); new_branch_computations[branch_index]->set_root_instruction( new_branch_root, true); } } if (!need_rewrite) { return absl::OkStatus(); } HloInstruction* new_conditional = hlo->parent()->AddInstruction(HloInstruction::CreateConditional( new_branch_computations[0]->root_instruction()->shape(), hlo->mutable_operand(0), new_branch_computations, new_operands)); HloInstruction* new_conditional_extracted = TupleUtil::ExtractPrefix( new_conditional, hlo->shape().tuple_shapes_size()); dynamic_output_mapping.ForEachElement( [&](const ShapeIndex& index, const absl::flat_hash_map<int64_t, int64_t>& dim_to_output) { for (auto iter : dim_to_output) { int64_t dim = iter.first; int64_t output_index = iter.second; HloInstruction* dynamic_size = hlo->parent()->AddInstruction( HloInstruction::CreateGetTupleElement( ShapeUtil::MakeScalarShape(S32), new_conditional, output_index)); SetDynamicSize(new_conditional, index, dim, dynamic_size, false); SetDynamicSize(new_conditional_extracted, index, dim, dynamic_size, false); } }); TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(new_conditional_extracted)); 
TF_RETURN_IF_ERROR(hlo->parent()->RemoveInstruction(hlo)); SetVisited(*new_conditional); SetVisited(*new_conditional_extracted); MarkAsChanged(); return absl::OkStatus(); } absl::Status DynamicDimensionInferenceVisitor::HandleMap(HloInstruction* hlo) { if (!CanInfer(hlo)) { return absl::OkStatus(); } return HandleElementwiseNary(hlo); } absl::Status DynamicDimensionInferenceVisitor::HandleScatter( HloInstruction* hlo) { if (!CanInfer(hlo)) { return absl::OkStatus(); } return ForEachOperandDynamicDimension( hlo, [&](HloInstruction* operand, ShapeIndex dynamic_index, int64_t dimension, int64_t operand_index, HloInstruction* operand_dynamic_size) -> absl::Status { if (operand_index == 0) { SetDynamicSize(hlo, {}, dimension, operand_dynamic_size); return absl::OkStatus(); } const ScatterDimensionNumbers& scatter_dims = hlo->scatter_dimension_numbers(); if (operand_index == 2 && absl::c_linear_search(scatter_dims.update_window_dims(), dimension)) { std::vector<int64_t> update_window_dims_in_operand; for (int64_t i = 0; i < hlo->operand(0)->shape().rank(); ++i) { if (absl::c_linear_search(scatter_dims.inserted_window_dims(), i)) { continue; } update_window_dims_in_operand.push_back(i); } for (int64_t i = 0; i < scatter_dims.update_window_dims_size(); ++i) { if (scatter_dims.update_window_dims(i) == dimension) { const Shape& operand_shape = hlo->operand(0)->shape(); const Shape& update_shape = hlo->operand(2)->shape(); int64_t dim_in_operand = update_window_dims_in_operand[i]; if (operand_shape.dimensions(dim_in_operand) != update_shape.dimensions(dimension)) { return Unimplemented( "Dynamic dimension of update window dims that are not the " "same as corresponding operand dim is not supported: " "%s : %d : %d : %d", hlo->ToString(), i, update_shape.dimensions(dimension), operand_shape.dimensions(dim_in_operand)); } HloInstruction* base_dynamic_size = parent_->GetDynamicSize( hlo->mutable_operand(0), {}, dim_in_operand); if (base_dynamic_size == nullptr || 
!operand_shape.is_dynamic_dimension(dim_in_operand)) {
                // Base dimension is static (or has no recorded size): nothing
                // to cross-check against.
                return absl::OkStatus();
              }
              if (base_dynamic_size != operand_dynamic_size) {
                return Unimplemented(
                    "Dynamic dimension size of update window dims that are not "
                    "the same as corresponding operand dim is not supported: "
                    "%s.\n Dynamic dim size of base: %s, dynamic dim size of "
                    "update: %s",
                    hlo->ToString(), base_dynamic_size->ToString(),
                    operand_dynamic_size->ToString());
              }
            }
          }
        }
        return absl::OkStatus();
      });
}

// Rewrites a while loop so dynamic dimension sizes are threaded through the
// loop state: the sizes become extra live-in tuple elements (via
// WhileUtil::MakeInstructionsLiveIn), inference is re-run on the body and
// condition with the corresponding parameter bindings, and the body root is
// rebuilt so the updated sizes are carried to the next iteration and out of
// the loop. The while's shape is then marked static, with the dynamic sizes
// read back out of the extra result elements.
absl::Status DynamicDimensionInferenceVisitor::HandleWhile(
    HloInstruction* hlo) {
  if (!CanInfer(hlo)) {
    return absl::OkStatus();
  }
  Shape original_shape = hlo->shape();
  // (shape index, dimension) -> slot in the widened loop state holding that
  // dimension's dynamic size.
  ShapeTree<absl::flat_hash_map<int64_t, int64_t>> dynamic_output_mapping(
      original_shape);
  std::vector<HloInstruction*> operands_to_add;
  const int original_tuple_count = original_shape.tuple_shapes_size();
  int operand_count = original_tuple_count;
  DynamicParameterBinding binding_for_while;
  TF_RETURN_IF_ERROR(ForEachOperandDynamicDimension(
      hlo,
      [&](HloInstruction* operand, ShapeIndex index, int64_t dim,
          int64_t operand_num, HloInstruction* dynamic_size) -> absl::Status {
        TF_RET_CHECK(operand_num == 0);
        operands_to_add.push_back(dynamic_size);
        dynamic_output_mapping.mutable_element(index)->emplace(dim,
                                                               operand_count);
        DynamicParameterBinding::DynamicDimension dynamic_dimension{
            0,
            index,
            dim,
        };
        DynamicParameterBinding::DynamicSizeParameter dynamic_size_param{
            0,
            {operand_count},
        };
        TF_RETURN_IF_ERROR(
            binding_for_while.Bind(dynamic_size_param, dynamic_dimension));
        ++operand_count;
        return absl::OkStatus();
      }));
  if (operands_to_add.empty()) {
    // No dynamic dimensions enter the loop; nothing to rewrite.
    return absl::OkStatus();
  }
  HloInstruction* old_tuple_operand = hlo->mutable_operand(0);
  HloInstruction* old_body_root = hlo->while_body()->root_instruction();
  TF_ASSIGN_OR_RETURN(WhileUtil::MakeInstructionsLiveInResult result,
                      WhileUtil::MakeInstructionsLiveIn(hlo, operands_to_add));
  TF_RET_CHECK(result.replacement_instr->opcode() == HloOpcode::kTuple);
  HloInstruction* new_tuple_operand =
      result.new_while_instr->mutable_operand(0);
  // Transfer recorded dynamic dimensions from the old instructions onto the
  // rewritten while, its operand, and its body/condition clones.
  parent_->CopyMapping(old_tuple_operand, new_tuple_operand);
  hlo = result.new_while_instr;
  SetVisited(*hlo);
  for (auto [old_inst, new_inst] : result.while_body_instruction_map) {
    parent_->CopyMapping(old_inst, new_inst,
                         &result.while_body_instruction_map);
  }
  parent_->CopyMapping(old_body_root, hlo->while_body()->root_instruction(),
                       &result.while_body_instruction_map);
  for (auto [old_inst, new_inst] : result.while_condition_instruction_map) {
    parent_->CopyMapping(old_inst, new_inst,
                         &result.while_condition_instruction_map);
  }
  // Re-run inference inside body and condition with the size bindings.
  TF_RETURN_IF_ERROR(DynamicDimensionInferenceVisitor::Run(
                         hlo->while_body(), dataflow_analysis_,
                         binding_for_while, parent_, custom_call_handler_,
                         shape_check_mode_, assertion_generator_)
                         .status());
  TF_RETURN_IF_ERROR(DynamicDimensionInferenceVisitor::Run(
                         hlo->while_condition(), dataflow_analysis_,
                         binding_for_while, parent_, custom_call_handler_,
                         shape_check_mode_, assertion_generator_)
                         .status());
  // Rebuild the body root: original elements pass through via GTEs, the
  // extra slots carry each dimension's (possibly updated) dynamic size.
  HloInstruction* body_root = hlo->while_body()->root_instruction();
  std::vector<HloInstruction*> new_root_operands(body_root->operand_count(),
                                                 nullptr);
  for (int i = 0; i < original_tuple_count; ++i) {
    new_root_operands[i] =
        body_root->AddInstruction(HloInstruction::CreateGetTupleElement(
            body_root->shape().tuple_shapes(i), body_root, i));
  }
  TF_RETURN_IF_ERROR(dynamic_output_mapping.ForEachElementWithStatus(
      [&](const ShapeIndex& index,
          const absl::flat_hash_map<int64_t, int64_t>& dim_to_size)
          -> absl::Status {
        for (auto [dimension, output_index] : dim_to_size) {
          TF_RET_CHECK(new_root_operands[output_index] == nullptr);
          HloInstruction* dynamic_size =
              parent_->GetDynamicSize(body_root, index, dimension);
          TF_RET_CHECK(dynamic_size != nullptr);
          new_root_operands[output_index] = dynamic_size;
        }
        return absl::OkStatus();
      }));
  for (auto operand : new_root_operands) {
    TF_RET_CHECK(operand != nullptr);
  }
  HloInstruction* new_body_root = hlo->while_body()->AddInstruction(
      HloInstruction::CreateTuple(new_root_operands));
  // Re-record dynamic dimensions on the new root and its elements.
  for (int i = 0; i < original_tuple_count; ++i) {
    TF_RETURN_IF_ERROR(ForEachDynamicDimension(
        body_root,
        [&](ShapeIndex index, int64_t dimension,
            HloInstruction* dynamic_size) -> absl::Status {
          SetDynamicSize(new_body_root, index, dimension, dynamic_size);
          if (index.empty() || index.front() != i) {
            return absl::OkStatus();
          }
          index.pop_front();
          SetDynamicSize(new_root_operands[i], index, dimension,
                         dynamic_size);
          return absl::OkStatus();
        }));
  }
  hlo->while_body()->set_root_instruction(new_body_root);
  MarkAsChanged();
  // Outside the loop: read the dynamic sizes out of the extra result slots
  // and mark the corresponding dimensions static on the while's shape and on
  // the prefix-extraction GTEs.
  return dynamic_output_mapping.ForEachElementWithStatus(
      [&](const ShapeIndex& index,
          const absl::flat_hash_map<int64_t, int64_t>& dim_to_size)
          -> absl::Status {
        for (auto [dimension, output_index] : dim_to_size) {
          HloInstruction* dynamic_size = hlo->AddInstruction(
              HloInstruction::CreateGetTupleElement(hlo, output_index));
          SetDynamicSize(result.replacement_instr, index, dimension,
                         dynamic_size);
          ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), index)
              ->set_dynamic_dimension(dimension, false);
          TF_RET_CHECK(!index.empty());
          HloInstruction* gte =
              result.replacement_instr->mutable_operand(index.front());
          TF_RET_CHECK(gte->opcode() == HloOpcode::kGetTupleElement);
          TF_RET_CHECK(gte->operand(0) == hlo);
          ShapeUtil::GetMutableSubshape(gte->mutable_shape(),
                                        ShapeIndexView(index).subspan(1))
              ->set_dynamic_dimension(dimension, false);
        }
        return absl::OkStatus();
      });
}

// Parameters: in the entry computation, dynamic inputs are materialized via
// PadToStatic; in nested computations, dynamic sizes come from the caller
// through param_bindings_ and are read out of the bound parameter elements.
absl::Status DynamicDimensionInferenceVisitor::HandleParameter(
    HloInstruction* hlo) {
  if (hlo->parent()->IsEntryComputation()) {
    TF_RET_CHECK(param_bindings_.empty());
    return InsertPadToStaticOnInstruction(hlo);
  }
  return param_bindings_.ForEachBinding(
      [&](const DynamicParameterBinding::DynamicSizeParameter& dynamic_size,
          const DynamicParameterBinding::DynamicDimension& dynamic_dimension)
          -> absl::Status {
        if (dynamic_dimension.parameter_num == hlo->parameter_number()) {
          SetDynamicSize(
              hlo, dynamic_dimension.parameter_index,
              dynamic_dimension.dimension,
              // Materialize a GTE chain to the bound size element of the
              // size-carrying parameter.
              TupleUtil::AddGetTupleElements(HloPosition{
                  hlo->parent()->parameter_instruction(
                      dynamic_size.parameter_num),
                  dynamic_size.parameter_index,
              }));
        }
        return absl::OkStatus();
      });
}

// Infeed produces dynamic data from outside the module; convert it to
// static form (plus explicit size outputs) via PadToStatic.
absl::Status DynamicDimensionInferenceVisitor::HandleInfeed(
    HloInstruction* hlo) {
  return InsertPadToStaticOnInstruction(hlo);
}

// Invokes `fn(index, dim, size)` for every dynamic dimension recorded on
// `inst` in the parent inference's bookkeeping.
absl::Status DynamicDimensionInferenceVisitor::ForEachDynamicDimension(
    HloInstruction* inst, const DynamicDimensionFn& fn) {
  auto iter = parent_->per_hlo_dynamic_dimensions_.find(inst);
  if (iter != parent_->per_hlo_dynamic_dimensions_.end()) {
    for (auto& dynamic_dimension : iter->second) {
      HloInstruction* dynamic_size = parent_->GetDynamicSize(
          dynamic_dimension.inst, dynamic_dimension.index,
          dynamic_dimension.dim);
      TF_RETURN_IF_ERROR(
          fn(dynamic_dimension.index, dynamic_dimension.dim, dynamic_size));
    }
  }
  return absl::OkStatus();
}

// Decides whether the dynamic leaf value `instr`@`shape_index` must be
// padded to static form, by inspecting every use of that value:
//  - pure plumbing ops (async, call, tuple, GTE, conditional) never force
//    padding;
//  - a while only forces padding if its body keeps the dimension dynamic;
//  - Set/GetDimensionSize always force padding;
//  - otherwise padding is needed unless the use is already a PadToStatic
//    custom call or the op_supports_dynamism_handler_ says the op can
//    consume dynamic shapes.
absl::StatusOr<bool> DynamicDimensionInferenceVisitor::RequiresPadToStatic(
    HloInstruction* instr, ShapeIndex shape_index) {
  TF_RET_CHECK(ShapeUtil::IsLeafIndex(instr->shape(), shape_index))
      << instr->shape() << " @ " << shape_index;
  if (ShapeUtil::GetSubshape(instr->shape(), shape_index).is_static()) {
    return false;
  }
  auto uses =
      dataflow_analysis_.GetValueDefinedAt(instr, shape_index).GetUses();
  for (const auto& use : uses) {
    if (use.instruction->opcode() == HloOpcode::kAsyncStart ||
        use.instruction->opcode() == HloOpcode::kAsyncUpdate ||
        use.instruction->opcode() == HloOpcode::kAsyncDone ||
        use.instruction->opcode() == HloOpcode::kCall ||
        use.instruction->opcode() == HloOpcode::kTuple ||
        use.instruction->opcode() == HloOpcode::kGetTupleElement ||
        use.instruction->opcode() == HloOpcode::kConditional) {
      continue;
    }
    if (use.instruction->opcode() == HloOpcode::kWhile) {
      TF_RET_CHECK(use.operand_number == 0);
      HloInstruction* root = use.instruction->while_body()->root_instruction();
      if (parent_->HasDynamicDimension(root, use.operand_index)) {
        return true;
      }
      continue;
    }
    if (use.instruction->opcode() == HloOpcode::kSetDimensionSize) {
      TF_RET_CHECK(use.operand_number == 0);
      return true;
    }
    if (use.instruction->opcode() == HloOpcode::kGetDimensionSize) {
      return true;
    }
    if (use.instruction->opcode() != HloOpcode::kCustomCall ||
        use.instruction->custom_call_target() != "PadToStatic") {
      if (parent_->op_supports_dynamism_handler_ == nullptr) {
        return true;
      }
      if (parent_->op_supports_dynamism_handler_(use.instruction) ==
          OpDynamismSupport::kNoSupport) {
        return true;
      }
    }
  }
  return false;
}

// Replaces the dynamic-shaped output of `inst` with a statically-shaped
// equivalent: the (possibly tuple) result is disassembled into leaves, each
// leaf that RequiresPadToStatic gets a "PadToStatic" custom call returning
// (static data, size per dimension), and the pieces are reassembled into a
// tuple that replaces `inst` in all of its users (and as the computation
// root, if applicable). The extracted size outputs are recorded as the
// dynamic sizes of the padded data.
absl::Status DynamicDimensionInferenceVisitor::InsertPadToStaticOnInstruction(
    HloInstruction* inst) {
  if (inst->shape().is_static()) {
    return absl::OkStatus();
  }
  // First pass: decide which leaves actually need padding.
  ShapeTree<bool> needs_pad(inst->shape(), false);
  bool any_needs_pad = false;
  TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
      inst->shape(),
      [&](const Shape& subshape, const ShapeIndex& shape_index) {
        if (subshape.IsTuple()) {
          return absl::OkStatus();
        }
        TF_ASSIGN_OR_RETURN(bool do_pad,
                            RequiresPadToStatic(inst, shape_index));
        if (do_pad) {
          *needs_pad.mutable_element(shape_index) = true;
          any_needs_pad = true;
        }
        return absl::OkStatus();
      }));
  if (!any_needs_pad) {
    return absl::OkStatus();
  }
  // Snapshot users before rewriting, since we add new users below.
  auto users = inst->users();
  ShapeTree<HloInstruction*> gtes =
      TupleUtil::DisassembleTupleInstruction(inst);
  // Second pass (post-order, children before parents): build the padded
  // replacement for every subshape.
  ShapeTree<HloInstruction*> padded(inst->shape(), nullptr);
  TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapePostOrderWithStatus(
      inst->shape(),
      [&](const Shape& subshape, const ShapeIndex& shape_index)
          -> absl::Status {
        HloInstruction* element = gtes.element(shape_index);
        SetVisited(*gtes.element(shape_index));
        if (subshape.IsTuple()) {
          // Reassemble this tuple level from the already-padded children.
          absl::InlinedVector<HloInstruction*, 2> children;
          ShapeIndex child_index = shape_index;
          for (int i = 0; i < subshape.tuple_shapes_size(); ++i) {
            child_index.push_back(i);
            children.push_back(padded.element(child_index));
            child_index.pop_back();
          }
          HloInstruction* tuple =
              element->AddInstruction(HloInstruction::CreateVariadic(
                  subshape, HloOpcode::kTuple, children));
          TF_CHECK_OK(ForEachOperandDynamicDimension(
              tuple,
              [&](HloInstruction* operand, ShapeIndex index, int64_t dimension,
                  int64_t operand_index, HloInstruction* dynamic_size) {
                index.push_front(operand_index);
                SetDynamicSize(tuple, index, dimension, dynamic_size);
                return absl::OkStatus();
              }));
          *padded.mutable_element(shape_index) = tuple;
          return absl::OkStatus();
        }
        if (needs_pad.element(shape_index)) {
          // PadToStatic returns (static data, size_0, ..., size_{rank-1}).
          Shape data_output_shape =
              ShapeUtil::MakeStaticShape(element->shape());
          Shape output_shape = ShapeUtil::MakeTupleShape({data_output_shape});
          for (int64_t i = 0; i < element->shape().rank(); ++i) {
            ShapeUtil::AppendShapeToTuple(ShapeUtil::MakeScalarShape(S32),
                                          &output_shape);
          }
          HloInstruction* pad_to_static = inst->parent()->AddInstruction(
              HloInstruction::CreateCustomCall(output_shape, {element},
                                               "PadToStatic"),
              absl::StrCat(element->name(), ".padded"));
          SetVisited(*pad_to_static);
          HloInstruction* data_output = inst->parent()->AddInstruction(
              HloInstruction::CreateGetTupleElement(data_output_shape,
                                                    pad_to_static, 0),
              absl::StrCat(element->name(), ".data"));
          SetVisited(*data_output);
          // Record a dynamic size for each dimension that was dynamic.
          for (int64_t i = 0; i < element->shape().rank(); ++i) {
            if (!element->shape().is_dynamic_dimension(i)) {
              continue;
            }
            HloInstruction* dynamic_size_output =
                inst->parent()->AddInstruction(
                    HloInstruction::CreateGetTupleElement(
                        output_shape.tuple_shapes(i + 1), pad_to_static,
                        i + 1),
                    absl::StrCat(element->name(), ".size"));
            SetVisited(*dynamic_size_output);
            SetDynamicSize(data_output, {}, i, dynamic_size_output, false);
          }
          *padded.mutable_element(shape_index) = data_output;
        } else {
          *padded.mutable_element(shape_index) = element;
        }
        return absl::OkStatus();
      }));
  // Route every original user (and the root, if any) to the padded result.
  HloInstruction* result = padded.element({});
  for (auto user : users) {
    for (int64_t i : user->OperandIndices(inst)) {
      TF_RETURN_IF_ERROR(user->ReplaceOperandWith(i, result));
    }
  }
  if (inst->IsRoot()) {
    inst->parent()->set_root_instruction(result);
  }
  MarkAsChanged();
  return absl::OkStatus();
}

absl::Status
DynamicDimensionInferenceVisitor::InsertShapeCheck(
    // Emits a dimension-equality check between dim1 and dim2 according to
    // shape_check_mode_: kIgnore does nothing, kCompileTime fails
    // immediately, kRuntime AND-accumulates an equality comparison into
    // shape_assertion_ for later emission.
    // NOTE(review): support_implicit_broadcast is unused in this body.
    HloInstruction* dim1, HloInstruction* dim2,
    bool support_implicit_broadcast) {
  switch (shape_check_mode_) {
    case DynamicDimensionInference::kIgnore:
      return absl::OkStatus();
    case DynamicDimensionInference::kCompileTime:
      return InvalidArgument(
          "Fail to proof the equality of two dimensions at compile time: "
          "%s vs %s",
          dim1->ToString(), dim2->ToString());
    case DynamicDimensionInference::kRuntime: {
      TF_ASSIGN_OR_RETURN(
          HloInstruction * assertion,
          MakeCompareHlo(Comparison::Direction::kEq, dim1, dim2));
      if (shape_assertion_ == nullptr) {
        shape_assertion_ = assertion;
      } else {
        TF_ASSIGN_OR_RETURN(
            shape_assertion_,
            MakeBinaryHlo(HloOpcode::kAnd, shape_assertion_, assertion));
      }
      return absl::OkStatus();
    }
    default:
      LOG(FATAL) << "Unreachable";
  }
}

// Invokes `fn` for every dynamic dimension recorded on one specific operand
// of `inst`.
absl::Status DynamicDimensionInferenceVisitor::ForEachDynamicDimensionInOperand(
    HloInstruction* inst, int64_t operand_index,
    OperandDynamicDimensionFn fn) {
  auto iter =
      parent_->per_hlo_dynamic_dimensions_.find(inst->operand(operand_index));
  if (iter != parent_->per_hlo_dynamic_dimensions_.end()) {
    for (auto& dynamic_dimension : iter->second) {
      HloInstruction* dynamic_size = parent_->GetDynamicSize(
          dynamic_dimension.inst, dynamic_dimension.index,
          dynamic_dimension.dim);
      TF_RETURN_IF_ERROR(fn(dynamic_dimension.inst, dynamic_dimension.index,
                            dynamic_dimension.dim, operand_index,
                            dynamic_size));
    }
  }
  return absl::OkStatus();
}

// Invokes `fn` for every dynamic dimension recorded on any operand of
// `inst`.
absl::Status DynamicDimensionInferenceVisitor::ForEachOperandDynamicDimension(
    HloInstruction* inst, OperandDynamicDimensionFn fn) {
  for (int64_t operand_index = 0; operand_index < inst->operand_count();
       ++operand_index) {
    TF_RETURN_IF_ERROR(
        ForEachDynamicDimensionInOperand(inst, operand_index, fn));
  }
  return absl::OkStatus();
}

// Records that dimension `dim` of `inst` at shape `index` has its runtime
// size given by the scalar instruction `size`. CHECK-fails on invalid
// (tuple / out-of-range) targets, and on re-registration with a different
// size instruction.
void DynamicDimensionInference::SetDynamicSize(HloInstruction* inst,
                                               const ShapeIndex& index,
                                               int64_t dim,
                                               HloInstruction* size) {
  CHECK_NE(inst, nullptr);
  CHECK_NE(size, nullptr);
  VLOG(1) << "Set dimension inst " << inst->ToString() << " index "
          << index.ToString() << "@" << dim << " to " << size->ToShortString();
  const Shape& subshape = ShapeUtil::GetSubshape(inst->shape(), index);
  CHECK(!subshape.IsTuple()) << "Can't set a tuple shape to dynamic dimension";
  CHECK(dim < subshape.rank() && dim >= 0)
      << "Asked to set invalid dynamic dimension. Shape: "
      << subshape.ToString() << ", Dimension: " << dim;
  DynamicDimension dynamic_dimension{inst, index, dim};
  // Updating an existing entry with a different size is a bug upstream.
  auto [it, inserted] = dynamic_mapping_.try_emplace(dynamic_dimension, size);
  if (!inserted) {
    CHECK_EQ(size, it->second)
        << "old: " << it->second->ToShortString()
        << ", new: " << size->ToShortString();
  }
  auto iter = per_hlo_dynamic_dimensions_.try_emplace(inst);
  iter.first->second.emplace(dynamic_dimension);
}

// Copies every dynamic dimension recorded on `from` onto `to`. When
// `dynamic_size_map` is given, the size instruction is translated through
// it (used when the sizes were cloned into another computation).
void DynamicDimensionInference::CopyMapping(
    HloInstruction* from, HloInstruction* to,
    const absl::flat_hash_map<HloInstruction*, HloInstruction*>*
        dynamic_size_map) {
  auto iter = per_hlo_dynamic_dimensions_.find(from);
  if (iter != per_hlo_dynamic_dimensions_.end()) {
    for (auto& dynamic_dimension : iter->second) {
      HloInstruction* dynamic_size =
          GetDynamicSize(dynamic_dimension.inst, dynamic_dimension.index,
                         dynamic_dimension.dim);
      if (dynamic_size_map != nullptr) {
        dynamic_size = dynamic_size_map->at(dynamic_size);
      }
      SetDynamicSize(to, dynamic_dimension.index, dynamic_dimension.dim,
                     dynamic_size);
    }
  }
}

// Factory entry point: constructs the inference object and runs the
// analysis over the module, returning the populated result.
absl::StatusOr<DynamicDimensionInference> DynamicDimensionInference::Run(
    HloModule* module,
    OpSupportsDynamismHandler op_supports_dynamism_handler,
    CustomCallInferenceHandler custom_call_handler,
    ShapeCheckMode shape_check_mode,
    const AssertionGenerator& assertion_generator,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  DynamicDimensionInference inference(
      module, std::move(op_supports_dynamism_handler),
      std::move(custom_call_handler), shape_check_mode, assertion_generator,
      execution_threads);
  TF_RETURN_IF_ERROR(inference.AnalyzeDynamicDimensions());
  return std::move(inference);
}

std::string
DynamicDimensionInference::ToString() const {
  // Human-readable dump of every recorded (instruction, index, dim) ->
  // dynamic-size-instruction mapping, one entry per line.
  std::vector<std::string> pieces;
  pieces.push_back("DynamicDimensionInference: ");
  for (const auto& mapping : dynamic_mapping_) {
    const DynamicDimension& dynamic_dimension = mapping.first;
    pieces.push_back(absl::StrFormat(
        " -- instruction %s at %s has dim %lld as dynamic"
        " dimension, which is represented by instruction %s",
        dynamic_dimension.inst->ToString(), dynamic_dimension.index.ToString(),
        dynamic_dimension.dim, mapping.second->ToString()));
  }
  return absl::StrJoin(pieces, "\n");
}

DynamicDimensionInference::DynamicDimensionInference(
    HloModule* module, OpSupportsDynamismHandler op_supports_dynamism_handler,
    CustomCallInferenceHandler custom_call_handler,
    ShapeCheckMode shape_check_mode, AssertionGenerator assertion_generator,
    const absl::flat_hash_set<absl::string_view>& execution_threads)
    : module_(module),
      op_supports_dynamism_handler_(std::move(op_supports_dynamism_handler)),
      custom_call_handler_(std::move(custom_call_handler)),
      shape_check_mode_(shape_check_mode),
      assertion_generator_(assertion_generator),
      execution_threads_(execution_threads) {}

// Runs dataflow analysis once over the module, then the per-computation
// visitor over every computation (post-order) included in
// execution_threads_, accumulating whether anything changed.
absl::Status DynamicDimensionInference::AnalyzeDynamicDimensions() {
  TF_ASSIGN_OR_RETURN(
      std::unique_ptr<HloDataflowAnalysis> dataflow_analysis,
      HloDataflowAnalysis::Run(*module_, false, true, nullptr, nullptr,
                               execution_threads_));
  for (HloComputation* computation : module_->MakeComputationPostOrder()) {
    if (!HloInstruction::IsThreadIncluded(computation->execution_thread(),
                                          execution_threads_)) {
      continue;
    }
    TF_ASSIGN_OR_RETURN(
        bool changed,
        DynamicDimensionInferenceVisitor::Run(
            computation, *dataflow_analysis, {}, this, custom_call_handler_,
            shape_check_mode_, assertion_generator_));
    changed_ |= changed;
  }
  return absl::OkStatus();
}

// Replaces every occurrence of the S32 scalar `replace` as a recorded
// dynamic size with the S32 scalar `with`.
void DynamicDimensionInference::ReplaceAllDynamicDimensionUsesWith(
    HloInstruction* replace, HloInstruction* with) {
  CHECK(Shape::Equal().IgnoreLayout()(replace->shape(),
                                      ShapeUtil::MakeScalarShape(S32)));
  CHECK(Shape::Equal().IgnoreLayout()(with->shape(),
                                      ShapeUtil::MakeScalarShape(S32)));
  for (auto& kv : dynamic_mapping_) {
    if (kv.second == replace) {
      kv.second = with;
    }
  }
}

// Copies the dynamic-size mappings of `inst` at `index` onto the
// shape-compatible replacement `new_inst`, dimension by dimension.
absl::Status DynamicDimensionInference::ForwardDynamicSize(
    HloInstruction* inst, HloInstruction* new_inst, const ShapeIndex& index) {
  TF_RET_CHECK(ShapeUtil::Compatible(inst->shape(), new_inst->shape()));
  for (int64_t dim = 0; dim < inst->shape().rank(); ++dim) {
    DynamicDimension dynamic_dimension_new{new_inst, index, dim};
    DynamicDimension dynamic_dimension{inst, index, dim};
    auto iter = dynamic_mapping_.find(dynamic_dimension);
    if (iter != dynamic_mapping_.end()) {
      dynamic_mapping_.insert({dynamic_dimension_new, iter->second});
      // NOTE(review): this inner `iter` intentionally shadows the outer one;
      // it is the try_emplace result for the per-HLO set.
      auto iter = per_hlo_dynamic_dimensions_.try_emplace(new_inst);
      iter.first->second.emplace(dynamic_dimension_new);
    }
  }
  return absl::OkStatus();
}

// Returns true if any non-tuple subshape of `inst` under the prefix `index`
// has a recorded dynamic size for any dimension.
bool DynamicDimensionInference::HasDynamicDimension(
    HloInstruction* inst, ShapeIndexView index) const {
  bool has_dynamic_dim = false;
  ShapeUtil::ForEachSubshape(inst->shape(), [&](const Shape& subshape,
                                                const ShapeIndex& subindex) {
    if (subshape.IsTuple()) {
      return;
    }
    // Only consider subshapes under the requested index prefix.
    if (ShapeIndexView(subindex).subspan(0, index.size()) != index) {
      return;
    }
    for (int64_t i = 0; i < subshape.dimensions_size(); ++i) {
      HloInstruction* operand_dynamic_size = GetDynamicSize(inst, subindex, i);
      if (operand_dynamic_size != nullptr) {
        has_dynamic_dim = true;
      }
    }
  });
  return has_dynamic_dim;
}

// Returns `inst`'s shape with every dimension that has a recorded dynamic
// size marked dynamic. Does not modify the instruction itself.
Shape DynamicDimensionInference::GetDynamicShape(HloInstruction* inst) {
  Shape shape = inst->shape();
  ShapeUtil::ForEachMutableSubshape(
      &shape, [&](Shape* subshape, const ShapeIndex& index) {
        if (!subshape->IsArray()) {
          return;
        }
        for (int64_t dimension = 0; dimension < subshape->rank();
             ++dimension) {
          if (GetDynamicSize(inst, index, dimension) != nullptr) {
            subshape->set_dynamic_dimension(dimension, true);
          }
        }
      });
  return shape;
}

// Looks up the instruction that represents the runtime size of dimension
// `dim` of `inst` at `index`; nullptr when none has been recorded.
HloInstruction* DynamicDimensionInference::GetDynamicSize(
    HloInstruction* inst, const ShapeIndex& index, int64_t dim) const {
  auto
iter = dynamic_mapping_.find(DynamicDimension{inst, index, dim});
  if (iter != dynamic_mapping_.end()) {
    return iter->second;
  }
  return nullptr;
}

// Const overload; lookup does not mutate state, so const_cast is safe here.
const HloInstruction* DynamicDimensionInference::GetDynamicSize(
    const HloInstruction* inst, const ShapeIndex& index, int64_t dim) const {
  return GetDynamicSize(const_cast<HloInstruction*>(inst), index, dim);
}

// Returns one entry per dimension of `inst` at `index`: the recorded
// dynamic size instruction, or nullptr for static dimensions.
std::vector<HloInstruction*> DynamicDimensionInference::GetDynamicSizes(
    HloInstruction* inst, const ShapeIndex& index) const {
  CHECK(ShapeUtil::IndexIsValid(inst->shape(), index));
  const int64_t rank = ShapeUtil::GetSubshape(inst->shape(), index).rank();
  std::vector<HloInstruction*> result(rank, nullptr);
  for (int64_t i = 0; i < rank; ++i) {
    result[i] = GetDynamicSize(inst, index, i);
  }
  return result;
}

// Returns true iff inference can proceed on `hlo`: either nothing about it
// is dynamic (fast reject), or every dynamic operand dimension already has
// a recorded size instruction. Also CHECKs the invariant that a recorded
// size on a still-dynamic operand shape only occurs downstream of
// SetDimensionSize or a custom call.
bool DynamicDimensionInference::CanInfer(HloInstruction* hlo) {
  if (hlo->shape().is_static() && hlo->called_computations().empty() &&
      hlo->opcode() != HloOpcode::kCustomCall) {
    return false;
  }
  bool ok = true;
  for (int64_t operand_index = 0; operand_index < hlo->operand_count();
       ++operand_index) {
    ShapeUtil::ForEachSubshape(
        hlo->operand(operand_index)->shape(),
        [&](const Shape& subshape, const ShapeIndex& shape_index) {
          if (!subshape.IsArray()) {
            return;
          }
          for (int64_t dimension = 0; dimension < subshape.rank();
               ++dimension) {
            bool shape_is_dynamic = subshape.is_dynamic_dimension(dimension);
            bool dynamic_size_recorded =
                GetDynamicSize(hlo->operand(operand_index), shape_index,
                               dimension) != nullptr;
            if (shape_is_dynamic && !dynamic_size_recorded) {
              VLOG(2) << "cannot infer " << hlo->ToShortString()
                      << " because operand " << operand_index << " ("
                      << hlo->operand(operand_index)->ToShortString() << ")"
                      << " subshape " << shape_index.ToString()
                      << " is missing dynamic size for dimension "
                      << dimension;
              ok = false;
            }
            CHECK(hlo->operand(operand_index)->opcode() ==
                      HloOpcode::kSetDimensionSize ||
                  hlo->operand(operand_index)->opcode() ==
                      HloOpcode::kCustomCall ||
                  !shape_is_dynamic || !dynamic_size_recorded);
          }
        });
  }
  return ok;
}

}
}
// Unit tests for DynamicDimensionInference (see
// xla/service/dynamic_dimension_inference.h).
#include "xla/service/dynamic_dimension_inference.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/service/hlo_runner.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test_benchmark.h"

namespace op = xla::testing::opcode_matchers;

namespace xla {
namespace {

// Test fixture: owns a fresh verified HloModule per test and runs
// DynamicDimensionInference over it on demand via RunInference().
class DynamicDimensionInferenceTest : public HloTestBase {
 protected:
  DynamicDimensionInferenceTest() : HloTestBase() {
    module_ = CreateNewVerifiedModule();
  }

  // Runs DynamicDimensionInference::Run on module_ and stores the result in
  // inference_.  All arguments default to the pass's no-op/ignore settings
  // so most tests can call RunInference() bare.
  absl::Status RunInference(
      OpSupportsDynamismHandler op_supports_dynamism_handler = nullptr,
      DynamicDimensionInference::CustomCallInferenceHandler handler = nullptr,
      DynamicDimensionInference::ShapeCheckMode shape_check_mode =
          DynamicDimensionInference::ShapeCheckMode::kIgnore,
      const DynamicDimensionInference::AssertionGenerator& assertion_generator =
          nullptr) {
    TF_ASSIGN_OR_RETURN(DynamicDimensionInference inference,
                        DynamicDimensionInference::Run(
                            module_.get(), op_supports_dynamism_handler,
                            handler, shape_check_mode, assertion_generator));
    inference_ = std::make_unique<DynamicDimensionInference>(inference);
    return absl::OkStatus();
  }

  // Builds and registers a scalar F32 add computation (used as a reducer).
  HloComputation* GetAdd() {
    auto embedded_builder = HloComputation::Builder("add");
    auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
        0, ShapeUtil::MakeShape(F32, {}), "lhs"));
    auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
        1, ShapeUtil::MakeShape(F32, {}), "rhs"));
    embedded_builder.AddInstruction(
        HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));
    return module_->AddEmbeddedComputation(embedded_builder.Build());
  }

  // Builds and registers a two-result scalar add computation returning a
  // tuple (add(lhs, rhs), add(lhs.1, rhs.1)) — used for variadic reduce.
  HloComputation* GetAddTuple() {
    auto embedded_builder = HloComputation::Builder("add");
    auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
        0, ShapeUtil::MakeShape(F32, {}), "lhs"));
    auto lhs_1 =
        embedded_builder.AddInstruction(HloInstruction::CreateParameter(
            1, ShapeUtil::MakeShape(F32, {}), "lhs.1"));
    auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
        2, ShapeUtil::MakeShape(F32, {}), "rhs"));
    auto rhs_1 =
        embedded_builder.AddInstruction(HloInstruction::CreateParameter(
            3, ShapeUtil::MakeShape(F32, {}), "rhs.1"));
    auto add = embedded_builder.AddInstruction(
        HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));
    auto add_1 = embedded_builder.AddInstruction(HloInstruction::CreateBinary(
        lhs->shape(), HloOpcode::kAdd, lhs_1, rhs_1));
    embedded_builder.AddInstruction(HloInstruction::CreateTuple({add, add_1}));
    return module_->AddEmbeddedComputation(embedded_builder.Build());
  }

  // Builds and registers a scalar F32 >= comparison computation (used as a
  // select function for select-and-scatter).
  HloComputation* GetGe() {
    auto embedded_builder = HloComputation::Builder("ge");
    auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
        0, ShapeUtil::MakeShape(F32, {}), "lhs"));
    auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
        1, ShapeUtil::MakeShape(F32, {}), "rhs"));
    embedded_builder.AddInstruction(HloInstruction::CreateCompare(
        ShapeUtil::MakeShape(PRED, {}), lhs, rhs, ComparisonDirection::kGe));
    return module_->AddEmbeddedComputation(embedded_builder.Build());
  }

  // Module under test, rebuilt for every test case.
  std::unique_ptr<HloModule> module_;
  // Result of the last RunInference() call.
  std::unique_ptr<DynamicDimensionInference> inference_;
  // S32 scalar shape used for all dynamic-size parameters.
  const Shape scalar_shape_ = ShapeUtil::MakeShape(S32, {});
};

// set-dimension-size on a parameter records its size operand as the dynamic
// size of the chosen dimension; untouched parameters stay static.
TEST_F(DynamicDimensionInferenceTest, ParamTest) {
  auto builder = HloComputation::Builder(TestName());
  auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});
  auto dynamic_shape = ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, true,
false}); auto param = builder.AddInstruction( HloInstruction::CreateParameter(0, input_shape, "param")); auto param2 = builder.AddInstruction( HloInstruction::CreateParameter(1, scalar_shape_, "param")); auto result = builder.AddInstruction( HloInstruction::CreateSetDimensionSize(dynamic_shape, param, param2, 1)); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); TF_ASSERT_OK(RunInference()); EXPECT_EQ(inference_->GetDynamicSize(result, {}, 1), param2); EXPECT_EQ(inference_->GetDynamicSize(param, {}, 0), nullptr); EXPECT_EQ(inference_->GetDynamicSize(param2, {}, 0), nullptr); } TEST_F(DynamicDimensionInferenceTest, ElementwiseTest) { auto builder = HloComputation::Builder(TestName()); auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2}); auto dynamic_shape = ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, true, false}); auto data_param = builder.AddInstruction( HloInstruction::CreateParameter(0, input_shape, "data_param")); auto size_param = builder.AddInstruction( HloInstruction::CreateParameter(1, scalar_shape_, "size_param")); auto dynamic_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( dynamic_shape, data_param, size_param, 1)); auto* negate = builder.AddInstruction(HloInstruction::CreateUnary( dynamic_shape, HloOpcode::kNegate, dynamic_param)); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); TF_ASSERT_OK(RunInference()); EXPECT_EQ(inference_->GetDynamicSize(negate, {}, 1), size_param); } TEST_F(DynamicDimensionInferenceTest, ReduceTestI) { auto builder = HloComputation::Builder(TestName()); auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2}); auto reduce_shape = ShapeUtil::MakeShape(F32, {2}, {true}); auto dynamic_shape = ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, true, false}); auto data_param = builder.AddInstruction( HloInstruction::CreateParameter(0, input_shape, "data_param")); auto size_param = builder.AddInstruction( HloInstruction::CreateParameter(1, 
scalar_shape_, "size_param")); auto dynamic_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( dynamic_shape, data_param, size_param, 1)); auto negate = builder.AddInstruction(HloInstruction::CreateUnary( dynamic_shape, HloOpcode::kNegate, dynamic_param)); auto init = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0))); auto reduce = builder.AddInstruction(HloInstruction::CreateReduce( reduce_shape, negate, init, {0, 2}, GetAdd())); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); TF_ASSERT_OK(RunInference()); EXPECT_EQ(inference_->GetDynamicSize(reduce, {}, 0), size_param); } TEST_F(DynamicDimensionInferenceTest, ReduceTestII) { auto builder = HloComputation::Builder(TestName()); auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2}); auto reduce_shape = ShapeUtil::MakeShape(F32, {1, 2}, {false, true}); auto dynamic_shape = ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, false, true}); auto data_param = builder.AddInstruction( HloInstruction::CreateParameter(0, input_shape, "data_param")); auto size_param = builder.AddInstruction( HloInstruction::CreateParameter(1, scalar_shape_, "size_param")); auto dynamic_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( dynamic_shape, data_param, size_param, 2)); auto negate = builder.AddInstruction(HloInstruction::CreateUnary( dynamic_shape, HloOpcode::kNegate, dynamic_param)); auto init = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0))); auto reduce = builder.AddInstruction( HloInstruction::CreateReduce(reduce_shape, negate, init, {1}, GetAdd())); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); TF_ASSERT_OK(RunInference()); EXPECT_EQ(inference_->GetDynamicSize(reduce, {}, 1), size_param); EXPECT_EQ(inference_->GetDynamicSize(reduce, {}, 0), nullptr); } TEST_F(DynamicDimensionInferenceTest, VariadicReduce) { auto builder = 
HloComputation::Builder(TestName()); auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2}); auto reduce_shape = ShapeUtil::MakeShape(F32, {1, 2}, {false, true}); auto dynamic_shape = ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, false, true}); auto data_param_1 = builder.AddInstruction( HloInstruction::CreateParameter(0, input_shape, "data_param")); auto data_param_2 = builder.AddInstruction( HloInstruction::CreateParameter(1, input_shape, "data_param.2")); auto size_param = builder.AddInstruction( HloInstruction::CreateParameter(2, scalar_shape_, "size_param")); auto data_param_dynamic_1 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( dynamic_shape, data_param_1, size_param, 2)); auto data_param_dynamic_2 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( dynamic_shape, data_param_2, size_param, 2)); auto dynamic_negate_1 = builder.AddInstruction(HloInstruction::CreateUnary( dynamic_shape, HloOpcode::kNegate, data_param_dynamic_1)); auto dynamic_negate_2 = builder.AddInstruction(HloInstruction::CreateUnary( dynamic_shape, HloOpcode::kNegate, data_param_dynamic_2)); auto init = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0))); auto reduce = builder.AddInstruction(HloInstruction::CreateReduce( ShapeUtil::MakeTupleShape({reduce_shape, reduce_shape}), {dynamic_negate_1, dynamic_negate_2}, {init, init}, {1}, GetAddTuple())); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); TF_ASSERT_OK(RunInference()); EXPECT_EQ(inference_->GetDynamicSize(reduce, {0}, 1), size_param); EXPECT_EQ(inference_->GetDynamicSize(reduce, {1}, 1), size_param); EXPECT_EQ(inference_->GetDynamicSize(reduce, {0}, 0), nullptr); EXPECT_EQ(inference_->GetDynamicSize(reduce, {1}, 0), nullptr); } TEST_F(DynamicDimensionInferenceTest, DotTest) { auto builder = HloComputation::Builder(TestName()); constexpr int xdim = 3; constexpr int ydim = 2; constexpr int zdim = 1; auto xy_shape = 
ShapeUtil::MakeShape(F32, {xdim, ydim}); auto yz_shape = ShapeUtil::MakeShape(F32, {ydim, zdim}); auto xy_dynamic_shape = ShapeUtil::MakeShape(F32, {xdim, ydim}, {true, true}); auto yz_dynamic_shape = ShapeUtil::MakeShape(F32, {ydim, zdim}, {true, false}); auto xz_dynamic_shape = ShapeUtil::MakeShape(F32, {xdim, zdim}, {true, false}); auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter( 0, xy_shape, "A")); auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter( 1, yz_shape, "B")); auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter( 2, scalar_shape_, "size_param")); a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( ShapeUtil::MakeShape(F32, xy_shape.dimensions(), {true, false}), a_param, size_param, 0)); a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( xy_dynamic_shape, a_param, size_param, 1)); b_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( yz_dynamic_shape, b_param, size_param, 0)); DotDimensionNumbers dot_dnums; dot_dnums.add_lhs_contracting_dimensions(1); dot_dnums.add_rhs_contracting_dimensions(0); auto dot = builder.AddInstruction( HloInstruction::CreateDot(xz_dynamic_shape, a_param, b_param, dot_dnums, HloTestBase::DefaultPrecisionConfig(2))); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); TF_ASSERT_OK(RunInference()); EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 0), size_param); EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 1), nullptr); } TEST_F(DynamicDimensionInferenceTest, DotTestBatch) { auto builder = HloComputation::Builder(TestName()); auto lhs_shape = ShapeUtil::MakeShape(F32, {4, 128, 2, 8}); auto rhs_shape = ShapeUtil::MakeShape(F32, {4, 128, 2, 8}); auto output_shape = ShapeUtil::MakeShape(F32, {4, 2, 128, 128}, {true, false, false, false}); auto lhs_shape_dynamic = ShapeUtil::MakeShape(F32, {4, 128, 2, 8}, {true, false, false, false}); auto* a_param = 
builder.AddInstruction(HloInstruction::CreateParameter( 0, lhs_shape, "A")); auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter( 1, rhs_shape, "B")); auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter( 2, scalar_shape_, "size_param")); a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( lhs_shape_dynamic, a_param, size_param, 0)); DotDimensionNumbers dot_dnums; dot_dnums.add_lhs_contracting_dimensions(3); dot_dnums.add_rhs_contracting_dimensions(3); dot_dnums.add_lhs_batch_dimensions(0); dot_dnums.add_lhs_batch_dimensions(2); dot_dnums.add_rhs_batch_dimensions(0); dot_dnums.add_rhs_batch_dimensions(2); auto dot = builder.AddInstruction( HloInstruction::CreateDot(output_shape, a_param, b_param, dot_dnums, HloTestBase::DefaultPrecisionConfig(2))); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); TF_ASSERT_OK(RunInference()); EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 0), size_param); EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 1), nullptr); EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 2), nullptr); EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 3), nullptr); } TEST_F(DynamicDimensionInferenceTest, DotTestMultiContracting) { auto builder = HloComputation::Builder(TestName()); auto lhs_shape = ShapeUtil::MakeShape(F32, {2, 2, 8, 64}); auto rhs_shape = ShapeUtil::MakeShape(F32, {2, 2, 512}); auto output_shape = ShapeUtil::MakeShape(F32, {8, 64, 512}); auto lhs_shape_dynamic = ShapeUtil::MakeShape(F32, {2, 2, 8, 64}, {true, true, false, false}); auto rhs_shape_dynamic = ShapeUtil::MakeShape(F32, {2, 2, 512}, {true, true, false}); auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter( 0, lhs_shape, "A")); auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter( 1, rhs_shape, "B")); auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter( 2, scalar_shape_, "size_param")); a_param = 
builder.AddInstruction(HloInstruction::CreateSetDimensionSize( ShapeUtil::MakeShape(F32, lhs_shape.dimensions(), {true, false, false, false}), a_param, size_param, 0)); a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( lhs_shape_dynamic, a_param, size_param, 1)); b_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( ShapeUtil::MakeShape(F32, rhs_shape.dimensions(), {true, false, false}), b_param, size_param, 0)); b_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( rhs_shape_dynamic, b_param, size_param, 1)); DotDimensionNumbers dot_dnums; dot_dnums.add_lhs_contracting_dimensions(0); dot_dnums.add_lhs_contracting_dimensions(1); dot_dnums.add_rhs_contracting_dimensions(0); dot_dnums.add_rhs_contracting_dimensions(1); auto dot = builder.AddInstruction( HloInstruction::CreateDot(output_shape, a_param, b_param, dot_dnums, HloTestBase::DefaultPrecisionConfig(2))); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); TF_ASSERT_OK(RunInference()); EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 0), nullptr); EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 1), nullptr); EXPECT_EQ(inference_->GetDynamicSize(dot, {}, 2), nullptr); } TEST_F(DynamicDimensionInferenceTest, ConvolutionTest) { auto builder = HloComputation::Builder(TestName()); constexpr int xdim = 3; constexpr int ydim = 2; constexpr int zdim = 1; auto xy_shape = ShapeUtil::MakeShape(F32, {xdim, ydim}); auto yz_shape = ShapeUtil::MakeShape(F32, {ydim, zdim}); auto xy_shape_dynamic = ShapeUtil::MakeShape(F32, {xdim, ydim}, {true, true}); auto zx_shape_dynamic = ShapeUtil::MakeShape(F32, {zdim, xdim}, {false, true}); auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter( 0, xy_shape, "A")); auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter( 1, yz_shape, "B")); auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter( 2, scalar_shape_, "size_param")); a_param = 
builder.AddInstruction(HloInstruction::CreateSetDimensionSize( ShapeUtil::MakeShape(F32, xy_shape.dimensions(), {true, false}), a_param, size_param, 0)); a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( xy_shape_dynamic, a_param, size_param, 1)); auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers(0); dnums.set_kernel_input_feature_dimension(0); dnums.set_kernel_output_feature_dimension(1); dnums.set_input_batch_dimension(0); dnums.set_output_batch_dimension(1); dnums.set_output_feature_dimension(0); Window window; auto* conv = builder.AddInstruction(HloInstruction::CreateConvolve( zx_shape_dynamic, a_param, b_param, 1, 1, window, dnums, HloTestBase::DefaultPrecisionConfig(2))); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); TF_ASSERT_OK(RunInference()); EXPECT_EQ(inference_->GetDynamicSize(conv, {}, 1), size_param); EXPECT_EQ(inference_->GetDynamicSize(conv, {}, 0), nullptr); } TEST_F(DynamicDimensionInferenceTest, TransposeTest) { auto builder = HloComputation::Builder(TestName()); auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 3}); auto output_shape = ShapeUtil::MakeShape(F32, {3, 2, 1}, {true, true, true}); auto dynamic_shape = ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, true, true}); auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter( 0, input_shape, "A")); auto* size_param_1 = builder.AddInstruction(HloInstruction::CreateParameter( 1, scalar_shape_, "size_param")); auto* size_param_2 = builder.AddInstruction(HloInstruction::CreateParameter( 2, scalar_shape_, "size_param")); auto* size_param_3 = builder.AddInstruction(HloInstruction::CreateParameter( 3, scalar_shape_, "size_param")); a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, false, false}), a_param, size_param_1, 0)); a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, true, false}), 
a_param, size_param_2, 1)); a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( dynamic_shape, a_param, size_param_3, 2)); auto* transpose = builder.AddInstruction( HloInstruction::CreateTranspose(output_shape, a_param, {2, 1, 0})); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); TF_ASSERT_OK(RunInference()); EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 0), size_param_3); EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 1), size_param_2); EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 2), size_param_1); } TEST_F(DynamicDimensionInferenceTest, NonDescendingTransposeTest) { auto builder = HloComputation::Builder(TestName()); auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 3}); auto output_shape = ShapeUtil::MakeShape(F32, {3, 1, 2}, {true, true, true}); auto dynamic_shape = ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, true, true}); auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter( 0, input_shape, "A")); auto* size_param_1 = builder.AddInstruction(HloInstruction::CreateParameter( 1, scalar_shape_, "size_param")); auto* size_param_2 = builder.AddInstruction(HloInstruction::CreateParameter( 2, scalar_shape_, "size_param")); auto* size_param_3 = builder.AddInstruction(HloInstruction::CreateParameter( 3, scalar_shape_, "size_param")); a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, false, false}), a_param, size_param_1, 0)); a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( ShapeUtil::MakeShape(F32, {1, 2, 3}, {true, true, false}), a_param, size_param_2, 1)); a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( dynamic_shape, a_param, size_param_3, 2)); auto* transpose = builder.AddInstruction( HloInstruction::CreateTranspose(output_shape, a_param, {2, 0, 1})); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); 
TF_ASSERT_OK(RunInference()); EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 0), size_param_3); EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 1), size_param_1); EXPECT_EQ(inference_->GetDynamicSize(transpose, {}, 2), size_param_2); } TEST_F(DynamicDimensionInferenceTest, ReshapeTest) { auto builder = HloComputation::Builder(TestName()); auto input_shape = ShapeUtil::MakeShape(F32, {2, 3, 4, 5, 6}); auto output_shape = ShapeUtil::MakeShape( F32, {6, 4, 1, 5, 2, 3}, {false, true, false, true, false, false}); auto dynamic_shape = ShapeUtil::MakeShape(F32, {2, 3, 4, 5, 6}, {false, false, true, true, false}); auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter( 0, input_shape, "A")); auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter( 1, scalar_shape_, "size_param")); a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( ShapeUtil::MakeShape(F32, {2, 3, 4, 5, 6}, {false, false, true, false, false}), a_param, size_param, 2)); a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( dynamic_shape, a_param, size_param, 3)); auto* reshape = builder.AddInstruction( HloInstruction::CreateReshape(output_shape, a_param)); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); TF_ASSERT_OK(RunInference()); EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 0), nullptr); EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 1), size_param); EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 2), nullptr); EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 3), size_param); EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 4), nullptr); EXPECT_EQ(inference_->GetDynamicSize(reshape, {}, 5), nullptr); } TEST_F(DynamicDimensionInferenceTest, ReshapeInferredDimensionTest) { auto builder = HloComputation::Builder(TestName()); auto input_shape = ShapeUtil::MakeShape(F32, {4, 5}); auto output_shape = ShapeUtil::MakeShape(F32, {1, 4, 5}, {true, false, false}); auto dynamic_shape = 
ShapeUtil::MakeShape(F32, {4, 5}, {true, false}); auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter( 0, input_shape, "A")); auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter( 1, scalar_shape_, "size_param")); a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( dynamic_shape, a_param, size_param, 0)); auto* reshape = builder.AddInstruction(HloInstruction::CreateReshape( output_shape, a_param, 0)); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); TF_ASSERT_OK(RunInference()); EXPECT_NE(inference_->GetDynamicSize(reshape, {}, 0), nullptr); } TEST_F(DynamicDimensionInferenceTest, ReshapeTestMajorDimension) { auto builder = HloComputation::Builder(TestName()); auto input_shape = ShapeUtil::MakeShape(F32, {32, 10, 4}); auto output_shape = ShapeUtil::MakeShape(F32, {320, 4}, {true, false}); auto dynamic_shape = ShapeUtil::MakeShape(F32, {32, 10, 4}, {true, false, false}); auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter( 0, input_shape, "A")); auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter( 1, scalar_shape_, "size_param")); a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( dynamic_shape, a_param, size_param, 0)); auto* reshape = builder.AddInstruction( HloInstruction::CreateReshape(output_shape, a_param)); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); absl::Status status = RunInference(); EXPECT_NE(inference_->GetDynamicSize(reshape, {}, 0), nullptr); } TEST_F(DynamicDimensionInferenceTest, ReshapeIntoScalar) { auto builder = HloComputation::Builder(TestName()); auto input_shape = ShapeUtil::MakeShape(F32, {1}); auto output_shape = ShapeUtil::MakeShape(F32, {}); auto dynamic_shape = ShapeUtil::MakeShape(F32, {1}, {true}); auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter( 0, input_shape, "A")); auto* size_param = 
builder.AddInstruction(HloInstruction::CreateParameter( 1, scalar_shape_, "size_param")); a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( dynamic_shape, a_param, size_param, 0)); builder.AddInstruction(HloInstruction::CreateReshape(output_shape, a_param)); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); TF_CHECK_OK(RunInference()); } TEST_F(DynamicDimensionInferenceTest, GatherTest) { const std::string hlo_text = R"( HloModule TensorFlowGatherV2 ENTRY main { operand = s32[20,10]{1,0} parameter(0) indices = s32[32,20] parameter(1) dynamic_size = s32[] parameter(2) indices_dynamic = s32[<=32,20] set-dimension-size(indices, dynamic_size), dimensions={0} ROOT gather = s32[<=32,20,10]{2,1,0} gather(%operand, %indices_dynamic), offset_dims={2}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=2, slice_sizes={1,10} } )"; TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_text)); SCOPED_TRACE(module_->ToString()); TF_ASSERT_OK(RunInference()); EXPECT_EQ(inference_->GetDynamicSize( module_->entry_computation()->root_instruction(), {}, 0), module_->entry_computation()->parameter_instruction(2)); } TEST_F(DynamicDimensionInferenceTest, BroadcastTest) { auto builder = HloComputation::Builder(TestName()); auto input_shape = ShapeUtil::MakeShape(F32, {2}); auto output_shape = ShapeUtil::MakeShape(F32, {3, 2, 4}, {false, true, false}); auto dynamic_shape = ShapeUtil::MakeShape(F32, {2}, {true}); auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter( 0, input_shape, "A")); auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter( 1, scalar_shape_, "size_param")); a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( dynamic_shape, a_param, size_param, 0)); auto* broadcast = builder.AddInstruction( HloInstruction::CreateBroadcast(output_shape, a_param, {1})); module_->AddEntryComputation(builder.Build()); SCOPED_TRACE(module_->ToString()); 
TF_ASSERT_OK(RunInference()); EXPECT_EQ(inference_->GetDynamicSize(broadcast, {}, 0), nullptr); EXPECT_EQ(inference_->GetDynamicSize(broadcast, {}, 1), size_param); EXPECT_EQ(inference_->GetDynamicSize(broadcast, {}, 2), nullptr); } TEST_F(DynamicDimensionInferenceTest, WhileTest) { auto builder = HloComputation::Builder(TestName()); auto input_shape = ShapeUtil::MakeShape(F32, {2, 4, 4}); auto dynamic_shape = ShapeUtil::MakeShape(F32, {2, 4, 4}, {true, false, false}); auto tuple_shape = ShapeUtil::MakeTupleShape({input_shape, input_shape}); auto dynamic_tuple_shape = ShapeUtil::MakeTupleShape({dynamic_shape, dynamic_shape}); auto body_builder = HloComputation::Builder("body"); auto body_param = body_builder.AddInstruction( HloInstruction::CreateParameter(0, dynamic_tuple_shape, "param")); auto gte_0 = body_builder.AddInstruction( HloInstruction::CreateGetTupleElement(dynamic_shape, body_param, 0)); auto gte_1 = body_builder.AddInstruction( HloInstruction::CreateGetTupleElement(dynamic_shape, body_param, 1)); auto add = body_builder.AddInstruction(HloInstruction::CreateBinary( dynamic_shape, HloOpcode::kAdd, gte_0, gte_1)); body_builder.AddInstruction(HloInstruction::CreateTuple({add, add})); HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build()); auto cond_builder = HloComputation::Builder("condition"); cond_builder.AddInstruction( HloInstruction::CreateParameter(0, dynamic_tuple_shape, "param")); cond_builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); HloComputation* condition = module_->AddEmbeddedComputation(cond_builder.Build()); auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter( 0, tuple_shape, "A")); auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter( 1, scalar_shape_, "size_param")); auto* a_0 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(input_shape, a_param, 0)); a_0 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( 
dynamic_shape, a_0, size_param, 0)); auto* a_1 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(input_shape, a_param, 0)); a_1 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( dynamic_shape, a_1, size_param, 0)); a_param = builder.AddInstruction(HloInstruction::CreateTuple({a_0, a_1})); builder.AddInstruction(HloInstruction::CreateWhile(dynamic_tuple_shape, condition, body, a_param)); module_->AddEntryComputation(builder.Build()); TF_ASSERT_OK(RunInference()); HloInstruction* while_hlo = nullptr; for (HloInstruction* inst : module_->entry_computation()->instructions()) { if (inst->opcode() == HloOpcode::kWhile) { while_hlo = inst; } } ASSERT_NE(while_hlo, nullptr); EXPECT_EQ(while_hlo->shape().tuple_shapes_size(), 4); HloInstruction* add_inst = nullptr; for (HloInstruction* inst : while_hlo->while_body()->instructions()) { if (inst->opcode() == HloOpcode::kAdd) { add_inst = inst; } } EXPECT_NE(add_inst, nullptr); EXPECT_NE(inference_->GetDynamicSize(add_inst, {}, 0), nullptr); EXPECT_NE(inference_->GetDynamicSize( module_->entry_computation()->root_instruction(), {0}, 0), nullptr); EXPECT_NE(inference_->GetDynamicSize( module_->entry_computation()->root_instruction(), {1}, 0), nullptr); } TEST_F(DynamicDimensionInferenceTest, ConditionalInputTest) { auto builder = HloComputation::Builder(TestName()); auto input_shape = ShapeUtil::MakeShape(F32, {2, 4, 4}); auto dynamic_shape = ShapeUtil::MakeShape(F32, {2, 4, 4}, {true, false, false}); auto output_shape = ShapeUtil::MakeShape(F32, {2, 2, 2}); auto tuple_shape_1 = ShapeUtil::MakeTupleShape({input_shape}); auto tuple_shape_2 = ShapeUtil::MakeTupleShape({input_shape, input_shape}); auto tuple_shape_3 = ShapeUtil::MakeTupleShape({input_shape, input_shape, input_shape}); auto tuple_shape_2_dynamic = ShapeUtil::MakeTupleShape({dynamic_shape, dynamic_shape}); auto tuple_shape_3_dynamic = ShapeUtil::MakeTupleShape({input_shape, dynamic_shape, dynamic_shape}); auto true_builder = 
HloComputation::Builder("true"); { auto true_param = true_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape_2_dynamic, "param")); auto gte_0 = true_builder.AddInstruction( HloInstruction::CreateGetTupleElement(dynamic_shape, true_param, 0)); auto gte_1 = true_builder.AddInstruction( HloInstruction::CreateGetTupleElement(dynamic_shape, true_param, 1)); auto add = true_builder.AddInstruction(HloInstruction::CreateBinary( dynamic_shape, HloOpcode::kAdd, gte_0, gte_1)); true_builder.AddInstruction(HloInstruction::CreateTuple({add})); } HloComputation* true_branch = module_->AddEmbeddedComputation(true_builder.Build()); auto false_builder = HloComputation::Builder("false"); { auto false_param = false_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape_3_dynamic, "param")); auto gte_0 = false_builder.AddInstruction( HloInstruction::CreateGetTupleElement(dynamic_shape, false_param, 1)); auto gte_1 = false_builder.AddInstruction( HloInstruction::CreateGetTupleElement(dynamic_shape, false_param, 2)); auto add = false_builder.AddInstruction(HloInstruction::CreateBinary( dynamic_shape, HloOpcode::kAdd, gte_0, gte_1)); false_builder.AddInstruction(HloInstruction::CreateTuple({add})); } HloComputation* false_branch = module_->AddEmbeddedComputation(false_builder.Build()); auto* pred_param = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeScalarShape(PRED), "pred")); auto* tuple_2_param = builder.AddInstruction(HloInstruction::CreateParameter( 1, tuple_shape_2, "tuple_2_param")); auto* tuple_3_param = builder.AddInstruction(HloInstruction::CreateParameter( 2, tuple_shape_3, "tuple_3_param")); auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter( 3, scalar_shape_, "size_param")); auto* param_2_0 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(input_shape, tuple_2_param, 0)); param_2_0 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( dynamic_shape, param_2_0, 
size_param, 0)); auto* param_2_1 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(input_shape, tuple_2_param, 1)); param_2_1 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( dynamic_shape, param_2_1, size_param, 0)); tuple_2_param = builder.AddInstruction( HloInstruction::CreateTuple({param_2_0, param_2_1})); auto* param_3_0 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(input_shape, tuple_3_param, 0)); auto* param_3_1 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(input_shape, tuple_3_param, 1)); param_3_1 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( dynamic_shape, param_3_1, size_param, 0)); auto* param_3_2 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(input_shape, tuple_3_param, 2)); param_3_2 = builder.AddInstruction(HloInstruction::CreateSetDimensionSize( dynamic_shape, param_3_1, size_param, 0)); tuple_3_param = builder.AddInstruction( HloInstruction::CreateTuple({param_3_0, param_3_1, param_3_2})); builder.AddInstruction(HloInstruction::CreateConditional( tuple_shape_1, pred_param, tuple_2_param, true_branch, tuple_3_param, false_branch)); module_->AddEntryComputation(builder.Build()); TF_ASSERT_OK(RunInference()); HloInstruction* conditional_hlo = nullptr; for (HloInstruction* inst : module_->entry_computation()->instructions()) { if (inst->opcode() == HloOpcode::kConditional) { conditional_hlo = inst; } } ASSERT_NE(conditional_hlo, nullptr); EXPECT_EQ(conditional_hlo->shape().tuple_shapes_size(), 2); HloInstruction* add_true_branch = nullptr; for (HloInstruction* inst : conditional_hlo->true_computation()->instructions()) { if (inst->opcode() == HloOpcode::kAdd) { add_true_branch = inst; } } EXPECT_NE(add_true_branch, nullptr); EXPECT_NE(inference_->GetDynamicSize(add_true_branch, {}, 0), nullptr); HloInstruction* add_false_branch = nullptr; for (HloInstruction* inst : conditional_hlo->false_computation()->instructions()) { if (inst->opcode() == 
HloOpcode::kAdd) { add_false_branch = inst; } }
  EXPECT_NE(add_false_branch, nullptr);
  EXPECT_NE(inference_->GetDynamicSize(add_false_branch, {}, 0), nullptr);
  EXPECT_NE(inference_->GetDynamicSize(conditional_hlo, {0}, 0), nullptr);
}

// Checks that a dynamic batch dimension survives reduce-window when the
// window over that dimension is size 1 / stride 1: the output's dimension 0
// reports the same dynamic-size instruction as the input.
TEST_F(DynamicDimensionInferenceTest, ReduceWindowBatchTest) {
  auto builder = HloComputation::Builder(TestName());
  auto input_shape = ShapeUtil::MakeShape(F32, {2, 4, 4});
  auto output_shape =
      ShapeUtil::MakeShape(F32, {2, 2, 2}, {true, false, false});
  auto dynamic_shape =
      ShapeUtil::MakeShape(F32, {2, 4, 4}, {true, false, false});
  Window window;
  // Batch dimension: size-1 window with stride 1 (leaves the dimension
  // untouched).
  WindowDimension* batch_dim = window.add_dimensions();
  batch_dim->set_size(1);
  batch_dim->set_stride(1);
  batch_dim->set_padding_low(0);
  batch_dim->set_padding_high(0);
  batch_dim->set_window_dilation(1);
  batch_dim->set_base_dilation(1);
  // Two spatial dimensions: 2x2 windows with stride 2.
  for (int64_t i = 0; i < 2; ++i) {
    WindowDimension* dim = window.add_dimensions();
    dim->set_size(2);
    dim->set_stride(2);
    dim->set_padding_low(0);
    dim->set_padding_high(0);
    dim->set_window_dilation(1);
    dim->set_base_dilation(1);
  }
  auto* a_param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, input_shape, "A"));
  auto* size_param = builder.AddInstruction(
      HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
  // Attach the dynamic size to dimension 0 of the input.
  a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      dynamic_shape, a_param, size_param, 0));
  auto init = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
  auto* reduce_window =
      builder.AddInstruction(HloInstruction::CreateReduceWindow(
          output_shape, a_param, init, window, GetAdd()));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());
  TF_ASSERT_OK(RunInference());
  EXPECT_EQ(inference_->GetDynamicSize(reduce_window, {}, 0), size_param);
}

// Checks that select-and-scatter keeps the dynamic batch dimension shared
// by its operand and source.
TEST_F(DynamicDimensionInferenceTest, SelectAndScatterTest) {
  auto builder = HloComputation::Builder(TestName());
  auto input_shape = ShapeUtil::MakeShape(F32, {2, 4, 4});
  auto source_shape =
ShapeUtil::MakeShape(F32, {2, 2, 2});
  auto input_shape_dynamic =
      ShapeUtil::MakeShape(F32, {2, 4, 4}, {true, false, false});
  auto source_shape_dynamic =
      ShapeUtil::MakeShape(F32, {2, 2, 2}, {true, false, false});
  Window window;
  // Batch dimension: size-1 window with stride 1.
  WindowDimension* batch_dim = window.add_dimensions();
  batch_dim->set_size(1);
  batch_dim->set_stride(1);
  batch_dim->set_padding_low(0);
  batch_dim->set_padding_high(0);
  batch_dim->set_window_dilation(1);
  batch_dim->set_base_dilation(1);
  // Two spatial dimensions: 2x2 windows with stride 2.
  for (int64_t i = 0; i < 2; ++i) {
    WindowDimension* dim = window.add_dimensions();
    dim->set_size(2);
    dim->set_stride(2);
    dim->set_padding_low(0);
    dim->set_padding_high(0);
    dim->set_window_dilation(1);
    dim->set_base_dilation(1);
  }
  auto* a_param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, input_shape, "A"));
  auto* size_param = builder.AddInstruction(
      HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
  auto* source = builder.AddInstruction(
      HloInstruction::CreateParameter(2, source_shape, "B"));
  // Operand and source both get the same dynamic size on dimension 0.
  a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      input_shape_dynamic, a_param, size_param, 0));
  source = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      source_shape_dynamic, source, size_param, 0));
  auto init = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
  auto* sns = builder.AddInstruction(HloInstruction::CreateSelectAndScatter(
      input_shape_dynamic, a_param, GetGe(), window, source, init, GetAdd()));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());
  TF_ASSERT_OK(RunInference());
  EXPECT_EQ(inference_->GetDynamicSize(sns, {}, 0), size_param);
}

// Checks that concatenation along a static dimension preserves the dynamic
// size on the other (non-concatenated) dimension.
TEST_F(DynamicDimensionInferenceTest, ConcatTest) {
  auto builder = HloComputation::Builder(TestName());
  auto data_param = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(F32, {5, 7}), "data_param_1"));
  auto data_param_2 = builder.AddInstruction(HloInstruction::CreateParameter(
      1, ShapeUtil::MakeShape(F32,
{5, 8}), "data_param_2"));
  auto size_param = builder.AddInstruction(
      HloInstruction::CreateParameter(2, scalar_shape_, "size_param"));
  // Dimension 0 of both operands is dynamic with the same size.
  data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      ShapeUtil::MakeShape(F32, {5, 7}, {true, false}), data_param,
      size_param, 0));
  data_param_2 = builder.AddInstruction(
      HloInstruction::CreateSetDimensionSize(
          ShapeUtil::MakeShape(F32, {5, 8}, {true, false}), data_param_2,
          size_param, 0));
  // Concatenate along dimension 1; dimension 0 stays dynamic.
  auto* concat = builder.AddInstruction(HloInstruction::CreateConcatenate(
      ShapeUtil::MakeShape(F32, {5, 15}, {true, false}),
      {data_param, data_param_2}, 1));
  module_->AddEntryComputation(builder.Build());
  TF_ASSERT_OK(RunInference());
  EXPECT_EQ(inference_->GetDynamicSize(concat, {}, 0), size_param);
}

// Checks that a full (identity) slice forwards the dynamic dimension.
TEST_F(DynamicDimensionInferenceTest, SliceTest) {
  auto builder = HloComputation::Builder(TestName());
  auto dynamic_shape = ShapeUtil::MakeShape(F32, {5, 7}, {false, true});
  auto data_param = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(F32, {5, 7}), "data_param"));
  auto size_param = builder.AddInstruction(
      HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
  data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      dynamic_shape, data_param, size_param, 1));
  // The slice covers the whole operand with stride 1.
  auto* slice = builder.AddInstruction(HloInstruction::CreateSlice(
      dynamic_shape, data_param, {0, 0}, {5, 7}, {1, 1}));
  module_->AddEntryComputation(builder.Build());
  TF_ASSERT_OK(RunInference());
  EXPECT_EQ(inference_->GetDynamicSize(slice, {}, 1), size_param);
}

// Checks that dynamic-slice keeps a dynamic dimension that it takes in
// full (slice size 5 of 5 on dimension 0).
TEST_F(DynamicDimensionInferenceTest, DynamicSliceTest) {
  auto builder = HloComputation::Builder(TestName());
  auto data_param = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(F32, {5, 7}), "data_param"));
  auto size_param = builder.AddInstruction(
      HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
  std::vector<HloInstruction*> params;
  for (int i = 0; i < 2; ++i) {
params.push_back(builder.AddInstruction(HloInstruction::CreateParameter(
        i + 2, ShapeUtil::MakeShape(S32, {}), "slice_indices")));
  }
  data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      ShapeUtil::MakeShape(F32, {5, 7}, {true, false}), data_param,
      size_param, 0));
  // Slice sizes {5, 1}: dimension 0 is taken in full, so it stays dynamic.
  auto* slice = builder.AddInstruction(HloInstruction::CreateDynamicSlice(
      ShapeUtil::MakeShape(F32, {5, 1}, {true, false}), data_param, params,
      {5, 1}));
  module_->AddEntryComputation(builder.Build());
  TF_ASSERT_OK(RunInference());
  EXPECT_EQ(inference_->GetDynamicSize(slice, {}, 0), size_param);
}

// Checks that sort forwards the dynamic dimension of its single operand.
TEST_F(DynamicDimensionInferenceTest, SortTest) {
  auto builder = HloComputation::Builder(TestName());
  auto dynamic_shape = ShapeUtil::MakeShape(F32, {5, 7}, {true, false});
  auto data_param = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(F32, {5, 7}), "data_param"));
  auto size_param = builder.AddInstruction(
      HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
  // Trivial comparator (always false); its result is irrelevant to
  // dimension inference.
  auto compare_builder = HloComputation::Builder("condition");
  compare_builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(F32, {}), "param1"));
  compare_builder.AddInstruction(HloInstruction::CreateParameter(
      1, ShapeUtil::MakeShape(F32, {}), "param2"));
  compare_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  HloComputation* compare =
      module_->AddEmbeddedComputation(compare_builder.Build());
  data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      dynamic_shape, data_param, size_param, 0));
  auto* sort = builder.AddInstruction(HloInstruction::CreateSort(
      dynamic_shape, 1, {data_param}, compare, false));
  module_->AddEntryComputation(builder.Build());
  TF_ASSERT_OK(RunInference());
  EXPECT_EQ(inference_->GetDynamicSize(sort, {}, 0), size_param);
}

// Checks that a multi-operand (tuple-result) sort forwards the dynamic
// dimension to every tuple element.
TEST_F(DynamicDimensionInferenceTest, MultiValueSortTest) {
  auto builder = HloComputation::Builder(TestName());
  auto shape = ShapeUtil::MakeShape(F32, {5, 7});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {5, 7}, {true, false});
  auto data_param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, shape, "data_param"));
  auto size_param = builder.AddInstruction(
      HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
  // Trivial 4-parameter comparator (two values per comparison side for the
  // two sort operands); always returns false.
  auto compare_builder = HloComputation::Builder("condition");
  compare_builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(F32, {}), "param1"));
  compare_builder.AddInstruction(HloInstruction::CreateParameter(
      1, ShapeUtil::MakeShape(F32, {}), "param2"));
  compare_builder.AddInstruction(HloInstruction::CreateParameter(
      2, ShapeUtil::MakeShape(F32, {}), "param3"));
  compare_builder.AddInstruction(HloInstruction::CreateParameter(
      3, ShapeUtil::MakeShape(F32, {}), "param4"));
  compare_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  HloComputation* compare =
      module_->AddEmbeddedComputation(compare_builder.Build());
  data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      dynamic_shape, data_param, size_param, 0));
  auto* sort = builder.AddInstruction(HloInstruction::CreateSort(
      ShapeUtil::MakeTupleShape({dynamic_shape, dynamic_shape}), 1,
      {data_param, data_param}, compare, false));
  module_->AddEntryComputation(builder.Build());
  TF_ASSERT_OK(RunInference());
  // Both tuple elements carry the dynamic size on dimension 0.
  EXPECT_EQ(inference_->GetDynamicSize(sort, {0}, 0), size_param);
  EXPECT_EQ(inference_->GetDynamicSize(sort, {1}, 0), size_param);
}

// Checks that slicing a single element out of a dynamic dimension drops the
// dynamic size (the output dimension becomes static size 1).
TEST_F(DynamicDimensionInferenceTest, DynamicSliceSingleElementTest) {
  auto builder = HloComputation::Builder(TestName());
  auto data_param = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(F32, {5, 7}), "data_param"));
  auto* size_param = builder.AddInstruction(
      HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
  std::vector<HloInstruction*> params;
  for (int i = 0; i < 2; ++i) {
    params.push_back(builder.AddInstruction(HloInstruction::CreateParameter(
        i + 2, ShapeUtil::MakeShape(S32,
{}), "slice_indices")));
  }
  data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      ShapeUtil::MakeShape(F32, {5, 7}, {true, false}), data_param,
      size_param, 0));
  // Slice sizes {1, 1}: the dynamic dimension collapses to one element.
  auto* slice = builder.AddInstruction(HloInstruction::CreateDynamicSlice(
      ShapeUtil::MakeShape(F32, {1, 1}), data_param, params, {1, 1}));
  module_->AddEntryComputation(builder.Build());
  TF_ASSERT_OK(RunInference());
  EXPECT_EQ(inference_->GetDynamicSize(slice, {}, 0), nullptr);
}

// Checks that a caller-supplied custom-call handler is invoked during
// inference for a custom op.
TEST_F(DynamicDimensionInferenceTest, InfersCustomOp) {
  auto builder = HloComputation::Builder(TestName());
  auto data_param = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(F32, {5, 7}), "data_param"));
  auto* size_param = builder.AddInstruction(
      HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
  data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      ShapeUtil::MakeShape(F32, {5, 7}, {true, false}), data_param,
      size_param, 0));
  builder.AddInstruction(HloInstruction::CreateCustomCall(
      ShapeUtil::MakeShape(F32, {1, 1}), {data_param}, "MyCustomOp", ""));
  module_->AddEntryComputation(builder.Build());
  bool handler_called = false;
  // Handler records that it ran and sanity-checks its arguments.
  auto handler = [&](HloInstruction* hlo,
                     DynamicDimensionInference* inference) {
    CHECK(inference != nullptr);
    CHECK(Cast<HloCustomCallInstruction>(hlo) != nullptr);
    handler_called = true;
    return absl::OkStatus();
  };
  TF_ASSERT_OK(RunInference(nullptr, handler));
  EXPECT_TRUE(handler_called);
}

// Checks dynamic-reshape: output dynamic sizes come from the explicit size
// operands, not from the input's dynamic dimension.
TEST_F(DynamicDimensionInferenceTest, DynamicReshapeOp) {
  auto builder = HloComputation::Builder(TestName());
  auto input = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(F32, {9}), "data_input"));
  auto six = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(6)));
  auto dynamic_input =
      builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
          ShapeUtil::MakeShape(F32, {9}, {true}), input, six, 0));
  auto dynamic_size =
builder.AddInstruction(HloInstruction::CreateParameter(
          1, ShapeUtil::MakeShape(S32, {}), "size_param"));
  auto three = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(3)));
  auto dynamic_reshape =
      builder.AddInstruction(HloInstruction::CreateDynamicReshape(
          ShapeUtil::MakeShape(F32, {3, 3}, {false, true}), dynamic_input,
          {three, dynamic_size}));
  module_->AddEntryComputation(builder.Build());
  TF_ASSERT_OK(RunInference());
  // Dimension 0 is static (constant operand); dimension 1 tracks the
  // dynamic size operand.
  EXPECT_EQ(inference_->GetDynamicSize(dynamic_reshape, {}, 0), nullptr);
  EXPECT_EQ(inference_->GetDynamicSize(dynamic_reshape, {}, 1), dynamic_size);
}

// Checks a static reshape with two dynamic input dimensions: each dynamic
// size maps to the corresponding output dimension.
TEST_F(DynamicDimensionInferenceTest, ReshapeOpWithMultipleDynamicDimensions) {
  auto builder = HloComputation::Builder(TestName());
  auto input = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(F32, {9, 2}), "data_input"));
  auto six = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(6)));
  input = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      ShapeUtil::MakeShape(F32, {9, 2}, {true, false}), input, six, 0));
  auto one = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
  input = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
      ShapeUtil::MakeShape(F32, {9, 2}, {true, true}), input, one, 1));
  auto dynamic_reshape = builder.AddInstruction(HloInstruction::CreateReshape(
      ShapeUtil::MakeShape(F32, {9, 1, 2}, {true, false, true}), input));
  module_->AddEntryComputation(builder.Build());
  TF_ASSERT_OK(RunInference());
  EXPECT_EQ(inference_->GetDynamicSize(dynamic_reshape, {}, 0), six);
  EXPECT_EQ(inference_->GetDynamicSize(dynamic_reshape, {}, 1), nullptr);
  EXPECT_EQ(inference_->GetDynamicSize(dynamic_reshape, {}, 2), one);
}

// Regression test: inference must handle a map instruction inside a while
// body (HLO text below; parsed without verification).
TEST_F(DynamicDimensionInferenceTest, HandleMapInDynamicDimensionInference) {
  const char* module_str = R"( HloModule test_module %scatter-combiner.285 (p0.286: c128[], p1.287: c128[]) -> c128[] { %p0.286 = c128[]
parameter(0) %p1.287 = c128[] parameter(1) ROOT %add.288 = c128[] add(c128[] %p0.286, c128[] %p1.287) } %while_body { %reshape.8 = s32[] parameter(4) %reshape.7 = c128[1]{0} parameter(3) %reduce = pred[] parameter(2) %concatenate = s32[1]{0} parameter(1) %slice.4 = s32[1]{0} slice(s32[1]{0} %concatenate), slice={[0 : 1]} %broadcast.7 = pred[1]{0} broadcast(pred[] %reduce), dimensions={} %param.1 = (s32[],c128[<=1]{0},s32[1]{0},c128[1]{0}) parameter(0) %get-tuple-element.2 = c128[<=1]{0} get-tuple-element((s32[],c128[<=1]{0},s32[1]{0},c128[1]{0}) %param.1), index=1 %dynamic-slice.2 = c128[1]{0} dynamic-slice(c128[<=1]{0} %get-tuple-element.2,s32[] %reshape.8), dynamic_slice_sizes={1} %map = c128[1]{0} map(c128[1]{0} %dynamic-slice.2,c128[1]{0} %reshape.7), dimensions={0}, to_apply=%scatter-combiner.285 %select = c128[1]{0} select(pred[1]{0} %broadcast.7,c128[1]{0} %map,c128[1]{0} %dynamic-slice.2) %reshape.9 = s32[] reshape(s32[1]{0} %slice.4) %dynamic-update-slice = c128[<=1]{0} dynamic-update-slice(c128[<=1]{0} %get-tuple-element.2,c128[1]{0} %select,s32[] %reshape.9) })";
  // The module is parsed without verification; the test only requires that
  // inference completes successfully.
  TF_ASSERT_OK_AND_ASSIGN(module_,
                          ParseAndReturnUnverifiedModule(module_str));
  TF_ASSERT_OK(RunInference());
}

// Checks ShapeCheckMode::kRuntime: the assertion-generator callback (below)
// is invoked for each generated runtime constraint.
TEST_F(DynamicDimensionInferenceTest, RuntimeShapeCheck) {
  const char* hlo = R"( HloModule module ENTRY computation { a = f32[20,20] parameter(0) a_size_1 = s32[] parameter(1) a_size_2 = s32[] parameter(2) a_dynamic_1 = f32[<=20,20] set-dimension-size(a, a_size_1), dimensions={0} a_dynamic_2 = f32[<=20,<=20] set-dimension-size(a_dynamic_1, a_size_2), dimensions={1} b = f32[20,20] parameter(3) b_size_1 = s32[] parameter(4) b_size_2 = s32[] parameter(5) b_dynamic_1 = f32[<=20,20] set-dimension-size(b, b_size_1), dimensions={0} b_dynamic_2 = f32[<=20,<=20] set-dimension-size(b_dynamic_1, b_size_2), dimensions={1} ROOT f = add(a_dynamic_2, b_dynamic_2) } )";
  TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo));
  TF_ASSERT_OK(RunInference(
      nullptr, nullptr,
DynamicDimensionInference::ShapeCheckMode::kRuntime,
      [&](HloInstruction* constraint) {
        // Materialize each runtime constraint as a token-shaped
        // "__xla__assert" custom call in the constraint's computation.
        constraint->parent()->AddInstruction(HloInstruction::CreateCustomCall(
            ShapeUtil::MakeTokenShape(), {constraint}, "__xla__assert",
            std::string{}, API_VERSION_STATUS_RETURNING));
      }));
  // NOTE(review): the FileCheck pattern below is blank, so this check does
  // not pin any property of the emitted asserts — the CHECK lines appear to
  // have been lost; confirm against the upstream version of this test.
  absl::StatusOr<bool> filecheck_result =
      RunFileCheck(module_->ToString({}), R"( )");
  TF_ASSERT_OK(filecheck_result.status());
  EXPECT_TRUE(*filecheck_result);
}

// Regression test: inference must run to completion on nested conditionals
// plus a while loop carrying bounded-dynamic (f32[<=250]) buffers.
TEST_F(DynamicDimensionInferenceTest, NestedControlFlow) {
  const char* hlo = R"( HloModule tfcompile.377, entry_computation_layout={(s32[], f32[250]{0}, pred[], pred[], s32[], pred[], s32[], pred[])->(f32[3]{0})} cond_2_Sum-reduction.17 { x.18 = f32[] parameter(0) y.19 = f32[] parameter(1) ROOT add.20 = f32[] add(x.18, y.19) } cond_2_cond_true_214__.21 { arg_tuple.22 = () parameter(0) constant.23 = s32[] constant(1) reshape.24 = s32[] reshape(constant.23) ROOT tuple.25 = (s32[]) tuple(constant.23) } cond_2_cond_false_215__.26 { arg_tuple.27 = () parameter(0) constant.28 = s32[] constant(0) reshape.29 = s32[] reshape(constant.28) ROOT tuple.30 = (s32[]) tuple(constant.28) } cond_2_true_195__.31 { arg_tuple.32 = (s32[], f32[250]{0}) parameter(0) get-tuple-element.33 = s32[] get-tuple-element(arg_tuple.32), index=0 constant.35 = s32[] constant(20) minimum.36 = s32[] minimum(get-tuple-element.33, constant.35) reshape.37 = s32[1]{0} reshape(minimum.36) concatenate.38 = s32[1]{0} concatenate(reshape.37), dimensions={0} slice.48 = s32[1]{0} slice(concatenate.38), slice={[0:1]} reshape.49 = s32[] reshape(reshape.37) constant.43 = s32[] constant(0) compare.50 = pred[] compare(minimum.36, constant.43), direction=LT constant.44 = s32[] constant(250) add.51 = s32[] add(constant.44, minimum.36) select.52 = s32[] select(compare.50, add.51, minimum.36) constant.45 = s32[1]{0} constant({0}) slice.46 = s32[1]{0} slice(constant.45), slice={[0:1]} reshape.47 = s32[] reshape(slice.46) subtract.53 = s32[] subtract(select.52, reshape.47) maximum.54 = s32[]
maximum(subtract.53, constant.43) convert.55 = s32[] convert(maximum.54) get-tuple-element.34 = f32[250]{0} get-tuple-element(arg_tuple.32), index=1 constant.39 = f32[] constant(0) pad.40 = f32[500]{0} pad(get-tuple-element.34, constant.39), padding=0_250 constant.41 = s32[] constant(500) set-dimension-size.42 = f32[500]{0} set-dimension-size(pad.40, constant.41), dimensions={0} dynamic-slice.56 = f32[250]{0} dynamic-slice(set-dimension-size.42, reshape.47), dynamic_slice_sizes={250} reshape.57 = f32[250]{0} reshape(dynamic-slice.56) set-dimension-size.58 = f32[<=250]{0} set-dimension-size(dynamic-slice.56, maximum.54), dimensions={0} constant.59 = f32[] constant(1) broadcast.60 = f32[250]{0} broadcast(constant.59), dimensions={} compare.61 = pred[<=250]{0} compare(set-dimension-size.58, broadcast.60), direction=GE convert.62 = f32[<=250]{0} convert(compare.61) convert.63 = f32[<=250]{0} convert(convert.62) constant.64 = f32[] constant(0) convert.65 = f32[] convert(constant.64) reduce.66 = f32[] reduce(convert.62, constant.64), dimensions={0}, to_apply=cond_2_Sum-reduction.17 convert.67 = f32[] convert(reduce.66) reshape.73 = f32[] reshape(reduce.66) constant.68 = f32[] constant(6) compare.69 = pred[] compare(reduce.66, constant.68), direction=GE tuple.70 = () tuple() conditional.71 = (s32[]) conditional(compare.69, tuple.70, tuple.70), true_computation=cond_2_cond_true_214__.21, false_computation=cond_2_cond_false_215__.26 get-tuple-element.72 = s32[] get-tuple-element(conditional.71), index=0 reshape.74 = s32[] reshape(get-tuple-element.72) ROOT tuple.75 = (f32[], s32[]) tuple(reduce.66, get-tuple-element.72) } cond_2_false_196__.76 { arg_tuple.77 = (s32[], f32[250]{0}) parameter(0) constant.80 = f32[] constant(0) reshape.82 = f32[] reshape(constant.80) constant.81 = s32[] constant(0) reshape.83 = s32[] reshape(constant.81) ROOT tuple.84 = (f32[], s32[]) tuple(constant.80, constant.81) } cond_true_10__.85 { arg_tuple.86 = (pred[], pred[], pred[]) parameter(0) 
get-tuple-element.87 = pred[] get-tuple-element(arg_tuple.86), index=0 reshape.90 = pred[] reshape(get-tuple-element.87) ROOT tuple.91 = (pred[]) tuple(get-tuple-element.87) } cond_cond_true_16__.92 { arg_tuple.93 = (pred[], pred[]) parameter(0) get-tuple-element.94 = pred[] get-tuple-element(arg_tuple.93), index=0 reshape.96 = pred[] reshape(get-tuple-element.94) ROOT tuple.97 = (pred[]) tuple(get-tuple-element.94) } cond_cond_false_17__.98 { arg_tuple.99 = (pred[], pred[]) parameter(0) get-tuple-element.101 = pred[] get-tuple-element(arg_tuple.99), index=1 reshape.102 = pred[] reshape(get-tuple-element.101) ROOT tuple.103 = (pred[]) tuple(get-tuple-element.101) } cond_false_11__.104 { arg_tuple.105 = (pred[], pred[], pred[]) parameter(0) get-tuple-element.107 = pred[] get-tuple-element(arg_tuple.105), index=1 get-tuple-element.108 = pred[] get-tuple-element(arg_tuple.105), index=2 tuple.109 = (pred[], pred[]) tuple(get-tuple-element.107, get-tuple-element.108) conditional.110 = (pred[]) conditional(get-tuple-element.107, tuple.109, tuple.109), true_computation=cond_cond_true_16__.92, false_computation=cond_cond_false_17__.98 get-tuple-element.111 = pred[] get-tuple-element(conditional.110), index=0 reshape.112 = pred[] reshape(get-tuple-element.111) ROOT tuple.113 = (pred[]) tuple(get-tuple-element.111) } cond_1_map_while_cond_true_82__.114 { arg_tuple.115 = (f32[]) parameter(0) constant.117 = f32[] constant(0) reshape.118 = f32[] reshape(constant.117) ROOT tuple.119 = (f32[]) tuple(constant.117) } cond_1_map_while_cond_cond_true_91__.120 { constant.123 = f32[] constant(0.1) arg_tuple.121 = (f32[]) parameter(0) get-tuple-element.122 = f32[] get-tuple-element(arg_tuple.121), index=0 multiply.124 = f32[] multiply(constant.123, get-tuple-element.122) constant.125 = f32[] constant(0) add.126 = f32[] add(multiply.124, constant.125) constant.127 = f32[] constant(0.9) divide.128 = f32[] divide(add.126, constant.127) reshape.129 = f32[] reshape(divide.128) ROOT tuple.130 
= (f32[]) tuple(divide.128) } cond_1_map_while_cond_cond_cond_true_106__.131 { constant.134 = f32[] constant(0.8) arg_tuple.132 = (f32[]) parameter(0) get-tuple-element.133 = f32[] get-tuple-element(arg_tuple.132), index=0 multiply.135 = f32[] multiply(constant.134, get-tuple-element.133) constant.136 = f32[] constant(-0.711) add.137 = f32[] add(multiply.135, constant.136) constant.138 = f32[] constant(0.09) divide.139 = f32[] divide(add.137, constant.138) reshape.140 = f32[] reshape(divide.139) ROOT tuple.141 = (f32[]) tuple(divide.139) } cond_1_map_while_cond_cond_cond_cond_true_121__.142 { constant.145 = f32[] constant(0.2) arg_tuple.143 = (f32[]) parameter(0) get-tuple-element.144 = f32[] get-tuple-element(arg_tuple.143), index=0 multiply.146 = f32[] multiply(constant.145, get-tuple-element.144) constant.147 = f32[] constant(-0.18) add.148 = f32[] add(multiply.146, constant.147) constant.149 = f32[] constant(0.02) divide.150 = f32[] divide(add.148, constant.149) reshape.151 = f32[] reshape(divide.150) ROOT tuple.152 = (f32[]) tuple(divide.150) } cond_1_map_while_cond_cond_cond_cond_cond_true_136__.153 { constant.156 = f32[] constant(0.1) arg_tuple.154 = (f32[]) parameter(0) get-tuple-element.155 = f32[] get-tuple-element(arg_tuple.154), index=0 multiply.157 = f32[] multiply(constant.156, get-tuple-element.155) constant.158 = f32[] constant(108.788) add.159 = f32[] add(multiply.157, constant.158) constant.160 = f32[] constant(98.99) divide.161 = f32[] divide(add.159, constant.160) reshape.162 = f32[] reshape(divide.161) ROOT tuple.163 = (f32[]) tuple(divide.161) } cond_1_map_while_cond_cond_cond_cond_cond_false_137__.164 { arg_tuple.165 = (f32[]) parameter(0) constant.167 = f32[] constant(1.2) reshape.168 = f32[] reshape(constant.167) ROOT tuple.169 = (f32[]) tuple(constant.167) } cond_1_map_while_cond_cond_cond_cond_false_122__.170 { arg_tuple.171 = (f32[]) parameter(0) get-tuple-element.172 = f32[] get-tuple-element(arg_tuple.171), index=0 constant.173 = f32[] 
constant(100) compare.174 = pred[] compare(get-tuple-element.172, constant.173), direction=LE tuple.175 = (f32[]) tuple(get-tuple-element.172) conditional.176 = (f32[]) conditional(compare.174, tuple.175, tuple.175), true_computation=cond_1_map_while_cond_cond_cond_cond_cond_true_136__.153, false_computation=cond_1_map_while_cond_cond_cond_cond_cond_false_137__.164 get-tuple-element.177 = f32[] get-tuple-element(conditional.176), index=0 reshape.178 = f32[] reshape(get-tuple-element.177) ROOT tuple.179 = (f32[]) tuple(get-tuple-element.177) } cond_1_map_while_cond_cond_cond_false_107__.180 { arg_tuple.181 = (f32[]) parameter(0) get-tuple-element.182 = f32[] get-tuple-element(arg_tuple.181), index=0 constant.183 = f32[] constant(1.01) compare.184 = pred[] compare(get-tuple-element.182, constant.183), direction=LE tuple.185 = (f32[]) tuple(get-tuple-element.182) conditional.186 = (f32[]) conditional(compare.184, tuple.185, tuple.185), true_computation=cond_1_map_while_cond_cond_cond_cond_true_121__.142, false_computation=cond_1_map_while_cond_cond_cond_cond_false_122__.170 get-tuple-element.187 = f32[] get-tuple-element(conditional.186), index=0 reshape.188 = f32[] reshape(get-tuple-element.187) ROOT tuple.189 = (f32[]) tuple(get-tuple-element.187) } cond_1_map_while_cond_cond_false_92__.190 { arg_tuple.191 = (f32[]) parameter(0) get-tuple-element.192 = f32[] get-tuple-element(arg_tuple.191), index=0 constant.193 = f32[] constant(0.99) compare.194 = pred[] compare(get-tuple-element.192, constant.193), direction=LE tuple.195 = (f32[]) tuple(get-tuple-element.192) conditional.196 = (f32[]) conditional(compare.194, tuple.195, tuple.195), true_computation=cond_1_map_while_cond_cond_cond_true_106__.131, false_computation=cond_1_map_while_cond_cond_cond_false_107__.180 get-tuple-element.197 = f32[] get-tuple-element(conditional.196), index=0 reshape.198 = f32[] reshape(get-tuple-element.197) ROOT tuple.199 = (f32[]) tuple(get-tuple-element.197) } 
cond_1_map_while_cond_false_83__.200 { arg_tuple.201 = (f32[]) parameter(0) get-tuple-element.202 = f32[] get-tuple-element(arg_tuple.201), index=0 constant.203 = f32[] constant(0.9) compare.204 = pred[] compare(get-tuple-element.202, constant.203), direction=LE tuple.205 = (f32[]) tuple(get-tuple-element.202) conditional.206 = (f32[]) conditional(compare.204, tuple.205, tuple.205), true_computation=cond_1_map_while_cond_cond_true_91__.120, false_computation=cond_1_map_while_cond_cond_false_92__.190 get-tuple-element.207 = f32[] get-tuple-element(conditional.206), index=0 reshape.208 = f32[] reshape(get-tuple-element.207) ROOT tuple.209 = (f32[]) tuple(get-tuple-element.207) } cond_1_map_while_body_59__.210 { arg_tuple.211 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], (f32[<=250]{0}, s32[])) parameter(0) get-tuple-element.212 = s32[] get-tuple-element(arg_tuple.211), index=0 constant.218 = s32[] constant(1) add.219 = s32[] add(get-tuple-element.212, constant.218) reshape.239 = s32[] reshape(add.219) get-tuple-element.213 = s32[] get-tuple-element(arg_tuple.211), index=1 reshape.240 = s32[] reshape(get-tuple-element.213) get-tuple-element.214 = s32[] get-tuple-element(arg_tuple.211), index=2 constant.220 = s32[] constant(1) add.221 = s32[] add(get-tuple-element.214, constant.220) reshape.241 = s32[] reshape(add.221) get-tuple-element.216 = s32[] get-tuple-element(arg_tuple.211), index=4 reshape.242 = s32[] reshape(get-tuple-element.216) get-tuple-element.215 = (f32[<=250]{0}, s32[]) get-tuple-element(arg_tuple.211), index=3 get-tuple-element.235 = f32[<=250]{0} get-tuple-element(get-tuple-element.215), index=0 get-tuple-element.217 = (f32[<=250]{0}, s32[]) get-tuple-element(arg_tuple.211), index=5 get-tuple-element.223 = f32[<=250]{0} get-tuple-element(get-tuple-element.217), index=0 dynamic-slice.224 = f32[1]{0} dynamic-slice(get-tuple-element.223, get-tuple-element.214), dynamic_slice_sizes={1} reshape.225 = f32[] reshape(dynamic-slice.224) constant.226 = 
f32[] constant(0) compare.227 = pred[] compare(reshape.225, constant.226), direction=LE tuple.228 = (f32[]) tuple(reshape.225) conditional.229 = (f32[]) conditional(compare.227, tuple.228, tuple.228), true_computation=cond_1_map_while_cond_true_82__.114, false_computation=cond_1_map_while_cond_false_83__.200 get-tuple-element.230 = f32[] get-tuple-element(conditional.229), index=0 reshape.233 = f32[1]{0} reshape(get-tuple-element.230) dynamic-update-slice.236 = f32[<=250]{0} dynamic-update-slice(get-tuple-element.235, reshape.233, get-tuple-element.214) get-tuple-element.237 = s32[] get-tuple-element(get-tuple-element.215), index=1 tuple.238 = (f32[<=250]{0}, s32[]) tuple(dynamic-update-slice.236, get-tuple-element.237) ROOT tuple.243 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], (f32[<=250]{0}, s32[])) tuple(add.219, get-tuple-element.213, add.221, tuple.238, get-tuple-element.216, get-tuple-element.217) } cond_wrapper.257 { inputs.258 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], (f32[<=250]{0}, s32[])) parameter(0) get-tuple-element.0 = s32[] get-tuple-element(inputs.258), index=0 get-tuple-element.1 = s32[] get-tuple-element(inputs.258), index=1 compare.0 = pred[] compare(get-tuple-element.0, get-tuple-element.1), direction=LT get-tuple-element.2 = s32[] get-tuple-element(inputs.258), index=2 get-tuple-element.3 = s32[] get-tuple-element(inputs.258), index=4 compare.1 = pred[] compare(get-tuple-element.2, get-tuple-element.3), direction=LT and.0 = pred[] and(compare.0, compare.1) tuple.0 = (pred[]) tuple(and.0) ROOT get-tuple-element.260 = pred[] get-tuple-element(tuple.0), index=0 reshape.0 = pred[] reshape(and.0) } cond_1_Sum-reduction.261 { x.262 = f32[] parameter(0) y.263 = f32[] parameter(1) ROOT add.264 = f32[] add(x.262, y.263) } cond_1_true_36__.265 { arg_tuple.266 = (s32[], f32[250]{0}) parameter(0) get-tuple-element.267 = s32[] get-tuple-element(arg_tuple.266), index=0 reshape.269 = s32[1]{0} reshape(get-tuple-element.267) 
concatenate.270 = s32[1]{0} concatenate(reshape.269), dimensions={0} slice.280 = s32[1]{0} slice(concatenate.270), slice={[0:1]} reshape.281 = s32[] reshape(reshape.269) constant.275 = s32[] constant(0) compare.282 = pred[] compare(get-tuple-element.267, constant.275), direction=LT constant.276 = s32[] constant(250) add.283 = s32[] add(constant.276, get-tuple-element.267) select.284 = s32[] select(compare.282, add.283, get-tuple-element.267) constant.277 = s32[1]{0} constant({0}) slice.278 = s32[1]{0} slice(constant.277), slice={[0:1]} reshape.279 = s32[] reshape(slice.278) subtract.285 = s32[] subtract(select.284, reshape.279) maximum.286 = s32[] maximum(subtract.285, constant.275) convert.287 = s32[] convert(maximum.286) get-tuple-element.268 = f32[250]{0} get-tuple-element(arg_tuple.266), index=1 constant.271 = f32[] constant(0) pad.272 = f32[500]{0} pad(get-tuple-element.268, constant.271), padding=0_250 constant.273 = s32[] constant(500) set-dimension-size.274 = f32[500]{0} set-dimension-size(pad.272, constant.273), dimensions={0} dynamic-slice.288 = f32[250]{0} dynamic-slice(set-dimension-size.274, reshape.279), dynamic_slice_sizes={250} reshape.289 = f32[250]{0} reshape(dynamic-slice.288) set-dimension-size.290 = f32[<=250]{0} set-dimension-size(dynamic-slice.288, maximum.286), dimensions={0} get-dimension-size.291 = s32[] get-dimension-size(set-dimension-size.290), dimensions={0} convert.292 = s32[] convert(get-dimension-size.291) broadcast.293 = s32[1]{0} broadcast(get-dimension-size.291), dimensions={} concatenate.294 = s32[1]{0} concatenate(broadcast.293), dimensions={0} slice.295 = s32[1]{0} slice(concatenate.294), slice={[0:1]} reshape.296 = s32[] reshape(broadcast.293) constant.309 = s32[] constant(0) constant.310 = s32[] constant(0) constant.312 = f32[] constant(0) broadcast.313 = f32[250]{0} broadcast(constant.312), dimensions={} constant.302 = s32[] constant(0) broadcast.303 = s32[250]{0} broadcast(constant.302), dimensions={} 
set-dimension-size.304 = s32[<=250]{0} set-dimension-size(broadcast.303, get-dimension-size.291), dimensions={0} get-dimension-size.311 = s32[] get-dimension-size(set-dimension-size.304), dimensions={0} set-dimension-size.314 = f32[<=250]{0} set-dimension-size(broadcast.313, get-dimension-size.311), dimensions={0} constant.315 = s32[] constant(0) tuple.316 = (f32[<=250]{0}, s32[]) tuple(set-dimension-size.314, constant.315) constant.305 = s32[] constant(250) tuple.306 = (f32[<=250]{0}, s32[]) tuple(set-dimension-size.290, constant.305) tuple.317 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], (f32[<=250]{0}, s32[])) tuple(constant.309, get-dimension-size.291, constant.310, tuple.316, get-dimension-size.291, tuple.306) while.318 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], (f32[<=250]{0}, s32[])) while(tuple.317), condition=cond_wrapper.257, body=cond_1_map_while_body_59__.210 get-tuple-element.319 = s32[] get-tuple-element(while.318), index=0 get-tuple-element.320 = s32[] get-tuple-element(while.318), index=1 get-tuple-element.321 = s32[] get-tuple-element(while.318), index=2 get-tuple-element.322 = (f32[<=250]{0}, s32[]) get-tuple-element(while.318), index=3 get-tuple-element.323 = s32[] get-tuple-element(while.318), index=4 get-tuple-element.324 = (f32[<=250]{0}, s32[]) get-tuple-element(while.318), index=5 tuple.325 = (s32[], s32[], s32[], (f32[<=250]{0}, s32[]), s32[], (f32[<=250]{0}, s32[])) tuple(get-tuple-element.319, get-tuple-element.320, get-tuple-element.321, get-tuple-element.322, get-tuple-element.323, get-tuple-element.324) get-tuple-element.329 = (f32[<=250]{0}, s32[]) get-tuple-element(tuple.325), index=3 get-tuple-element.332 = f32[<=250]{0} get-tuple-element(get-tuple-element.329), index=0 convert.333 = f32[<=250]{0} convert(get-tuple-element.332) constant.334 = f32[] constant(0) convert.335 = f32[] convert(constant.334) reduce.336 = f32[] reduce(get-tuple-element.332, constant.334), dimensions={0}, 
to_apply=cond_1_Sum-reduction.261 convert.337 = f32[] convert(reduce.336) reshape.338 = f32[] reshape(reduce.336) ROOT tuple.339 = (f32[]) tuple(reduce.336) } cond_1_false_37__.340 { arg_tuple.341 = (s32[], f32[250]{0}) parameter(0) constant.344 = f32[] constant(0) reshape.345 = f32[] reshape(constant.344) ROOT tuple.346 = (f32[]) tuple(constant.344) } ENTRY tfcompile.377 { arg6.7 = s32[] parameter(6), parameter_replication={false} arg0.1 = s32[] parameter(0), parameter_replication={false} reshape.9 = s32[] reshape(arg0.1) arg1.2 = f32[250]{0} parameter(1), parameter_replication={false} reshape.10 = f32[250]{0} reshape(arg1.2) arg2.3 = pred[] parameter(2), parameter_replication={false} reshape.11 = pred[] reshape(arg2.3) arg3.4 = pred[] parameter(3), parameter_replication={false} reshape.12 = pred[] reshape(arg3.4) arg4.5 = s32[] parameter(4), parameter_replication={false} reshape.13 = s32[] reshape(arg4.5) arg5.6 = pred[] parameter(5), parameter_replication={false} reshape.14 = pred[] reshape(arg5.6) arg7.8 = pred[] parameter(7), parameter_replication={false} reshape.16 = pred[] reshape(arg7.8) tuple.1 = (s32[], f32[250]{0}) tuple(arg0.1, arg1.2) conditional.0 = (f32[], s32[]) conditional(arg2.3, tuple.1, tuple.1), true_computation=cond_2_true_195__.31, false_computation=cond_2_false_196__.76 get-tuple-element.4 = f32[] get-tuple-element(conditional.0), index=0 reshape.1 = f32[1]{0} reshape(get-tuple-element.4) get-tuple-element.5 = s32[] get-tuple-element(conditional.0), index=1 convert.0 = f32[] convert(get-tuple-element.5) reshape.2 = f32[1]{0} reshape(convert.0) tuple.2 = (pred[], pred[], pred[]) tuple(arg3.4, arg5.6, arg7.8) conditional.1 = (pred[]) conditional(arg3.4, tuple.2, tuple.2), true_computation=cond_true_10__.85, false_computation=cond_false_11__.104 get-tuple-element.6 = pred[] get-tuple-element(conditional.1), index=0 tuple.3 = (s32[], f32[250]{0}) tuple(arg4.5, arg1.2) conditional.2 = (f32[]) conditional(get-tuple-element.6, tuple.3, tuple.3), 
true_computation=cond_1_true_36__.265, false_computation=cond_1_false_37__.340 get-tuple-element.7 = f32[] get-tuple-element(conditional.2), index=0 reshape.3 = f32[1]{0} reshape(get-tuple-element.7) concatenate.0 = f32[3]{0} concatenate(reshape.1, reshape.2, reshape.3), dimensions={0} tuple.4 = (f32[3]{0}) tuple(concatenate.0) get-tuple-element.374 = f32[3]{0} get-tuple-element(tuple.4), index=0 reshape.375 = f32[3]{0} reshape(get-tuple-element.374) ROOT tuple.376 = (f32[3]{0}) tuple(get-tuple-element.374) reshape.4 = f32[3]{0} reshape(concatenate.0) } )"; TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo)); TF_ASSERT_OK(RunInference()); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_dimension_inference.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_dimension_inference_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
79bd5411-3f18-46a2-ad61-ae5728c586c1
cpp
tensorflow/tensorflow
call_inliner
third_party/xla/xla/service/call_inliner.cc
third_party/xla/xla/service/call_inliner_test.cc
// Implementation of the CallInliner HLO pass: replaces kCall instructions
// with a clone of the callee computation's body, spliced into the caller.
#include "xla/service/call_inliner.h"

#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding_metadata.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_domain_isolator.h"
#include "xla/service/spmd/shardy/constants.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace {

// DFS visitor that walks a call's callee computation and clones every
// instruction into the caller computation, keeping a map from callee
// instruction to its clone so operands and control edges can be rewired.
class SubcomputationInsertionVisitor : public DfsHloVisitorWithDefault {
 public:
  // `call` must be a kCall instruction; its parent computation is the
  // destination the callee body is cloned into.
  explicit SubcomputationInsertionVisitor(HloInstruction* call)
      : call_(call), outer_(call->parent()) {
    CHECK_EQ(HloOpcode::kCall, call_->opcode());
  }

  // Clones `hlo` into the caller with operands resolved through the
  // callee->clone map, and mirrors its control-predecessor edges.
  absl::Status DefaultAction(HloInstruction* hlo) override {
    std::vector<HloInstruction*> new_operands;
    for (HloInstruction* operand : hlo->operands()) {
      TF_ASSIGN_OR_RETURN(HloInstruction * new_operand, Resolve(operand));
      new_operands.push_back(new_operand);
    }
    VLOG(1) << "Cloning HLO and adding to caller: " << hlo->ToString();
    auto new_hlo = hlo->CloneWithNewOperands(hlo->shape(), new_operands);
    HloInstruction* new_hlo_pointer =
        outer_->AddInstruction(std::move(new_hlo));
    TF_RETURN_IF_ERROR(NoteMapping(hlo, new_hlo_pointer));
    // Carry control dependencies from the callee body over to the clones.
    for (HloInstruction* control_predecessor : hlo->control_predecessors()) {
      TF_ASSIGN_OR_RETURN(HloInstruction * new_control_predecessor,
                          Resolve(control_predecessor));
      TF_RETURN_IF_ERROR(
          new_control_predecessor->AddControlDependencyTo(new_hlo_pointer));
    }
    return absl::OkStatus();
  }

  // A callee parameter maps directly to the corresponding call operand;
  // nothing is cloned.
  absl::Status HandleParameter(HloInstruction* parameter) override {
    TF_RETURN_IF_ERROR(NoteMapping(
        parameter, call_->mutable_operand(parameter->parameter_number())));
    return absl::OkStatus();
  }

  // After the whole body is cloned, replaces the call with the clone of the
  // callee root so all users of the call now use the inlined result.
  absl::Status FinishVisit(HloInstruction* root) override {
    TF_ASSIGN_OR_RETURN(HloInstruction * new_root, Resolve(root));
    VLOG(1) << "Replacing all uses of " << call_->ToString()
            << " with new root " << new_root->ToString();
    return outer_->ReplaceInstruction(call_, new_root);
  }

  // Hands the callee-instruction -> cloned-instruction map to the caller.
  CallInliner::InlinedInstructionMap ConsumeInstructionMap() {
    return std::move(subcomputation_hlo_to_new_hlo_);
  }

 private:
  // Looks up the clone of `subcomputation_hlo`; NotFound if it was never
  // visited (which would indicate a traversal-order bug).
  absl::StatusOr<HloInstruction*> Resolve(HloInstruction* subcomputation_hlo) {
    auto it = subcomputation_hlo_to_new_hlo_.find(subcomputation_hlo);
    if (it == subcomputation_hlo_to_new_hlo_.end()) {
      return NotFound(
          "Could not find mapping from subcomputation HLO %s to a cloned HLO.",
          subcomputation_hlo->ToString());
    }
    return it->second;
  }

  // Records the clone for `subcomputation_hlo`; each callee instruction may
  // be mapped exactly once.
  absl::Status NoteMapping(HloInstruction* subcomputation_hlo,
                           HloInstruction* new_hlo) {
    auto result = subcomputation_hlo_to_new_hlo_.insert(
        std::make_pair(subcomputation_hlo, new_hlo));
    TF_RET_CHECK(result.second)
        << "A mapping for the subcomputation HLO is already present.";
    return absl::OkStatus();
  }

  HloInstruction* call_;    // The kCall being inlined.
  HloComputation* outer_;   // The caller computation receiving the clones.
  CallInliner::InlinedInstructionMap subcomputation_hlo_to_new_hlo_;
};

// Returns false (i.e. "do not inline") when the Shardy partitioner is in use
// and the callee is a shmap body or a Shardy manual-computation body, which
// must be kept as calls for later partitioning.
bool InlineUnderShardy(HloInstruction* instruction) {
  return !(instruction->GetModule()->config().use_shardy_partitioner() &&
           (absl::StrContains(instruction->to_apply()->name(), "shmap_body") ||
            absl::StartsWith(instruction->to_apply()->name(),
                             sdy::kManualComputationBodyFuncName.str())));
}

}  // namespace

// Inlines a single kCall instruction in place and returns the map from
// callee instructions to their clones in the caller.
absl::StatusOr<CallInliner::InlinedInstructionMap> CallInliner::Inline(
    HloInstruction* call) {
  TF_RET_CHECK(call->opcode() == HloOpcode::kCall)
      << "Instruction was not a call op: " << call->opcode();
  // Composite-call metadata describes the call itself and is meaningless
  // once the call is dissolved, so strip it before inlining.
  if (call->is_composite()) {
    FrontendAttributes frontend_attributes = call->frontend_attributes();
    frontend_attributes.mutable_map()->erase("composite.name");
    frontend_attributes.mutable_map()->erase("composite.attributes");
    frontend_attributes.mutable_map()->erase("composite.version");
    call->set_frontend_attributes(frontend_attributes);
  }
  const auto& callees = call->called_computations();
  TF_RET_CHECK(callees.size() == 1);
  HloComputation* callee = callees[0];
  // Propagate MUST_FUSE / MAXIMAL_FUSE frontend attributes from the call to
  // every fusible instruction of the callee so the hint survives inlining.
  if (call->has_frontend_attributes()) {
    const FrontendAttributes& call_attributes = call->frontend_attributes();
    std::string has_fuse =
        call_attributes.map().contains("MUST_FUSE")      ? "MUST_FUSE"
        : call_attributes.map().contains("MAXIMAL_FUSE") ? "MAXIMAL_FUSE"
                                                         : "";
    if (!has_fuse.empty()) {
      for (auto instruction : callee->instructions()) {
        if (instruction->IsFusible()) {
          FrontendAttributes frontend_attributes =
              instruction->frontend_attributes();
          frontend_attributes.mutable_map()->insert(
              {has_fuse, call_attributes.map().at(has_fuse)});
          instruction->set_frontend_attributes(frontend_attributes);
        }
      }
    }
  }
  SubcomputationInsertionVisitor visitor(call);
  TF_RETURN_IF_ERROR(callee->Accept(&visitor));
  return visitor.ConsumeInstructionMap();
}

// A call is inlineable when it is a plain kCall with no backend config,
// does not live inside an async computation, and is not excluded by the
// Shardy rules above.
bool CallInliner::IsInlineableCallOp(HloInstruction* instruction) const {
  return instruction->opcode() == HloOpcode::kCall &&
         !instruction->has_backend_config() &&
         !instruction->parent()->IsAsyncComputation() &&
         InlineUnderShardy(instruction);
}

// Pass entry point: walks the call graph, inlines every eligible call in the
// included execution threads, optionally re-isolates sharding domains on the
// inlined instructions, and runs DCE if anything changed.
absl::StatusOr<bool> CallInliner::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
  bool did_mutate = false;
  TF_RETURN_IF_ERROR(call_graph->VisitNodes([&](const CallGraphNode& node)
                                                -> absl::Status {
    // Skip computations that run on threads outside the requested set.
    if (!HloInstruction::IsThreadIncluded(
            node.computation()->execution_thread(), execution_threads)) {
      return absl::OkStatus();
    }
    VLOG(1) << "Visiting node: " << node.ToString();
    for (HloInstruction* instruction :
         node.computation()->MakeInstructionPostOrder()) {
      if (IsInlineableCallOp(instruction)) {
        const auto& callees = instruction->called_computations();
        TF_RET_CHECK(callees.size() == 1);
        // When single_call_site_ is set, only inline callees that have
        // exactly one caller in the call graph.
        if (!single_call_site_ ||
            call_graph->GetNode(instruction->to_apply())
                    .caller_callsites()
                    .size() == 1) {
          TF_ASSIGN_OR_RETURN(CallInliner::InlinedInstructionMap inline_map,
                              Inline(instruction));
          if (update_domain_) {
            // Re-establish sharding domains around each inlined instruction.
            HloDomainIsolator isolator(
                []() { return ShardingDomainCreator{}; });
            for (const auto& [call_inst, inlined_inst] : inline_map) {
              TF_RETURN_IF_ERROR(
                  isolator.UpdateDomains(inlined_inst).status());
            }
          }
          did_mutate = true;
        }
      }
    }
    return absl::OkStatus();
  }));
  if (did_mutate) {
    // Inlining leaves now-unused callee computations behind; clean them up.
    TF_RETURN_IF_ERROR(HloDCE().Run(module, execution_threads).status());
  }
  return did_mutate;
}

}  // namespace xla
// Unit tests for the CallInliner pass.
#include "xla/service/call_inliner.h"

#include <cstdint>
#include <string>

#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"

namespace op = xla::testing::opcode_matchers;

namespace xla {
namespace {

using CallInlinerTest = HloTestBase;

// Control edges between callee instructions must be preserved on the clones
// that land in the caller.
TEST_F(CallInlinerTest, ControlDependenciesAreCarriedToCaller) {
  HloComputation::Builder inner(TestName() + ".inner");
  HloInstruction* zero = inner.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(24.0f)));
  HloInstruction* one = inner.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
  TF_ASSERT_OK(zero->AddControlDependencyTo(one));
  auto module = CreateNewVerifiedModule();
  HloComputation* inner_computation =
      module->AddEmbeddedComputation(inner.Build());
  HloComputation::Builder outer(TestName() + ".outer");
  Shape r0f32 = ShapeUtil::MakeShape(F32, {});
  outer.AddInstruction(
      HloInstruction::CreateCall(r0f32, {}, inner_computation));
  auto computation = module->AddEntryComputation(outer.Build());

  CallInliner call_inliner;
  TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
  ASSERT_TRUE(mutated);
  // The entry root is now the inlined 42 constant with the 24 constant as
  // its sole control predecessor.
  EXPECT_THAT(computation->root_instruction(), op::Constant());
  EXPECT_EQ(computation->root_instruction()->literal().GetFirstElement<float>(),
            42);
  ASSERT_EQ(1, computation->root_instruction()->control_predecessors().size());
  auto prior = computation->root_instruction()->control_predecessors()[0];
  EXPECT_THAT(prior, op::Constant());
  EXPECT_EQ(prior->literal().GetFirstElement<float>(), 24);
}

// Calls nested inside while condition/body computations are inlined too.
TEST_F(CallInlinerTest, CallsWithinWhileBodiesAreInlined) {
  const Shape pred = ShapeUtil::MakeShape(PRED, {});
  auto module = CreateNewVerifiedModule();

  HloComputation::Builder just_false(TestName() + ".false");
  just_false.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  HloComputation* false_computation =
      module->AddEmbeddedComputation(just_false.Build());

  // A computation whose root is a call to the constant-false computation;
  // used as both while condition and while body.
  HloComputation::Builder call_false_builder(TestName() + ".call_false");
  call_false_builder.AddInstruction(
      HloInstruction::CreateParameter(0, pred, "param"));
  call_false_builder.AddInstruction(
      HloInstruction::CreateCall(pred, {}, false_computation));
  HloComputation* call_false =
      module->AddEmbeddedComputation(call_false_builder.Build());

  HloComputation::Builder outer(TestName() + ".outer");
  HloInstruction* init_value = outer.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  outer.AddInstruction(
      HloInstruction::CreateWhile(pred, call_false, call_false, init_value));
  auto computation = module->AddEntryComputation(outer.Build());

  CallInliner call_inliner;
  TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
  ASSERT_TRUE(mutated);
  EXPECT_THAT(
      computation->root_instruction()->while_condition()->root_instruction(),
      op::Constant());
  EXPECT_THAT(computation->root_instruction()->while_body()->root_instruction(),
              op::Constant());
}

// CallInliner::Inline can be invoked directly on one call without running
// the whole pass; control successors are preserved on the clones.
TEST_F(CallInlinerTest, InlineWithoutRunningPass) {
  const Shape pred = ShapeUtil::MakeShape(PRED, {});
  auto module = CreateNewVerifiedModule();

  HloComputation::Builder just_false(TestName() + ".false");
  auto* true_constant = just_false.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR1<bool>({true})));
  auto* false_constant = just_false.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  TF_ASSERT_OK(false_constant->AddControlDependencyTo(true_constant));
  HloComputation* false_computation =
      module->AddEmbeddedComputation(just_false.Build());

  HloComputation::Builder call_false_builder(TestName() + ".call_false");
  HloInstruction* call = call_false_builder.AddInstruction(
      HloInstruction::CreateCall(pred, {}, false_computation));
  auto computation = module->AddEntryComputation(call_false_builder.Build());

  TF_ASSERT_OK(CallInliner::Inline(call).status());
  EXPECT_THAT(computation->root_instruction(), op::Constant());
  EXPECT_THAT(computation->root_instruction()->control_successors(),
              ElementsAre(op::Constant()));
}

// Repeated calls to computations whose results are unused should still
// inline cleanly, leaving just the entry root constant after DCE.
TEST_F(CallInlinerTest, InlineWithEmptyComputation) {
  const Shape pred = ShapeUtil::MakeShape(PRED, {});
  auto module = CreateNewVerifiedModule();
  Shape r0s32 = ShapeUtil::MakeShape(S32, {});
  HloComputation::Builder empty(TestName() + ".empty");
  empty.AddInstruction(HloInstruction::CreateParameter(0, r0s32, "A"));
  empty.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
  HloComputation* empty_computation =
      module->AddEmbeddedComputation(empty.Build());

  HloComputation::Builder empty2(TestName() + ".empty");
  empty2.AddInstruction(HloInstruction::CreateParameter(0, r0s32, "A"));
  empty2.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
  HloComputation* empty2_computation =
      module->AddEmbeddedComputation(empty2.Build());

  HloComputation::Builder entry("entry");
  auto zero = entry.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
  entry.AddInstruction(
      HloInstruction::CreateCall(r0s32, {zero}, empty_computation));
  HloInstruction* call1 = entry.AddInstruction(
      HloInstruction::CreateCall(r0s32, {zero}, empty2_computation));
  entry.AddInstruction(
      HloInstruction::CreateCall(r0s32, {call1}, empty_computation));
  auto computation = module->AddEntryComputation(entry.Build());

  CallInliner call_inliner;
  TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
  ASSERT_TRUE(mutated);
  EXPECT_THAT(computation->root_instruction(), op::Constant());
}

// Side-effecting callees (outfeed) are still inlined by the pass.
TEST_F(CallInlinerTest, CallToOutfeedComputationIsInlined) {
  const Shape f32 = ShapeUtil::MakeShape(F32, {});
  auto module = CreateNewVerifiedModule();
  HloComputation::Builder outfeeder(TestName() + ".outfeeder");
  auto value = outfeeder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
  auto token = outfeeder.AddInstruction(HloInstruction::CreateToken());
  outfeeder.AddInstruction(
      HloInstruction::CreateOutfeed(f32, value, token, ""));
  auto outfeed_computation = module->AddEmbeddedComputation(outfeeder.Build());

  HloComputation::Builder outer(TestName() + ".outer");
  outer.AddInstruction(HloInstruction::CreateCall(
      outfeed_computation->root_instruction()->shape(), {},
      outfeed_computation));
  module->AddEntryComputation(outer.Build());

  CallInliner call_inliner;
  TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
  ASSERT_TRUE(mutated);
}

// With single_call_site=true, only callees with exactly one call site are
// inlined: computation `a` (two callers) stays; `b` (one caller) is inlined.
TEST_F(CallInlinerTest, InlineSingleUseCalleesOnly) {
  const absl::string_view hlo_string = R"( HloModule inline_module a { ROOT tuple = () tuple() } b { ROOT tuple.1 = () tuple() } ENTRY inline { a = () call(), to_apply=a b = () call(), to_apply=a c = () call(), to_apply=b ROOT tuple = ((), (), ()) tuple(a, b, c) })";
  auto module = ParseAndReturnVerifiedModule(hlo_string).value();
  CallInliner call_inliner(true);
  TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
  ASSERT_TRUE(mutated);

  ASSERT_EQ(module->entry_computation()->instruction_count(), 4);
  auto inst = module->entry_computation()->instructions().begin();
  EXPECT_THAT(*inst, op::Call());
  ++inst;
  EXPECT_THAT(*inst, op::Call());
  ++inst;
  EXPECT_THAT(*inst, op::Tuple());
  ++inst;
  EXPECT_THAT(*inst, op::Tuple());
}

// When execution_threads is restricted, only calls in the included threads
// are inlined; the secondary-thread computations are untouched in the
// restricted run and fully inlined in the unrestricted run.
TEST_F(CallInlinerTest, InliningPerformedInsideSpecifiedThreadsOnly) {
  const std::string hlo_string = R"( HloModule inline_specified_threads_only %secondary_inner () -> u32[] { ROOT %co.2 = u32[] constant(2) }, execution_thread="secondary_thread" %secondary_outer () -> u32[] { %co.1 = u32[] constant(1) %call.1 = u32[] call(), to_apply=%secondary_inner ROOT %add.1 = add(%co.1, %call.1) }, execution_thread="secondary_thread" %main_inner () -> u32[] { %co.0 = u32[] constant(0) %async-start = ((), u32[], u32[]) call-start(), async_execution_thread="secondary_thread", to_apply=secondary_outer %async-done = u32[] call-done(((), u32[], u32[]) %async-start) ROOT %add.2 = add(%co.0, %async-done) } ENTRY %main_outer (p0: u32[]) -> u32[] { %p.0 = u32[] parameter(0) %call.0 = u32[] call(), to_apply=%main_inner ROOT %add.3 = add(%p.0, %call.0) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnUnverifiedModule(hlo_string));
  auto module_clone = module->Clone("");

  // Unrestricted run: everything on the main thread is inlined; the async
  // secondary-thread body keeps its structure behind the async boundary.
  {
    VLOG(1) << "Module BEFORE CallInliner\n" << module->ToString();

    CallInliner call_inliner;
    TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
    VLOG(1) << "Module AFTER CallInliner\n" << module->ToString();
    EXPECT_TRUE(mutated);
    EXPECT_THAT(
        module->entry_computation()->root_instruction(),
        op::Add(op::Parameter(0),
                op::Add(op::Constant(LiteralUtil::CreateR0<uint32_t>(0)),
                        op::AsyncDone())));
    EXPECT_THAT(module->entry_computation()
                    ->root_instruction()
                    ->operand(1)
                    ->operand(1)
                    ->async_wrapped_instruction()
                    ->called_computations()
                    .at(0)
                    ->root_instruction(),
                op::Add(op::Constant(LiteralUtil::CreateR0<uint32_t>(1)),
                        op::Constant(LiteralUtil::CreateR0<uint32_t>(2))));
  }
  VLOG(1) << "Restricting CallInliner to the secondary thread.";
  // Restricted run: only "secondary_thread" computations are inlined; the
  // main-thread call remains a call.
  {
    CallInliner call_inliner;
    TF_ASSERT_OK_AND_ASSIGN(
        bool mutated,
        call_inliner.Run(module_clone.get(), {"secondary_thread"}));
    VLOG(1) << "Module AFTER CallInliner\n" << module_clone->ToString();
    EXPECT_TRUE(mutated);
    EXPECT_THAT(module_clone->entry_computation()->root_instruction(),
                op::Add(op::Parameter(0), op::Call()));
    EXPECT_THAT(module_clone->entry_computation()
                    ->root_instruction()
                    ->operand(1)
                    ->called_computations()
                    .at(0)
                    ->root_instruction(),
                op::Add(op::Constant(LiteralUtil::CreateR0<uint32_t>(0)),
                        op::AsyncDone()));
    EXPECT_THAT(module_clone->entry_computation()
                    ->root_instruction()
                    ->operand(1)
                    ->called_computations()
                    .at(0)
                    ->root_instruction()
                    ->operand(1)
                    ->async_wrapped_instruction()
                    ->called_computations()
                    .at(0)
                    ->root_instruction(),
                op::Add(op::Constant(LiteralUtil::CreateR0<uint32_t>(1)),
                        op::Constant(LiteralUtil::CreateR0<uint32_t>(2))));
  }
}

// Composite calls are inlined and their composite.* frontend attributes are
// stripped from the resulting instructions.
TEST_F(CallInlinerTest, InlineCompositeCall) {
  const absl::string_view hlo_string = R"( HloModule composite %add (lhs: f32[]) -> f32[] { %lhs = f32[] parameter(0) %rhs = f32[] constant(2) ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs) } ENTRY %main () -> f32[] { %lhs = f32[] constant(42) ROOT %call = f32[] call(f32[] %lhs), to_apply=%add, is_composite=true, frontend_attributes={composite.attributes={n = 1 : i32, tensor = dense<1> : tensor<i32>},composite.name="foo.bar",composite.version="1"} })";
  auto module = ParseAndReturnVerifiedModule(hlo_string).value();
  CallInliner call_inliner(true);
  TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
  ASSERT_TRUE(mutated);

  ASSERT_EQ(module->entry_computation()->instruction_count(), 3);
  auto inst = module->entry_computation()->instructions().begin();
  EXPECT_THAT(*inst, op::Constant());
  ++inst;
  EXPECT_THAT(*inst, op::Constant());
  ++inst;
  EXPECT_THAT(*inst, op::Add());
  EXPECT_TRUE((*inst)->frontend_attributes().map().empty());
}

// With the Shardy partitioner enabled, calls to computations whose name
// contains "shmap_body" must NOT be inlined.
TEST_F(CallInlinerTest, UseShardyMhloToHloShmapBodyNotInlined) {
  const char* const hloString = R"( HloModule jit_f, entry_computation_layout={(f32[8,8]{1,0})->f32[8,8]{1,0}} %prefix_shmap_body_suffix.4 (Arg_0.5: f32[1,8]) -> f32[1,8] { %Arg_0.5 = f32[1,8]{1,0} parameter(0) ROOT %add.6 = f32[1,8]{1,0} add(f32[1,8]{1,0} %Arg_0.5, f32[1,8]{1,0} %Arg_0.5), metadata={source_file="-" source_line=11} } ENTRY %main.10 (Arg_0.1: f32[8,8]) -> f32[8,8] { %Arg_0.1 = f32[8,8]{1,0} parameter(0) %custom-call.2 = f32[8,8]{1,0} custom-call(f32[8,8]{1,0} %Arg_0.1), custom_call_target="Sharding", sharding={devices=[8,1]<=[8]}, metadata={source_file="-" source_line=3} %custom-call.3 = f32[1,8]{1,0} custom-call(f32[8,8]{1,0} %custom-call.2), custom_call_target="SPMDFullToShardShape", sharding={manual}, metadata={source_file="-" source_line=4} %call.7 = f32[1,8]{1,0} call(f32[1,8]{1,0} %custom-call.3), to_apply=%prefix_shmap_body_suffix.4 %custom-call.8 = f32[1,8]{1,0} custom-call(f32[1,8]{1,0} %call.7), custom_call_target="Sharding", sharding={manual}, metadata={source_file="-" source_line=6} ROOT %custom-call.9 = f32[8,8]{1,0} custom-call(f32[1,8]{1,0} %custom-call.8), custom_call_target="SPMDShardToFullShape", sharding={devices=[8,1]<=[8]}, metadata={source_file="-" source_line=7} })";
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hloString));
  module->mutable_config().set_use_shardy_partitioner(true);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, CallInliner().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_FALSE(changed);
  HloInstruction* call = FindInstruction(module.get(), xla::HloOpcode::kCall);
  EXPECT_NE(call, nullptr);
  EXPECT_TRUE(call->has_to_apply());
  EXPECT_EQ(call->to_apply()->name(), "prefix_shmap_body_suffix.4");
}

// Calls to computations named with the Shardy manual-computation-body prefix
// must NOT be inlined when the Shardy partitioner is enabled.
TEST_F(CallInlinerTest, UseShardManualComputationBodyNotInlined) {
  const char* const hloString = R"( HloModule jit_f, entry_computation_layout={(f32[8,8]{1,0})->f32[8,8]{1,0}} %xla.sdy.manual_computation_body.4 (Arg_0.5: f32[1,8]) -> f32[1,8] { %Arg_0.5 = f32[1,8]{1,0} parameter(0) ROOT %add.6 = f32[1,8]{1,0} add(f32[1,8]{1,0} %Arg_0.5, f32[1,8]{1,0} %Arg_0.5), metadata={source_file="-" source_line=11} } ENTRY %main.10 (Arg_0.1: f32[8,8]) -> f32[8,8] { %Arg_0.1 = f32[8,8]{1,0} parameter(0) %custom-call.3 = f32[1,8]{1,0} custom-call(f32[8,8]{1,0} %Arg_0.1), custom_call_target="SPMDFullToShardShape", sharding={manual}, metadata={source_file="-" source_line=4} %call.7 = f32[1,8]{1,0} call(f32[1,8]{1,0} %custom-call.3), to_apply=%xla.sdy.manual_computation_body.4 ROOT %custom-call.9 = f32[8,8]{1,0} custom-call(f32[1,8]{1,0} %call.7), custom_call_target="SPMDShardToFullShape", sharding={devices=[8,1]<=[8]}, metadata={source_file="-" source_line=7} })";
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hloString));
  module->mutable_config().set_use_shardy_partitioner(true);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, CallInliner().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_FALSE(changed);
  HloInstruction* call = FindInstruction(module.get(), xla::HloOpcode::kCall);
  EXPECT_NE(call, nullptr);
  EXPECT_TRUE(call->has_to_apply());
  EXPECT_EQ(call->to_apply()->name(), "xla.sdy.manual_computation_body.4");
}

// A name that merely has a prefix before the manual-computation-body marker
// does not match the StartsWith check, so this call IS inlined.
TEST_F(CallInlinerTest, UseShardManualComputationBodyInlined) {
  const char* const hloString = R"( HloModule jit_f, entry_computation_layout={(f32[8,8]{1,0})->f32[8,8]{1,0}} %prefix_xla.sdy.manual_computation_body.4 (Arg_0.5: f32[1,8]) -> f32[1,8] { %Arg_0.5 = f32[1,8]{1,0} parameter(0) ROOT %add.6 = f32[1,8]{1,0} add(f32[1,8]{1,0} %Arg_0.5, f32[1,8]{1,0} %Arg_0.5), metadata={source_file="-" source_line=11} } ENTRY %main.10 (Arg_0.1: f32[8,8]) -> f32[8,8] { %Arg_0.1 = f32[8,8]{1,0} parameter(0) %custom-call.3 = f32[1,8]{1,0} custom-call(f32[8,8]{1,0} %Arg_0.1), custom_call_target="SPMDFullToShardShape", sharding={manual}, metadata={source_file="-" source_line=4} %call.7 = f32[1,8]{1,0} call(f32[1,8]{1,0} %custom-call.3), to_apply=%prefix_xla.sdy.manual_computation_body.4 ROOT %custom-call.9 = f32[8,8]{1,0} custom-call(f32[1,8]{1,0} %call.7), custom_call_target="SPMDShardToFullShape", sharding={devices=[8,1]<=[8]}, metadata={source_file="-" source_line=7} })";
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hloString));
  module->mutable_config().set_use_shardy_partitioner(true);
  TF_ASSERT_OK_AND_ASSIGN(bool changed, CallInliner().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
}

}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/call_inliner.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/call_inliner_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
164bcdca-2f59-413e-98d0-a4c8cf57007b
cpp
tensorflow/tensorflow
reshape_decomposer
third_party/xla/xla/service/reshape_decomposer.cc
third_party/xla/xla/service/reshape_decomposer_test.cc
// ReshapeDecomposer pass: rewrites every reshape that is not already a
// bitcast into an equivalent sequence of physical copies (transposes) plus a
// bitcast-reshape, so downstream passes only see bitcast-compatible reshapes.
#include "xla/service/reshape_decomposer.h"

#include "absl/status/status.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/service/hlo_creation_utils.h"

namespace xla {

namespace {

class ReshapeDecomposerVisitor : public DfsHloRewriteVisitor {
 public:
  // Decomposes one reshape. Four cases, tried in order:
  //   1. already a bitcast          -> replace with a bitcast;
  //   2. output layout alignable    -> copy operand, then bitcast;
  //   3. input layout alignable     -> bitcast, then copy result;
  //   4. neither alignable          -> copy to descending layout, bitcast,
  //                                    copy to the target layout.
  absl::Status HandleReshape(HloInstruction* reshape) override {
    HloInstruction* operand = reshape->mutable_operand(0);
    auto s = reshape->shape();
    auto s0 = operand->shape();
    if (ShapeUtil::ReshapeIsBitcast(s, s0)) {
      // Case 1: the reshape is a pure relayout-free bitcast.
      auto b = MakeBitcastHlo(operand, s, &operand->metadata());
      return ReplaceInstruction(reshape, b);
    } else if (auto output_aligned_input_shape =
                   ShapeUtil::AlignLayouts(s, s0)) {
      // Case 2: transpose the operand into an aligned layout, then bitcast.
      Shape new_input_shape = *output_aligned_input_shape;
      HloInstruction* copied_operand = MakeCopyHlo(operand, new_input_shape);
      VLOG(3) << "Decomposing reshape into reshape-bitcast and a physical "
                 "transpose on the operand: "
              << copied_operand->ToString();

      auto b = MakeBitcastHlo(copied_operand, s, &copied_operand->metadata());
      TF_RETURN_IF_ERROR(ReplaceInstruction(reshape, b));
      DCHECK(ShapeUtil::ReshapeIsBitcast(b->shape(), b->operand(0)->shape()));
    } else if (auto input_aligned_output_shape =
                   ShapeUtil::AlignLayouts(s0, s)) {
      // Case 3: bitcast first, then transpose the result into place.
      Shape new_output_shape = *input_aligned_output_shape;
      auto b = MakeBitcastHlo(operand, new_output_shape, &operand->metadata());
      DCHECK(ShapeUtil::ReshapeIsBitcast(b->shape(), b->operand(0)->shape()));
      HloInstruction* copied_result = MakeCopyHlo(b, s);
      VLOG(3) << "Decomposing reshape into reshape-bitcast and a physical "
                 "transposition on the result: "
              << copied_result->ToString();
      TF_RETURN_IF_ERROR(ReplaceInstruction(reshape, copied_result));
    } else {
      // Case 4: no alignment possible in either direction; normalize both
      // sides to descending layouts with explicit copies.
      VLOG(3) << "Both input and output of reshape are not alignable, create "
                 "two physical transposes";
      auto s0_normalized = ShapeUtil::MakeShapeWithDescendingLayout(
          s0.element_type(), s0.dimensions());
      auto c1 = MakeCopyHlo(reshape->mutable_operand(0), s0_normalized);
      auto s_normalized = ShapeUtil::MakeShapeWithDescendingLayout(
          s.element_type(), s.dimensions());
      auto b = MakeBitcastHlo(c1, s_normalized, &c1->metadata());
      DCHECK(ShapeUtil::ReshapeIsBitcast(b->shape(), b->operand(0)->shape()));
      auto c2 = MakeCopyHlo(b, s);
      TF_RETURN_IF_ERROR(ReplaceInstruction(reshape, c2));
    }
    return absl::OkStatus();
  }
};

}  // namespace

// Pass entry point: applies the visitor to every computation in the module;
// returns true if any reshape was rewritten.
absl::StatusOr<bool> ReshapeDecomposer::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  return ReshapeDecomposerVisitor{}.RunOnModule(module, execution_threads);
}

}  // namespace xla
// Unit tests for the ReshapeDecomposer pass.
#include "xla/service/reshape_decomposer.h"

#include <memory>
#include <optional>

#include "xla/service/hlo_parser.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"

namespace xla {

namespace {

class ReshapeDecomposerTest : public HloTestBase {
 public:
  // Runs the pass on `hlo`, FileChecks the rewrite against `expected`, and
  // additionally asserts the pass postcondition: every surviving reshape in
  // the entry computation is a bitcast.
  void CheckReshapeDecomposer(const char* hlo,
                              std::optional<absl::string_view> expected) {
    RunAndFilecheckHloRewrite(
        hlo, ReshapeDecomposer{}, expected, [&](HloModule* module) {
          EXPECT_TRUE(absl::c_all_of(
              module->entry_computation()->instructions(),
              [&](const HloInstruction* instr) {
                return instr->opcode() != HloOpcode::kReshape ||
                       ShapeUtil::ReshapeIsBitcast(instr->operand(0)->shape(),
                                                   instr->shape());
              }));
        });
  }
};

// A reshape that is already a bitcast is left as a bitcast.
TEST_F(ReshapeDecomposerTest, IsBitcast) {
  const char* hlo = R"( HloModule Module ENTRY main { p = f32[8]{0} parameter(0) ROOT r = f32[4,2]{1,0} reshape(p) } )";
  CheckReshapeDecomposer(hlo, R"( )");
}

// Output layout alignable to the input: copy-then-bitcast decomposition.
TEST_F(ReshapeDecomposerTest, AlignableOutput) {
  const char* hlo = R"( HloModule Module ENTRY main { p = f32[8,3]{1,0} parameter(0) ROOT r = f32[4,2,3]{0,1,2} reshape(p) } )";
  CheckReshapeDecomposer(hlo, R"( )");
}

// Input layout alignable to the output: bitcast-then-copy decomposition.
TEST_F(ReshapeDecomposerTest, AlignableInput) {
  const char* hlo = R"( HloModule Module ENTRY main { p = f32[4,2,3]{0,1,2} parameter(0) ROOT r = f32[8,3]{1,0} reshape(p) } )";
  CheckReshapeDecomposer(hlo, R"( )");
}

// Neither side alignable: two physical copies around a bitcast.
TEST_F(ReshapeDecomposerTest, NotAlignable) {
  const char* hlo = R"( HloModule Module ENTRY main { p = f32[4,2,3,8]{0,2,1,3} parameter(0) ROOT r = f32[8,3,2,4]{0,2,1,3} reshape(p) } )";
  CheckReshapeDecomposer(hlo, R"( )");
}

}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reshape_decomposer.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reshape_decomposer_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
872adbcd-9a42-46b0-9278-91c217a8afec
cpp
tensorflow/tensorflow
indexed_array_analysis
third_party/xla/xla/service/indexed_array_analysis.cc
third_party/xla/xla/service/indexed_array_analysis_test.cc
#include "xla/service/indexed_array_analysis.h"

#include <algorithm>
#include <numeric>
#include <optional>
#include <string>
#include <utility>

#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/map_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace {
using Analysis = IndexedArrayAnalysis;
using UnknownArray = Analysis::UnknownArray;
using ConstantArray = Analysis::ConstantArray;
using ReshapedArray = Analysis::ReshapedArray;
using ScalarIndexedArray = Analysis::ScalarIndexedArray;
using absl::StrJoin;
}  // namespace

// Renders `root` as a human-readable s-expression, recursing into sources and
// indices.  With `print_constants` set, constant contents are inlined.
std::string IndexedArrayAnalysis::ToString(Array* root, bool print_constants) {
  switch (root->kind()) {
    case Array::kUnknown: {
      auto* unknown_tensor = root->as<UnknownArray>();
      return absl::StrCat("%", unknown_tensor->instruction().name());
    }
    case Array::kConstant: {
      if (print_constants) {
        std::string contents = root->as<ConstantArray>()->literal()->ToString();
        return absl::StrCat("(constant ", ShapeUtil::HumanString(root->shape()),
                            " ", contents, ")");
      }
      return absl::StrCat("(constant ", ShapeUtil::HumanString(root->shape()),
                          ")");
    }
    case Array::kReshaped: {
      ReshapedArray* reshaped_array = root->as<ReshapedArray>();
      return absl::StrCat(
          "(reshape ", ToString(reshaped_array->operand(), print_constants),
          " to ", ShapeUtil::HumanString(reshaped_array->shape()), ")");
    }
    case Array::kScalarIndexedConstant:
    case Array::kScalarIndexed: {
      auto* indexed_array = root->as<ScalarIndexedArray>();
      std::string name = root->kind() == Array::kScalarIndexedConstant
                             ? "scalar-indexed-const"
                             : "scalar-indexed";
      return absl::StrCat(
          "(", name, " ", ToString(indexed_array->source(), print_constants),
          " ", ToString(indexed_array->indices(), print_constants), " ",
          indexed_array->source_dim(), "->[",
          StrJoin(indexed_array->output_dims(), ","), "])");
    }
  }
}

// Returns the (possibly cached) Array decomposition of `instr`, computing and
// caching it — along with all transitive operands — on a cache miss.
absl::StatusOr<Analysis::Array*> IndexedArrayAnalysis::GetArrayFor(
    const HloInstruction* instr) {
  auto it = cache_.find(instr);
  if (it != cache_.end()) {
    return it->second;
  }
  TF_RETURN_IF_ERROR(TraverseAndPopulateCache(instr));
  return FindOrDie(cache_, instr);
}

// Iterative post-order DFS from `root`: each instruction's Array is computed
// only after all of its operands' Arrays are in `cache_`.  Iterative (explicit
// stack) to avoid stack overflow on deep HLO graphs.
absl::Status IndexedArrayAnalysis::TraverseAndPopulateCache(
    const HloInstruction* root) {
  absl::InlinedVector<const HloInstruction*, 4> stack;
  enum DfsState { kDiscovered, kVisited };
  absl::flat_hash_map<const HloInstruction*, DfsState> dfs_state_map;
  stack.push_back(root);
  InsertOrDie(&dfs_state_map, root, kDiscovered);
  do {
    const HloInstruction* instr = stack.back();
    if (cache_.contains(instr)) {
      stack.pop_back();
      continue;
    }
    switch (FindOrDie(dfs_state_map, instr)) {
      case kDiscovered: {
        // First visit: push un-cached operands; revisit after they resolve.
        for (const HloInstruction* operand : instr->operands()) {
          if (!cache_.contains(operand)) {
            stack.push_back(operand);
            CHECK(!dfs_state_map.contains(operand) ||
                  dfs_state_map[operand] == kDiscovered);
            dfs_state_map[operand] = kDiscovered;
          }
        }
        dfs_state_map[instr] = kVisited;
        break;
      }
      case kVisited:
        // Second visit: all operands are cached, so this node can be computed.
        stack.pop_back();
        TF_ASSIGN_OR_RETURN(Array * array, ComputeArrayFor(instr));
        InsertOrDie(&cache_, instr, array);
        break;
    }
  } while (!stack.empty());
  return absl::OkStatus();
}

// Dispatches on opcode to the specialized ComputeArrayFor* helpers; anything
// unrecognized (or a helper returning nullptr) becomes an UnknownArray.
// Precondition: all operands of `instr` are already present in `cache_`.
absl::StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayFor(
    const HloInstruction* instr) {
  Array* computed_array;
  if (instr->IsElementwise() && instr->operand_count() == 1) {
    TF_ASSIGN_OR_RETURN(
        computed_array,
        ComputeArrayForElementwiseUnaryOp(
            instr->opcode(), FindOrDie(cache_, instr->operand(0))));
  } else if (instr->IsElementwise() && instr->operand_count() == 2) {
    TF_ASSIGN_OR_RETURN(
        computed_array,
        ComputeArrayForElementwiseBinaryOp(
            instr->opcode(), FindOrDie(cache_, instr->operand(0)),
            FindOrDie(cache_, instr->operand(1))));
  } else if (instr->opcode() == HloOpcode::kConstant) {
    TF_ASSIGN_OR_RETURN(computed_array,
                        ComputeArrayForConstant(instr->literal()));
  } else if (instr->opcode() == HloOpcode::kGather) {
    TF_ASSIGN_OR_RETURN(
        computed_array,
        ComputeArrayForGather(instr->shape(), instr->gather_dimension_numbers(),
                              instr->gather_slice_sizes(),
                              FindOrDie(cache_, instr->operand(0)),
                              FindOrDie(cache_, instr->operand(1))));
  } else if (instr->opcode() == HloOpcode::kReshape) {
    TF_ASSIGN_OR_RETURN(
        computed_array,
        ComputeArrayForReshape(instr->shape(),
                               FindOrDie(cache_, instr->operand(0))));
  } else if (instr->opcode() == HloOpcode::kDot) {
    TF_ASSIGN_OR_RETURN(
        computed_array,
        ComputeArrayForDot(instr->shape(), instr->dot_dimension_numbers(),
                           instr->precision_config(),
                           FindOrDie(cache_, instr->operand(0)),
                           FindOrDie(cache_, instr->operand(1))));
  } else {
    computed_array = nullptr;
  }
  if (!computed_array) {
    computed_array = Construct<UnknownArray>(instr);
  }
  return computed_array;
}

absl::StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayForConstant(
    const Literal& literal) {
  return Construct<ConstantArray>(&literal);
}

// Composes Gather(Gather(A, X), Y) into Gather(A, Gather(X, Y)).  The
// simulated-index walk reconstructs, dimension by dimension, where each index
// component of the composite result came from.
absl::StatusOr<ScalarIndexedArray*> IndexedArrayAnalysis::FoldGatherOfGather(
    ScalarIndexedArray* source, Array* indices, int64_t source_dim,
    absl::Span<const int64_t> output_dims, Shape shape) {
  Array* a = source->source();
  Array* x = source->indices();
  Array* y = indices;
  enum class IndexComponent { Ungathered, GatheredFirst, GatheredSecond };
  // Start from A's index space and replay both gathers on it.
  std::vector<IndexComponent> simulated_index(a->shape().dimensions_size(),
                                              IndexComponent::Ungathered);
  // Simulate the first gather.
  EraseAt(&simulated_index, source->source_dim());
  for (int64_t gather_dim : source->output_dims()) {
    simulated_index.insert(simulated_index.begin() + gather_dim,
                           IndexComponent::GatheredFirst);
  }
  // Simulate the second gather.
  EraseAt(&simulated_index, source_dim);
  for (int64_t output_dim : output_dims) {
    simulated_index.insert(simulated_index.begin() + output_dim,
                           IndexComponent::GatheredSecond);
  }
  int64_t source_dim_for_index_array =
      FindIndex(source->output_dims(), source_dim);
  CHECK_NE(source_dim_for_index_array, source->output_dims().size());
  std::vector<int64_t> output_dims_for_index_array;
  int64_t gathered_index_components_seen = 0;
  for (IndexComponent simulation_dim : simulated_index) {
    if (simulation_dim == IndexComponent::GatheredSecond) {
      output_dims_for_index_array.push_back(gathered_index_components_seen);
    }
    if (simulation_dim != IndexComponent::Ungathered) {
      gathered_index_components_seen++;
    }
  }
  std::vector<int64_t> dim_sizes_for_composed_index;
  std::vector<int64_t> output_dims_for_new_gather;
  for (int64_t i = 0, e = simulated_index.size(); i < e; i++) {
    if (simulated_index[i] != IndexComponent::Ungathered) {
      dim_sizes_for_composed_index.push_back(shape.dimensions(i));
      output_dims_for_new_gather.push_back(i);
    }
  }
  Array* inner_indices = ConstructScalarIndexedArray(
      x, y, source_dim_for_index_array, output_dims_for_index_array,
      ShapeUtil::MakeShape(x->shape().element_type(),
                           dim_sizes_for_composed_index));
  return ConstructScalarIndexedArray(a, inner_indices, source->source_dim(),
                                     output_dims_for_new_gather,
                                     std::move(shape));
}

// Recognizes gathers that behave like a "scalar indexed" select along one
// source dimension; everything else (vector indices, partial slices, extra
// collapsed dims) returns nullptr, meaning "not representable".
absl::StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayForGather(
    const Shape& shape, const GatherDimensionNumbers& dim_numbers,
    absl::Span<const int64_t> slice_sizes, Array* source, Array* indices) {
  if (dim_numbers.index_vector_dim() != indices->shape().dimensions_size()) {
    VLOG(3) << "ComputeArrayForGather: indices are not scalar";
    return nullptr;
  }
  CHECK_EQ(dim_numbers.start_index_map_size(), 1);
  if (dim_numbers.collapsed_slice_dims_size() != 1 ||
      dim_numbers.collapsed_slice_dims(0) != dim_numbers.start_index_map(0)) {
    VLOG(3) << "ComputeArrayForGather: gather operations must elide "
               "start_index_map[0] and "
               "start_index_map[0] only";
    return nullptr;
  }
  // Every non-collapsed dimension must be sliced in full.
  for (int64_t i = 0, e = source->shape().dimensions_size(); i < e; i++) {
    if (i != dim_numbers.collapsed_slice_dims(0) &&
        source->shape().dimensions(i) != slice_sizes[i]) {
      VLOG(3) << "ComputeArrayForGather: slice_sizes[" << i
              << "] != source->shape().dimensions(" << i << ") -- "
              << source->shape().dimensions(i) << " vs. " << slice_sizes[i]
              << " with dim_numbers.collapsed_slice_dims(0) = "
              << dim_numbers.collapsed_slice_dims(0);
      return nullptr;
    }
  }
  int64_t source_dim = dim_numbers.start_index_map(0);
  std::vector<int64_t> output_dims;
  for (int64_t i = 0, e = shape.dimensions_size(); i < e; i++) {
    if (!absl::c_binary_search(dim_numbers.offset_dims(), i)) {
      output_dims.push_back(i);
    }
  }
  if (auto* indexed = dynamic_cast<ScalarIndexedArray*>(source)) {
    // Gather-of-gather: try to compose the two index operations.
    if (absl::c_linear_search(indexed->output_dims(), source_dim)) {
      return FoldGatherOfGather(indexed, indices, source_dim, output_dims,
                                shape);
    }
  } else if (auto* constant = dynamic_cast<ConstantArray*>(source)) {
    return Construct<ScalarIndexedConstantArray>(constant, indices, source_dim,
                                                 output_dims, shape);
  }
  return Construct<ScalarIndexedArray>(source, indices, source_dim,
                                       output_dims, shape);
}

namespace {
// Returns the smallest index `i` such that the product of values[i..] equals
// `product`, or -1 if no suffix has that product.  Requires positive values.
int64_t FindSuffixWithProduct(absl::Span<const int64_t> values,
                              int64_t product) {
  DCHECK(absl::c_all_of(values, [](int64_t value) { return value > 0; }));
  int64_t current_product = 1;
  int64_t i;
  for (i = values.size() - 1; i >= 0 && product > current_product; --i) {
    current_product *= values[i];
  }
  if (product == current_product) {
    return i + 1;
  }
  return -1;
}

// A (result dimension, operand dimension) pair that a reshape maps through
// unchanged.
struct ReshapePassthroughDimPair {
  int64_t result_dim;
  int64_t operand_dim;
};

// Computes which dimensions of a reshape pass through untouched: result dim R
// maps to operand dim O when they have the same size and the same number of
// trailing elements after them on each side.
std::vector<ReshapePassthroughDimPair> ComputeReshapePassthroughDimPairs(
    absl::Span<const int64_t> operand_shape,
    absl::Span<const int64_t> result_shape) {
  std::vector<ReshapePassthroughDimPair> result;
  int64_t result_subarray_size = 1;
  for (int64_t result_dim = result_shape.size() - 1; result_dim >= 0;
       --result_dim) {
    int64_t candidate_operand_dim =
        FindSuffixWithProduct(operand_shape, result_subarray_size);
    // candidate_operand_dim == 0 would mean the whole operand is the suffix,
    // which cannot happen while result_dim is still in range.
    CHECK_NE(candidate_operand_dim, 0)
        << "result_dim = " << result_dim
        << ", result_subarray_size = " << result_subarray_size
        << ", result_shape = [" << StrJoin(result_shape, ",") << "]"
        << ", operand_shape = [" << StrJoin(operand_shape, ",") << "]";
    if (candidate_operand_dim != -1 &&
        result_shape[result_dim] == operand_shape[candidate_operand_dim - 1]) {
      result.push_back({result_dim, candidate_operand_dim - 1});
    }
    result_subarray_size *= result_shape[result_dim];
  }
  absl::c_reverse(result);
  if (VLOG_IS_ON(3)) {
    std::vector<std::string> result_strings;
    absl::c_transform(result, std::back_inserter(result_strings),
                      [](ReshapePassthroughDimPair value) {
                        return absl::StrCat(value.result_dim, "->",
                                            value.operand_dim);
                      });
    VLOG(3) << "For a reshape from [" << StrJoin(operand_shape, ",") << "] to ["
            << StrJoin(result_shape, ",") << "] passthrough indices are ["
            << StrJoin(result_strings, ",")
            << "] (legend: `result`->`operand`)";
  }
  DCHECK(absl::c_is_sorted(
      result, [](ReshapePassthroughDimPair lhs, ReshapePassthroughDimPair rhs) {
        return lhs.result_dim < rhs.result_dim;
      }));
  DCHECK(absl::c_is_sorted(
      result, [](ReshapePassthroughDimPair lhs, ReshapePassthroughDimPair rhs) {
        return lhs.operand_dim < rhs.operand_dim;
      }));
  return result;
}

// True if `dim` appears as an operand dimension in `passthrough_dims`.
bool IsReshapePassthroughOperandDim(
    absl::Span<const ReshapePassthroughDimPair> passthrough_dims, int64_t dim) {
  return absl::c_any_of(passthrough_dims,
                        [&](ReshapePassthroughDimPair passthrough_dim_pair) {
                          return passthrough_dim_pair.operand_dim == dim;
                        });
}

// Maps a passthrough operand dimension to its result dimension; CHECK-fails
// if `operand_dim` is not a passthrough dimension.
int64_t MapPassthroughOperandDimToResultDim(
    absl::Span<const ReshapePassthroughDimPair> passthrough_dims,
    int64_t operand_dim) {
  auto it = absl::c_find_if(
      passthrough_dims, [&](ReshapePassthroughDimPair passthrough_dim_pair) {
        return passthrough_dim_pair.operand_dim == operand_dim;
      });
  CHECK(it != passthrough_dims.end());
  return it->result_dim;
}

// Finds where `source_passthrough_dim` of `operand_shape` must land in
// `result_shape` for the element counts after it to line up; -1 if impossible.
int64_t FindSourcePositionForPassthroughResultDim(
    absl::Span<const int64_t> operand_shape,
    absl::Span<const int64_t> result_shape, int64_t source_passthrough_dim) {
  VLOG(3) << "FindSourcePositionForPassthroughResultDim(["
          << StrJoin(operand_shape, ",") << "], [" << StrJoin(result_shape, ",")
          << "], " << source_passthrough_dim << ")";
  int64_t indexed_source_subarray_size =
      std::accumulate(operand_shape.begin() + source_passthrough_dim + 1,
                      operand_shape.end(), 1LL, std::multiplies<int64_t>());
  return FindSuffixWithProduct(result_shape, indexed_source_subarray_size);
}

// Returns `shape` with all size-1 dimensions removed.
Shape StripDegenerateDimensions(const Shape& shape) {
  DimensionVector new_dims;
  absl::c_copy_if(shape.dimensions(), std::back_inserter(new_dims),
                  [](int64_t dim) { return dim != 1; });
  return ShapeUtil::MakeShape(shape.element_type(), new_dims);
}
};  // namespace

// Rewrites `operand` into an equivalent scalar-indexed array whose shape (and
// whose source's and indices' shapes) contain no degenerate (size-1)
// dimensions.  No-op when there is nothing to strip.
absl::StatusOr<ScalarIndexedArray*>
IndexedArrayAnalysis::ReshapeToRemoveDegenerateDims(
    ScalarIndexedArray* operand) {
  const Shape& shape = operand->shape();
  if (!ShapeUtil::HasDegenerateDimensions(shape)) {
    return operand;
  }
  // Drop degenerate dims from the source, but always keep the source dim
  // itself (even if it has size 1) since it is consumed by the gather.
  const Shape& source_shape = operand->source()->shape();
  DimensionVector new_source_shape_dims;
  for (int64_t i = 0, e = source_shape.dimensions_size(); i < e; i++) {
    if (i == operand->source_dim() || source_shape.dimensions(i) != 1) {
      new_source_shape_dims.push_back(source_shape.dimensions(i));
    }
  }
  Shape new_source_shape =
      ShapeUtil::MakeShape(shape.element_type(), new_source_shape_dims);
  Shape new_indices_shape =
      StripDegenerateDimensions(operand->indices()->shape());
  TF_ASSIGN_OR_RETURN(
      Array* const new_source,
      ComputeArrayForReshape(new_source_shape, operand->source()));
  TF_ASSIGN_OR_RETURN(
      Array* const new_indices,
      ComputeArrayForReshape(new_indices_shape, operand->indices()));
  // Re-number the output dims to account for the removed degenerate dims.
  DimensionVector new_output_dims;
  int64_t degenerate_dims_seen = 0;
  for (int64_t i = 0, e = shape.dimensions_size(); i < e; i++) {
    if (shape.dimensions(i) == 1) {
      degenerate_dims_seen++;
    } else if (absl::c_linear_search(operand->output_dims(), i)) {
      new_output_dims.push_back(i - degenerate_dims_seen);
    }
  }
  int64_t degenerate_dims_before_source_dim =
      std::count(source_shape.dimensions().begin(),
                 source_shape.dimensions().begin() + operand->source_dim(), 1);
  int64_t new_source_dim =
      operand->source_dim() - degenerate_dims_before_source_dim;
  return ConstructScalarIndexedArray(
      new_source, new_indices, new_source_dim,
      InlinedVectorToVector(new_output_dims),
      StripDegenerateDimensions(operand->shape()));
}

// Inverse of ReshapeToRemoveDegenerateDims: re-inserts size-1 dimensions at
// the positions listed in `degenerate_dims` (positions are in the padded
// result shape).  Precondition: `operand` has no degenerate dims.
absl::StatusOr<ScalarIndexedArray*>
IndexedArrayAnalysis::ReshapeToAddDegenerateDims(
    ScalarIndexedArray* operand, absl::Span<const int64_t> degenerate_dims) {
  if (degenerate_dims.empty()) {
    return operand;
  }
  CHECK(!ShapeUtil::HasDegenerateDimensions(operand->shape()));
  // Shift output dims to their positions in the padded result shape.
  DimensionVector new_output_dims = [&]() {
    absl::InlinedVector<bool, 6> output_dims_bitvector(
        operand->shape().dimensions_size());
    for (int64_t output_dim : operand->output_dims()) {
      output_dims_bitvector[output_dim] = true;
    }
    for (int64_t degenerate_dim : degenerate_dims) {
      InsertAt(&output_dims_bitvector, degenerate_dim, false);
    }
    DimensionVector result;
    result.reserve(operand->output_dims().size());
    for (int64_t i = 0, e = output_dims_bitvector.size(); i < e; i++) {
      if (output_dims_bitvector[i]) {
        result.push_back(i);
      }
    }
    return result;
  }();
  // Result shape = operand shape with the degenerate dims inserted.
  DimensionVector new_result_shape_dims;
  absl::c_copy(operand->shape().dimensions(),
               std::back_inserter(new_result_shape_dims));
  for (int64_t degenerate_dim : degenerate_dims) {
    InsertAt(&new_result_shape_dims, degenerate_dim, 1);
  }
  // Source shape = result shape minus the (gather) output dims.
  DimensionVector new_source_shape_dims = new_result_shape_dims;
  for (int64_t output_dim : new_output_dims) {
    EraseAt(&new_source_shape_dims, output_dim);
  }
  // Locate where the source dim belongs in the padded source shape: it is the
  // position preceded by exactly `operand->source_dim()` non-degenerate dims.
  // BUG FIX(review): the original declared `non_degenerate_dims_seen` inside
  // the loop (resetting it every iteration, so the search could only match a
  // source dim of 0) and indexed with `new_source_shape_dims[new_source_dim]`
  // — a read of the still-uninitialized variable being defined (undefined
  // behavior).  The counter now persists across iterations and indexes with
  // `i`.
  int64_t new_source_dim = [&]() {
    int64_t non_degenerate_dims_seen = 0;
    for (int i = 0, e = new_source_shape_dims.size(); i < e; i++) {
      if (non_degenerate_dims_seen == operand->source_dim()) {
        return i;
      }
      if (new_source_shape_dims[i] != 1) {
        non_degenerate_dims_seen++;
      }
    }
    LOG(FATAL) << "Did not find source dim in " << ToString(operand);
  }();
  int64_t source_dim_size =
      operand->source()->shape().dimensions(operand->source_dim());
  InsertAt(&new_source_shape_dims, new_source_dim, source_dim_size);
  Shape new_source_shape = ShapeUtil::MakeShape(
      operand->shape().element_type(), new_source_shape_dims);
  Shape new_result_shape = ShapeUtil::MakeShape(
      operand->shape().element_type(), new_result_shape_dims);
  TF_ASSIGN_OR_RETURN(
      Array* const new_source,
      ComputeArrayForReshape(new_source_shape, operand->source()));
  return ConstructScalarIndexedArray(
      new_source, operand->indices(), new_source_dim,
      InlinedVectorToVector(new_output_dims), new_result_shape);
}

// Folds reshape(scalar-indexed-const) into a scalar-indexed-const of shape
// `shape` when possible, by stripping degenerate dims, folding, and adding
// them back.  Returns nullptr when the fold is not possible.
absl::StatusOr<ScalarIndexedArray*> IndexedArrayAnalysis::FoldReshapeOfGather(
    const Shape& shape, ScalarIndexedConstantArray* operand) {
  VLOG(3) << "FoldReshapeOfGather(" << ToString(operand) << ")";
  TF_ASSIGN_OR_RETURN(ScalarIndexedArray* const operand_without_degenerate_dims,
                      ReshapeToRemoveDegenerateDims(operand));
  Shape output_shape_without_degenerate_dims = StripDegenerateDimensions(shape);
  TF_ASSIGN_OR_RETURN(
      ScalarIndexedArray* const folded_reshape_without_degenerate_dims,
      FoldReshapeOfGatherNoDegenerateDims(
          output_shape_without_degenerate_dims,
          operand_without_degenerate_dims->as<ScalarIndexedConstantArray>()));
  if (folded_reshape_without_degenerate_dims == nullptr) {
    return nullptr;
  }
  DimensionVector degenerate_result_dims;
  for (int64_t i = 0, e = shape.dimensions_size(); i < e; i++) {
    if (shape.dimensions(i) == 1) {
      degenerate_result_dims.push_back(i);
    }
  }
  return ReshapeToAddDegenerateDims(folded_reshape_without_degenerate_dims,
                                    degenerate_result_dims);
}

// Core of FoldReshapeOfGather, for shapes with no degenerate dimensions: the
// fold succeeds when every output (index) dim of the gather passes through
// the reshape unchanged, in which case the reshape is pushed onto the
// constant source literal instead.  Returns nullptr when not foldable.
absl::StatusOr<ScalarIndexedArray*>
IndexedArrayAnalysis::FoldReshapeOfGatherNoDegenerateDims(
    const Shape& shape, ScalarIndexedConstantArray* scalar_indexed) {
  VLOG(3) << "FoldReshapeOfGatherNoDegenerateDims(" << ToString(scalar_indexed)
          << ")";
  CHECK(!ShapeUtil::HasDegenerateDimensions(shape));
  CHECK(!ShapeUtil::HasDegenerateDimensions(scalar_indexed->shape()));
  std::vector<ReshapePassthroughDimPair> reshape_passthrough_dims =
      ComputeReshapePassthroughDimPairs(scalar_indexed->shape().dimensions(),
                                        shape.dimensions());
  auto is_reshape_passthrough_operand_dim = [&](int64_t operand_dim) {
    return IsReshapePassthroughOperandDim(reshape_passthrough_dims,
                                          operand_dim);
  };
  if (!absl::c_all_of(scalar_indexed->output_dims(),
                      is_reshape_passthrough_operand_dim)) {
    VLOG(3) << "Not all output dims are passthrough dims "
            << ToString(scalar_indexed);
    return nullptr;
  }
  // New source shape: the reshape result shape with the (reshaped) output
  // dims removed; erase from the back so earlier indices stay valid.
  std::vector<int64_t> new_scalar_indexed_source_shape(
      shape.dimensions().begin(), shape.dimensions().end());
  for (int64_t i = scalar_indexed->output_dims().size() - 1; i >= 0; i--) {
    int64_t output_dim = scalar_indexed->output_dims()[i];
    int64_t output_dim_after_reshape = MapPassthroughOperandDimToResultDim(
        reshape_passthrough_dims, output_dim);
    EraseAt(&new_scalar_indexed_source_shape, output_dim_after_reshape);
  }
  const Shape& scalar_indexed_source_shape = scalar_indexed->source()->shape();
  int64_t source_dim_for_new_scalar_indexed_node =
      FindSourcePositionForPassthroughResultDim(
          scalar_indexed_source_shape.dimensions(),
          new_scalar_indexed_source_shape, scalar_indexed->source_dim());
  if (source_dim_for_new_scalar_indexed_node == -1) {
    VLOG(3) << "Could not compute the source dim for the new scalar indexed "
               "node: scalar_indexed_source_shape = ["
            << StrJoin(scalar_indexed_source_shape.dimensions(), ",")
            << "] and new_scalar_indexed_source_shape = ["
            << StrJoin(new_scalar_indexed_source_shape, ",") << "]";
    return nullptr;
  }
  InsertAt(
      &new_scalar_indexed_source_shape, source_dim_for_new_scalar_indexed_node,
      scalar_indexed_source_shape.dimensions(scalar_indexed->source_dim()));
  // Sanity: the new source shape must preserve the element count, and the
  // source dim must itself be a passthrough dim of the source reshape.
  CHECK_EQ(absl::c_accumulate(new_scalar_indexed_source_shape, 1LL,
                              std::multiplies<int64_t>()),
           ShapeUtil::ElementsIn(scalar_indexed_source_shape));
  CHECK(IsReshapePassthroughOperandDim(
      ComputeReshapePassthroughDimPairs(
          scalar_indexed_source_shape.dimensions(),
          new_scalar_indexed_source_shape),
      scalar_indexed->source_dim()));
  auto map_passthrough_operand_dim_to_result_dim = [&](int64_t result_dim) {
    return MapPassthroughOperandDimToResultDim(reshape_passthrough_dims,
                                               result_dim);
  };
  std::vector<int64_t> output_dims_for_new_scalar_indexed_node;
  absl::c_transform(scalar_indexed->output_dims(),
                    std::back_inserter(output_dims_for_new_scalar_indexed_node),
                    map_passthrough_operand_dim_to_result_dim);
  TF_ASSIGN_OR_RETURN(const Literal* new_scalar_indexed_source_literal,
                      TakeOwnership(scalar_indexed->literal().Reshape(
                          new_scalar_indexed_source_shape)));
  TF_ASSIGN_OR_RETURN(
      Array * new_scalar_indexed_source,
      ComputeArrayForConstant(*new_scalar_indexed_source_literal));
  return ConstructScalarIndexedArray(
      new_scalar_indexed_source, scalar_indexed->indices(),
      source_dim_for_new_scalar_indexed_node,
      output_dims_for_new_scalar_indexed_node, shape);
}

// Handles kReshape: try folding into an existing gather, constant-fold if the
// operand is a constant, otherwise record an explicit ReshapedArray node.
absl::StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayForReshape(
    const Shape& shape, Array* operand) {
  if (ShapeUtil::Compatible(operand->shape(), shape)) {
    return operand;
  }
  if (auto* scalar_indexed =
          dynamic_cast<ScalarIndexedConstantArray*>(operand)) {
    TF_ASSIGN_OR_RETURN(Analysis::Array * reshape_folded_into_gather,
                        FoldReshapeOfGather(shape, scalar_indexed));
    if (reshape_folded_into_gather) {
      return reshape_folded_into_gather;
    }
  }
  if (auto* constant_array = dynamic_cast<ConstantArray*>(operand)) {
    TF_ASSIGN_OR_RETURN(
        Literal* const new_literal,
        TakeOwnership(constant_array->literal()->Reshape(shape.dimensions())));
    return Construct<ConstantArray>(new_literal);
  }
  return Construct<ReshapedArray>(operand, shape);
}

// Folds BinaryOp(gather(const, idx), broadcast(const)) — in either operand
// order — into gather(BinaryOp(const, const'), idx), evaluating the folded
// constant with HloEvaluator.  Returns nullptr when the pattern is absent.
absl::StatusOr<Analysis::Array*>
IndexedArrayAnalysis::ComputeArrayForElementwiseBinaryOp(HloOpcode opcode,
                                                         Array* lhs,
                                                         Array* rhs) {
  ScalarIndexedConstantArray* lhs_scalar_indexed_const =
      dynamic_cast<ScalarIndexedConstantArray*>(lhs);
  ScalarIndexedConstantArray* rhs_scalar_indexed_const =
      dynamic_cast<ScalarIndexedConstantArray*>(rhs);
  bool lhs_is_indexed;
  if (lhs_scalar_indexed_const && !rhs_scalar_indexed_const) {
    lhs_is_indexed = true;
  } else if (rhs_scalar_indexed_const && !lhs_scalar_indexed_const) {
    lhs_is_indexed = false;
  } else {
    return nullptr;
  }
  ScalarIndexedConstantArray* scalar_indexed_const =
      lhs_is_indexed ? lhs_scalar_indexed_const : rhs_scalar_indexed_const;
  UnknownArray* candidate_broadcast_array =
      dynamic_cast<UnknownArray*>(lhs_is_indexed ? rhs : lhs);
  if (!candidate_broadcast_array ||
      candidate_broadcast_array->instruction().opcode() !=
          HloOpcode::kBroadcast) {
    return nullptr;
  }
  const HloInstruction* broadcast_instr =
      &candidate_broadcast_array->instruction();
  const HloInstruction* broadcast_const_operand = broadcast_instr->operand(0);
  if (broadcast_const_operand->opcode() != HloOpcode::kConstant) {
    return nullptr;
  }
  absl::Span<const int64_t> broadcast_dims = broadcast_instr->dimensions();
  auto is_broadcasted_dim = [&](int64_t output_dim) {
    return absl::c_find(broadcast_dims, output_dim) == broadcast_dims.end();
  };
  // The fold is only sound if the broadcast value is invariant along every
  // gather output dim (i.e. those dims were created by the broadcast).
  if (!absl::c_all_of(scalar_indexed_const->output_dims(),
                      is_broadcasted_dim)) {
    return nullptr;
  }
  enum class IndexComponent { Broadcasted, NotBroadcasted };
  // Track, per result dim, whether the broadcast created it; then rewrite
  // that map from the gather-output space into the gather-source space.
  std::vector<IndexComponent> simulated_index(
      broadcast_instr->shape().dimensions_size(), IndexComponent::Broadcasted);
  for (int64_t broadcast_dim : broadcast_dims) {
    simulated_index[broadcast_dim] = IndexComponent::NotBroadcasted;
  }
  absl::Span<const int64_t> output_dims = scalar_indexed_const->output_dims();
  for (int64_t i = output_dims.size() - 1; i >= 0; --i) {
    CHECK(simulated_index[output_dims[i]] == IndexComponent::Broadcasted);
    EraseAt(&simulated_index, output_dims[i]);
  }
  InsertAt(&simulated_index, scalar_indexed_const->source_dim(),
           IndexComponent::Broadcasted);
  std::vector<int64_t> new_inner_broadcast_dims;
  for (int64_t i = 0; i < simulated_index.size(); i++) {
    if (simulated_index[i] == IndexComponent::NotBroadcasted) {
      new_inner_broadcast_dims.push_back(i);
    }
  }
  TF_ASSIGN_OR_RETURN(
      Literal inner_broadcast_result,
      broadcast_const_operand->literal().Broadcast(
          scalar_indexed_const->source()->shape(), new_inner_broadcast_dims));
  // Preserve operand order: the binary op may not be commutative.
  const Literal* literal_for_new_source;
  if (lhs_is_indexed) {
    TF_ASSIGN_OR_RETURN(
        literal_for_new_source,
        TakeOwnership(HloEvaluator{}.EvaluateElementwiseBinaryOp(
            opcode, scalar_indexed_const->literal(), inner_broadcast_result)));
  } else {
    TF_ASSIGN_OR_RETURN(
        literal_for_new_source,
        TakeOwnership(HloEvaluator{}.EvaluateElementwiseBinaryOp(
            opcode, inner_broadcast_result, scalar_indexed_const->literal())));
  }
  ConstantArray* new_source = Construct<ConstantArray>(literal_for_new_source);
  return Construct<ScalarIndexedConstantArray>(
      new_source, scalar_indexed_const->indices(),
      scalar_indexed_const->source_dim(),
      std::vector<int64_t>(scalar_indexed_const->output_dims().begin(),
                           scalar_indexed_const->output_dims().end()),
      scalar_indexed_const->shape());
}

// Folds UnaryOp(gather(const, idx)) into gather(UnaryOp(const), idx).
absl::StatusOr<Analysis::Array*>
IndexedArrayAnalysis::ComputeArrayForElementwiseUnaryOp(HloOpcode opcode,
                                                        Array* operand) {
  auto* scalar_indexed_const =
      dynamic_cast<ScalarIndexedConstantArray*>(operand);
  if (scalar_indexed_const == nullptr) {
    return nullptr;
  }
  TF_ASSIGN_OR_RETURN(Literal * literal_for_new_source,
                      TakeOwnership(HloEvaluator{}.EvaluateElementwiseUnaryOp(
                          opcode, scalar_indexed_const->literal())));
  ConstantArray* new_source = Construct<ConstantArray>(literal_for_new_source);
  return Construct<ScalarIndexedConstantArray>(
      new_source, scalar_indexed_const->indices(),
      scalar_indexed_const->source_dim(),
      SpanToVector(scalar_indexed_const->output_dims()),
      scalar_indexed_const->shape());
}

namespace {
// Returns the unique dimension in [0, rank) that is neither contracting nor
// batch, or nullopt if there are zero or several such dimensions.
std::optional<int64_t> GetOnlyNonContractingNonBatchDim(
    int64_t rank, absl::Span<const int64_t> contracting_dims,
    absl::Span<const int64_t> batch_dims) {
  std::optional<int64_t> result;
  for (int64_t dim = 0; dim < rank; dim++) {
    if (!absl::c_linear_search(contracting_dims, dim) &&
        !absl::c_linear_search(batch_dims, dim)) {
      if (result.has_value()) {
        return std::nullopt;
      }
      result = dim;
    }
  }
  return result;
}

// Checks the preconditions for folding a dot into a scalar-indexed-const
// operand: exactly one free dim, which must be the single gather output dim,
// and the gather's source dim must be one of the two minor-most dims.  `tag`
// is only used for VLOG diagnostics.
bool CanFoldDotIntoIndexedArray(
    absl::string_view tag, Analysis::ScalarIndexedConstantArray* indexed_array,
    absl::Span<const int64_t> contracting_dims,
    absl::Span<const int64_t> batch_dims) {
  std::optional<int64_t> non_contracting_non_batch_dim =
      GetOnlyNonContractingNonBatchDim(indexed_array->shape().rank(),
                                       contracting_dims, batch_dims);
  if (!non_contracting_non_batch_dim.has_value()) {
    VLOG(3) << tag << ": multiple or no non-contracting non-batch dimensions";
    return false;
  }
  if (indexed_array->output_dims().size() != 1 ||
      indexed_array->output_dims()[0] != *non_contracting_non_batch_dim) {
    VLOG(3) << tag << ": output dims != the lhs non-contracting non-batch dim";
    return false;
  }
  int64_t indexed_array_rank = indexed_array->shape().rank();
  if (indexed_array->source_dim() < (indexed_array_rank - 2)) {
    VLOG(3) << tag
            << ": source dim is not in the low two dims, won't be able to form "
               "a matmul";
    return false;
  }
  return true;
}
}  // namespace

// Folds dot(gather(const_A, idx), const_B) into gather(dot(A', B), idx) by
// evaluating the inner dot on the un-gathered source.
absl::StatusOr<Analysis::Array*>
IndexedArrayAnalysis::ComputeArrayForDotWithIndexedLhs(
    const Shape& shape, const DotDimensionNumbers& dim_numbers,
    const PrecisionConfig& precision_config, ScalarIndexedConstantArray* lhs,
    ConstantArray* rhs) {
  VLOG(3) << "ComputeArrayForDotWithIndexedLhs(" << ToString(lhs) << " "
          << ToString(rhs);
  if (!CanFoldDotIntoIndexedArray(
          "ComputeArrayForDotWithIndexedLhs", lhs,
          dim_numbers.lhs_contracting_dimensions(),
          dim_numbers.lhs_batch_dimensions())) {
    return nullptr;
  }
  int64_t lhs_rank = lhs->shape().rank();
  DotDimensionNumbers new_dim_numbers = dim_numbers;
  // In the source array the gather output dim is replaced by the source dim,
  // so contract over whichever of the two minor dims is not the source dim.
  new_dim_numbers.set_lhs_contracting_dimensions(
      0, lhs->source_dim() == (lhs_rank - 1) ? (lhs_rank - 2) : (lhs_rank - 1));
  TF_ASSIGN_OR_RETURN(
      Literal * literal_for_new_source,
      TakeOwnership(HloEvaluator{}.EvaluateDotOp(
          new_dim_numbers, precision_config, lhs->literal(), *rhs->literal())));
  // The new source dim is the lhs free dim of the inner dot result, which
  // comes right after all batch dims.
  int64_t new_source_dim = dim_numbers.lhs_batch_dimensions_size() +
                           dim_numbers.rhs_batch_dimensions_size();
  ConstantArray* new_source = Construct<ConstantArray>(literal_for_new_source);
  return Construct<ScalarIndexedConstantArray>(
      new_source, lhs->indices(), new_source_dim,
      SpanToVector(lhs->output_dims()), shape);
}

// Mirror of ComputeArrayForDotWithIndexedLhs for an indexed RHS.
absl::StatusOr<Analysis::Array*>
IndexedArrayAnalysis::ComputeArrayForDotWithIndexedRhs(
    const Shape& shape, const DotDimensionNumbers& dim_numbers,
    const PrecisionConfig& precision_config, ConstantArray* lhs,
    ScalarIndexedConstantArray* rhs) {
  VLOG(3) << "ComputeArrayForDotWithIndexedRhs(" << ToString(lhs) << " "
          << ToString(rhs);
  if (!CanFoldDotIntoIndexedArray(
          "ComputeArrayForDotWithIndexedRhs", rhs,
          dim_numbers.rhs_contracting_dimensions(),
          dim_numbers.rhs_batch_dimensions())) {
    return nullptr;
  }
  int64_t rhs_rank = rhs->shape().rank();
  DotDimensionNumbers new_dim_numbers = dim_numbers;
  new_dim_numbers.set_rhs_contracting_dimensions(
      0, rhs->source_dim() == (rhs_rank - 1) ? (rhs_rank - 2) : (rhs_rank - 1));
  TF_ASSIGN_OR_RETURN(
      Literal * literal_for_new_source,
      TakeOwnership(HloEvaluator{}.EvaluateDotOp(
          new_dim_numbers, precision_config, *lhs->literal(), rhs->literal())));
  // The rhs free dim follows all batch dims and the lhs free dim (+ 1).
  int64_t new_source_dim = dim_numbers.lhs_batch_dimensions_size() +
                           dim_numbers.rhs_batch_dimensions_size() + 1;
  ConstantArray* new_source = Construct<ConstantArray>(literal_for_new_source);
  return Construct<ScalarIndexedConstantArray>(
      new_source, rhs->indices(), new_source_dim,
      SpanToVector(rhs->output_dims()), shape);
}

// Handles kDot: dispatches to the indexed-lhs or indexed-rhs fold when one
// operand is a scalar-indexed constant and the other a plain constant.
absl::StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayForDot(
    const Shape& shape, const DotDimensionNumbers& dim_numbers,
    const PrecisionConfig& precision_config, Array* lhs, Array* rhs) {
  VLOG(3) << "ComputeArrayForDot(" << ToString(lhs) << " " << ToString(rhs);
  if (auto* lhs_indexed_array =
          dynamic_cast<ScalarIndexedConstantArray*>(lhs)) {
    if (auto* rhs_constant = dynamic_cast<ConstantArray*>(rhs)) {
      return ComputeArrayForDotWithIndexedLhs(shape, dim_numbers,
                                              precision_config,
                                              lhs_indexed_array, rhs_constant);
    }
  }
  if (auto* rhs_indexed_array =
          dynamic_cast<ScalarIndexedConstantArray*>(rhs)) {
    if (auto* lhs_constant = dynamic_cast<ConstantArray*>(lhs)) {
      return ComputeArrayForDotWithIndexedRhs(shape, dim_numbers,
                                              precision_config, lhs_constant,
                                              rhs_indexed_array);
    }
  }
  return nullptr;
}

// Debug-only pass: logs the analysis result for every interesting (neither
// unknown nor plain-constant) instruction.  Never mutates the module.
absl::StatusOr<bool> IndexedArrayAnalysisPrinterPass::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  if (!VLOG_IS_ON(2)) {
    return false;
  }
  IndexedArrayAnalysis analysis;
  for (auto* computation :
       module->MakeNonfusionComputations(execution_threads)) {
    for (auto* instr : computation->instructions()) {
      TF_ASSIGN_OR_RETURN(Analysis::Array * t, analysis.GetArrayFor(instr));
      if (!dynamic_cast<UnknownArray*>(t) && !dynamic_cast<ConstantArray*>(t)) {
        VLOG(2) << instr->ToString() << " -> " << analysis.ToString(t);
      }
    }
  }
  return false;
}

}  // namespace xla
// Unit tests for IndexedArrayAnalysis. Each test parses an HLO module, runs
// the analysis on the entry computation's root instruction, pretty-prints the
// resulting array expression, and compares it (whitespace-canonicalized)
// against an expected expression string.
#include "xla/service/indexed_array_analysis.h"

#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/strings/ascii.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace {

class IndexedArrayAnalysisTest : public HloTestBase {
 protected:
  // Asserts the analysis result for the module root matches
  // `root_expression`, with constant literal contents elided.
  void AssertArrayForRootExpressionIs(const std::string& hlo_text,
                                      const std::string& root_expression) {
    AssertArrayForRootExpressionIsImpl(hlo_text, root_expression, false);
  }

  // Same as above but also prints the contents of constant literals, so
  // tests can check compile-time-folded values.
  void AssertArrayWithConstantsForRootExpressionIs(
      const std::string& hlo_text, const std::string& root_expression) {
    AssertArrayForRootExpressionIsImpl(hlo_text, root_expression, true);
  }

 private:
  // Collapses every whitespace run to a single space and strips trailing
  // spaces, so expected strings can be written with free-form layout.
  std::string CanonicalizeWhitespace(const std::string& text) {
    std::string result;
    for (char c : text) {
      if (!absl::ascii_isspace(c)) {
        result.push_back(c);
      } else if (!result.empty() && result.back() != ' ') {
        result.push_back(' ');
      }
    }
    while (!result.empty() && result.back() == ' ') {
      result.pop_back();
    }
    return result;
  }

  // Shared implementation: parse, analyze the root, canonicalize, compare.
  void AssertArrayForRootExpressionIsImpl(const std::string& hlo_text,
                                          const std::string& root_expression,
                                          bool print_constants) {
    IndexedArrayAnalysis indexed_tensor_analysis;
    TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
                            ParseAndReturnVerifiedModule(hlo_text));
    TF_ASSERT_OK_AND_ASSIGN(IndexedArrayAnalysis::Array* const array_result,
                            indexed_tensor_analysis.GetArrayFor(
                                m->entry_computation()->root_instruction()));
    std::string string_result = CanonicalizeWhitespace(
        indexed_tensor_analysis.ToString(array_result, print_constants));
    LOG(INFO) << string_result;
    ASSERT_EQ(string_result, CanonicalizeWhitespace(root_expression));
  }
};

// --- Basic gathers: which ones are recognized as scalar-indexed arrays. ---

// A gather that selects whole rows by scalar index maps to `scalar-indexed`.
TEST_F(IndexedArrayAnalysisTest, SimpleOneToOneGather) {
  std::string hlo_text = R"( HloModule SimpleGather ENTRY main { operand = s32[3,3] parameter(0) indices = s32[5] parameter(1) ROOT gather = s32[5,3] gather(operand, indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,3} } )";
  AssertArrayForRootExpressionIs(hlo_text,
                                 "(scalar-indexed %operand %indices 0->[0])");
}

// Same pattern with a constant operand maps to `scalar-indexed-const`.
TEST_F(IndexedArrayAnalysisTest, SimpleOneToOneConstantGather) {
  std::string hlo_text = R"( HloModule SimpleGather ENTRY main { operand = s32[3,3] constant({{1,2,3},{1,2,3},{1,2,3}}) indices = s32[5] parameter(0) ROOT gather = s32[5,3] gather(operand, indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,3} } )";
  AssertArrayForRootExpressionIs(
      hlo_text, "(scalar-indexed-const (constant s32[3,3]) %indices 0->[0])");
}

// Multi-component start indices: not scalar-indexed.
TEST_F(IndexedArrayAnalysisTest, GatherIsNotScalarIndexed0) {
  std::string hlo_text = R"( HloModule SimpleGather ENTRY main { operand = s32[3,3] constant({{1,2,3},{1,2,3},{1,2,3}}) indices = s32[5,2] parameter(0) ROOT gather = s32[5] gather(operand, indices), offset_dims={}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=1, slice_sizes={1,1} } )";
  AssertArrayForRootExpressionIs(hlo_text, "%gather");
}

// Extra collapsed slice dimension: not scalar-indexed.
TEST_F(IndexedArrayAnalysisTest, GatherIsNotScalarIndexed1) {
  std::string hlo_text = R"( HloModule SimpleGather ENTRY main { operand = s32[3,3,1] parameter(0) indices = s32[5] parameter(1) ROOT gather = s32[5,3] gather(operand, indices), offset_dims={1}, collapsed_slice_dims={0,2}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,3,1} } )";
  AssertArrayForRootExpressionIs(hlo_text, "%gather");
}

// Slice wider than one along the indexed dimension: not scalar-indexed.
TEST_F(IndexedArrayAnalysisTest, GatherIsNotScalarIndexed2) {
  std::string hlo_text = R"( HloModule SimpleGather ENTRY main { operand = s32[3,3,1] parameter(0) indices = s32[5] parameter(1) ROOT gather = s32[5,2,3] gather(operand, indices), offset_dims={1,2}, collapsed_slice_dims={2}, start_index_map={0}, index_vector_dim=1, slice_sizes={2,3,1} } )";
  AssertArrayForRootExpressionIs(hlo_text, "%gather");
}

// Partial (non-full) slice of the offset dimension: not scalar-indexed.
TEST_F(IndexedArrayAnalysisTest, GatherIsNotScalarIndexed3) {
  std::string hlo_text = R"( HloModule SimpleGather ENTRY main { operand = s32[3,3] parameter(0) indices = s32[5] parameter(1) ROOT gather = s32[5,2] gather(operand, indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,2} } )";
  AssertArrayForRootExpressionIs(hlo_text, "%gather");
}

// --- Gather-of-gather composition: indices compose into one gather. ---

TEST_F(IndexedArrayAnalysisTest, GatherOfGather_OneToOne) {
  std::string hlo_text = R"( HloModule SimpleGather ENTRY main { operand = s32[3,3] constant({{1,2,3},{1,2,3},{1,2,3}}) indices_a = s32[5] parameter(0) indices_b = s32[2] parameter(1) gather_a = s32[5,3] gather(operand, indices_a), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,3} ROOT gather_b = s32[2,3] gather(gather_a, indices_b), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,3} } )";
  AssertArrayForRootExpressionIs(
      hlo_text,
      "(scalar-indexed-const (constant s32[3,3]) (scalar-indexed %indices_a "
      "%indices_b 0->[0]) 0->[0])");
}

TEST_F(IndexedArrayAnalysisTest, GatherOfGather_ManyToOneWithOneToOne) {
  std::string hlo_text = R"( HloModule SimpleGather ENTRY main { operand = s32[3,2] parameter(0) indices_a = s32[5,7] parameter(1) indices_b = s32[2] parameter(2) gather_a = s32[5,3,7] gather(operand, indices_a), offset_dims={1}, collapsed_slice_dims={1}, start_index_map={1}, index_vector_dim=2, slice_sizes={3,1} ROOT gather_b = s32[5,3,2] gather(gather_a, indices_b), offset_dims={0,1}, collapsed_slice_dims={2}, start_index_map={2}, index_vector_dim=1, slice_sizes={5,3,1} } )";
  AssertArrayForRootExpressionIs(hlo_text,
                                 "(scalar-indexed %operand (scalar-indexed "
                                 "%indices_a %indices_b 1->[1]) 1->[0,2])");
}

TEST_F(IndexedArrayAnalysisTest, GatherOfGather_OneToOneWithManyToOne) {
  std::string hlo_text = R"( HloModule SimpleGather ENTRY main { operand = s32[3,6] parameter(0) indices_a = s32[2] parameter(1) indices_b = s32[5,7] parameter(2) gather_a = s32[2,6] gather(operand, indices_a), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,6} ROOT gather_b = s32[5,6,7] gather(gather_a, indices_b), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=2, slice_sizes={1,6} } )";
  AssertArrayForRootExpressionIs(hlo_text,
                                 "(scalar-indexed %operand (scalar-indexed "
                                 "%indices_a %indices_b 0->[0,1]) 0->[0,2])");
}

TEST_F(IndexedArrayAnalysisTest, GatherOfGather_ManyToOneWithManyToOne) {
  std::string hlo_text = R"( HloModule SimpleGather ENTRY main { operand = s32[3,2] parameter(0) indices_a = s32[5,7] parameter(1) indices_b = s32[4,8] parameter(2) gather_a = s32[5,3,7] gather(operand, indices_a), offset_dims={1}, collapsed_slice_dims={1}, start_index_map={1}, index_vector_dim=2, slice_sizes={3,1} ROOT gather_b = s32[4,5,3,8] gather(gather_a, indices_b), offset_dims={1,2}, collapsed_slice_dims={2}, start_index_map={2}, index_vector_dim=2, slice_sizes={5,3,1} } )";
  AssertArrayForRootExpressionIs(
      hlo_text,
      "(scalar-indexed %operand (scalar-indexed %indices_a %indices_b "
      "1->[0,2]) 1->[0,1,3])");
}

// --- Reshape of gather: reshapes that only touch non-indexed (passthrough)
// dimensions are folded into the constant source. ---

TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather0) {
  std::string hlo_text = R"( HloModule ReshapeOfGather ENTRY main { operand = s32[3,4] constant({{1,2,3,4},{1,2,3,4},{1,2,3,4}}) indices = s32[5] parameter(0) gather = s32[5,4] gather(operand, indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,4} ROOT reshape = s32[5,2,2] reshape(gather) } )";
  AssertArrayForRootExpressionIs(
      hlo_text, "(scalar-indexed-const (constant s32[3,2,2]) %indices 0->[0])");
}

TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather1) {
  std::string hlo_text = R"( HloModule ReshapeOfGather ENTRY main { operand = s32[3,4] constant({{1,2,3,4},{1,2,3,4},{1,2,3,4}}) indices = s32[5,7] parameter(0) gather = s32[5,4,7] gather(operand, indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=2, slice_sizes={1,4} ROOT reshape = s32[5,2,2,7] reshape(gather) } )";
  AssertArrayForRootExpressionIs(
      hlo_text,
      "(scalar-indexed-const (constant s32[3,2,2]) %indices 0->[0,3])");
}

TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather2) {
  std::string hlo_text = R"( HloModule ReshapeOfGather ENTRY main { operand = s32[3,2,6] constant({ {{1,2,3,4,5,6},{1,2,3,4,5,6}}, {{1,2,3,4,5,6},{1,2,3,4,5,6}}, {{1,2,3,4,5,6},{1,2,3,4,5,6}}}) indices = s32[5,7] parameter(0) gather = s32[5,2,6,7] gather(operand, indices), offset_dims={1,2}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=2, slice_sizes={1,2,6} ROOT reshape = s32[5,3,4,7] reshape(gather) } )";
  AssertArrayForRootExpressionIs(
      hlo_text,
      "(scalar-indexed-const (constant s32[3,3,4]) %indices 0->[0,3])");
}

// Degenerate (size-1) output dimensions get absorbed by reshaping the
// indices and source instead of the whole result.
TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather3) {
  std::string hlo_text = R"( HloModule ReshapeOfGather ENTRY main { operand = s32[2,6] constant({ {1,2,3,4,5,6},{1,2,3,4,5,6}}) indices = s32[1] parameter(0) gather = s32[1,6] gather(operand, indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,6} ROOT reshape = s32[1,1,6] reshape(gather) } )";
  const char* expected_root_expression = R"( (scalar-indexed-const (constant s32[2,1,1,6]) (reshape %indices to s32[]) 0->[]) )";
  AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}

TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather4) {
  std::string hlo_text = R"( HloModule ReshapeOfGather ENTRY main { operand = s32[2,3]{1,0} constant({ { 1, 2, 3 }, { 1, 2, 3 } }) i.0 = s64[1,3]{1,0} parameter(0) g.0 = s32[1,3,3]{2,1,0} gather(operand, i.0), offset_dims={2}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=2, slice_sizes={1,3} i.1 = s64[1] parameter(1) g.1 = s32[1,1,3]{2,1,0} gather(g.0, i.1), offset_dims={0,2}, collapsed_slice_dims={1}, start_index_map={1}, index_vector_dim=1, slice_sizes={1,1,3} ROOT reshape = s32[1,3]{1,0} reshape(g.1) } )";
  const char* expected_root_expression = R"( (scalar-indexed-const (constant s32[2,1,3]) (reshape (scalar-indexed %i.0 %i.1 1->[1]) to s64[]) 0->[]) )";
  AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}

TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather5) {
  std::string hlo_text = R"( HloModule ReshapeOfGather ENTRY main { operand = s32[1,6] constant({{1,2,3,4,5,6}}) indices = s32[1] parameter(0) gather = s32[1,6] gather(operand, indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,6} ROOT reshape = s32[1,1,6] reshape(gather) } )";
  const char* expected_root_expression = R"( (scalar-indexed-const (constant s32[1,1,1,6]) (reshape %indices to s32[]) 0->[]) )";
  AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}

TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather6) {
  std::string hlo_text = R"( HloModule ReshapeOfGather ENTRY main { operand = s32[1,2,6] constant({{ {1,2,3,4,5,6},{1,2,3,4,5,6}}}) indices = s32[1] parameter(0) gather = s32[1,1,6] gather(operand, indices), offset_dims={1,2}, collapsed_slice_dims={1}, start_index_map={1}, index_vector_dim=1, slice_sizes={1,1,6} ROOT reshape = s32[1,1,1,6] reshape(gather) } )";
  const char* expected_root_expression = R"( (scalar-indexed-const (constant s32[2,1,1,1,6] s32[2,1,1,1,6] { { { { { 1, 2, 3, 4, 5, 6 } } } }, { { { { 1, 2, 3, 4, 5, 6 } } } } }) (reshape %indices to s32[]) 0->[]) )";
  AssertArrayWithConstantsForRootExpressionIs(hlo_text,
                                              expected_root_expression);
}

TEST_F(IndexedArrayAnalysisTest, ReshapeOfGather7) {
  std::string hlo_text = R"( HloModule ReshapeOfGather ENTRY main { operand = s32[2,6] constant({ {1,2,3,4,5,6},{1,2,3,4,5,6}}) indices = s32[1,5] parameter(0) gather = s32[1,5,6] gather(operand, indices), offset_dims={2}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=2, slice_sizes={1,6} ROOT reshape = s32[1,1,5,6] reshape(gather) } )";
  const char* expected_root_expression = R"( (scalar-indexed-const (constant s32[2,1,1,6] s32[2,1,1,6] { { { { 1, 2, 3, 4, 5, 6 } } }, { { { 1, 2, 3, 4, 5, 6 } } } }) (reshape %indices to s32[5]) 0->[2]) )";
  AssertArrayWithConstantsForRootExpressionIs(hlo_text,
                                              expected_root_expression);
}

// --- Reshapes that mix indexed and passthrough dims cannot be folded; the
// reshape stays on the outside. ---

TEST_F(IndexedArrayAnalysisTest, ReshapeOfGatherNoFold0) {
  std::string hlo_text = R"( HloModule ReshapeOfGather ENTRY main { operand = s32[3,4] constant({{1,2,3,4},{1,2,3,4},{1,2,3,4}}) indices = s32[5,6] parameter(0) gather = s32[5,4,6] gather(operand, indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=2, slice_sizes={1,4} ROOT reshape = s32[5,2,2,2,3] reshape(gather) } )";
  const char* expected_root_expression = R"( (reshape (scalar-indexed-const (constant s32[3,4]) %indices 0->[0,2]) to s32[5,2,2,2,3]) )";
  AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}

TEST_F(IndexedArrayAnalysisTest, ReshapeOfGatherNoFold1) {
  std::string hlo_text = R"( HloModule ReshapeOfGather ENTRY main { operand = s32[3,5,2] constant({ {{1,2},{3,4},{5,6},{7,8},{9,10}}, {{1,2},{3,4},{5,6},{7,8},{9,10}}, {{1,2},{3,4},{5,6},{7,8},{9,10}}}) indices = s32[7] parameter(0) gather = s32[3,2,7] gather(operand, indices), offset_dims={0,1}, collapsed_slice_dims={1}, start_index_map={1}, index_vector_dim=1, slice_sizes={3,1,2} ROOT reshape = s32[6,7] reshape(gather) } )";
  const char* expected_root_expression = R"( (reshape (scalar-indexed-const (constant s32[3,5,2]) %indices 1->[2]) to s32[6,7]) )";
  AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}

TEST_F(IndexedArrayAnalysisTest, ReshapeOfGatherNoFold2) {
  std::string hlo_text = R"( HloModule ReshapeOfGather ENTRY main { operand = s32[3,4,1] constant({ {{1},{2},{3},{4}}, {{1},{2},{3},{4}}, {{1},{2},{3},{4}}}) indices = s32[5,6] parameter(0) gather = s32[5,4,6,1] gather(operand, indices), offset_dims={1,3}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=2, slice_sizes={1,4,1} ROOT reshape = s32[5,2,2,2,3,1] reshape(gather) } )";
  const char* expected_root_expression = R"( (reshape (scalar-indexed-const (constant s32[3,4,1]) %indices 0->[0,2]) to s32[5,2,2,2,3,1]) )";
  AssertArrayForRootExpressionIs(hlo_text, expected_root_expression);
}

// --- Elementwise ops over a gather of a constant fold into the constant. ---

TEST_F(IndexedArrayAnalysisTest, UnaryOpOfGather) {
  std::string hlo_text = R"( HloModule UnaryOpOfGather ENTRY main { operand = f32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}}) indices = s32[5] parameter(0) gather = f32[5,4] gather(operand, indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,4} ROOT tanh = f32[5,4] tanh(gather) } )";
  AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"( (scalar-indexed-const (constant f32[3,4] f32[3,4] { { 0.761594176, 0.964027584, 0.995054781, 0.999329329 }, { 0.761594176, 0.995054781, 0.964027584, 0.999329329 }, { 0.999329329, 0.995054781, 0.964027584, 0.761594176 } }) %indices 0->[0]))");
}

TEST_F(IndexedArrayAnalysisTest, AddBroadcastedScalarWithGather) {
  std::string hlo_text = R"( HloModule AddBroadcastedScalarWithGather ENTRY main { gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}}) constant = s32[] constant(5) constant_broadcasted = s32[5,4] broadcast(constant), dimensions={} indices = s32[5] parameter(0) gather = s32[5,4] gather(gather_operand, indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,4} ROOT add = s32[5,4] add(gather, constant_broadcasted) } )";
  AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"( (scalar-indexed-const (constant s32[3,4] s32[3,4] { { 6, 7, 8, 9 }, { 6, 8, 7, 9 }, { 9, 8, 7, 6 } }) %indices 0->[0]))");
}

// Subtraction is non-commutative, so both operand orders are covered.
TEST_F(IndexedArrayAnalysisTest,
       SubtractBroadcastedScalarWithGather_GatherIsLhs) {
  std::string hlo_text = R"( HloModule SubtractBroadcastedScalarWithGather ENTRY main { gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}}) constant = s32[] constant(5) constant_broadcasted = s32[5,4] broadcast(constant), dimensions={} indices = s32[5] parameter(0) gather = s32[5,4] gather(gather_operand, indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,4} ROOT sub = s32[5,4] subtract(gather, constant_broadcasted) } )";
  AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"( (scalar-indexed-const (constant s32[3,4] s32[3,4] { { -4, -3, -2, -1 }, { -4, -2, -3, -1 }, { -1, -2, -3, -4 } }) %indices 0->[0]))");
}

TEST_F(IndexedArrayAnalysisTest,
       SubtractBroadcastedScalarWithGather_GatherIsRhs) {
  std::string hlo_text = R"( HloModule SubtractBroadcastedScalarWithGather ENTRY main { gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}}) constant = s32[] constant(5) constant_broadcasted = s32[5,4] broadcast(constant), dimensions={} indices = s32[5] parameter(0) gather = s32[5,4] gather(gather_operand, indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,4} ROOT sub = s32[5,4] subtract(constant_broadcasted, gather) } )";
  AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"( (scalar-indexed-const (constant s32[3,4] s32[3,4] { { 4, 3, 2, 1 }, { 4, 2, 3, 1 }, { 1, 2, 3, 4 } }) %indices 0->[0]))");
}

// Broadcast along the non-indexed dimension folds; along the indexed
// dimension (next test) it cannot.
TEST_F(IndexedArrayAnalysisTest, AddBroadcastedVectorWithGather) {
  std::string hlo_text = R"( HloModule AddBroadcastedVectorWithGather ENTRY main { gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}}) constant_vect = s32[4] constant({10,11,12,13}) constant_broadcasted = s32[5,4] broadcast(constant_vect), dimensions={1} indices = s32[5] parameter(0) gather = s32[5,4] gather(gather_operand, indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,4} ROOT add = s32[5,4] add(gather, constant_broadcasted) } )";
  AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"( (scalar-indexed-const (constant s32[3,4] s32[3,4] { { 11, 13, 15, 17 }, { 11, 14, 14, 17 }, { 14, 14, 14, 14 } }) %indices 0->[0]))");
}

TEST_F(IndexedArrayAnalysisTest, AddBroadcastedVectorWithGather_Negative) {
  std::string hlo_text = R"( HloModule AddBroadcastedVectorWithGather ENTRY main { gather_operand = s32[3,4] constant({{1,2,3,4},{1,3,2,4},{4,3,2,1}}) constant_vect = s32[5] constant({10,11,12,13,14}) constant_broadcasted = s32[5,4] broadcast(constant_vect), dimensions={0} indices = s32[5] parameter(0) gather = s32[5,4] gather(gather_operand, indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,4} ROOT add = s32[5,4] add(gather, constant_broadcasted) } )";
  AssertArrayForRootExpressionIs(hlo_text, "%add");
}

// Ops with no gather involved stay unanalyzed.
TEST_F(IndexedArrayAnalysisTest, RegularUnaryOp) {
  std::string hlo_text = R"( HloModule RegularUnaryOp ENTRY main { input = f32[100] parameter(0) ROOT tanh = f32[100] tanh(input) } )";
  AssertArrayForRootExpressionIs(hlo_text, "%tanh");
}

TEST_F(IndexedArrayAnalysisTest, RegularBinaryOp) {
  std::string hlo_text = R"( HloModule RegularUnaryOp ENTRY main { input0 = f32[100] parameter(0) input1 = f32[100] parameter(1) ROOT add = f32[100] add(input0, input1) } )";
  AssertArrayForRootExpressionIs(hlo_text, "%add");
}

// --- Dot folding: dot(gather(const), const) evaluated at compile time. ---

TEST_F(IndexedArrayAnalysisTest, DotOpBasic_0) {
  std::string hlo_text = R"( HloModule DotOp ENTRY main { gather_operand = s32[3,4] constant({{1,2,3,4},{5,6,7,8},{9,10,11,12}}) dot_rhs_constant = s32[4,3] constant({{1,2,3},{4,5,6},{7,8,9},{10,11,12}}) indices = s32[5] parameter(0) dot_lhs = s32[5,4] gather(gather_operand, indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,4} ROOT dot = s32[5,3] dot(dot_lhs, dot_rhs_constant), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )";
  AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"( (scalar-indexed-const (constant s32[3,3] s32[3,3] { { 70, 80, 90 }, { 158, 184, 210 }, { 246, 288, 330 } }) %indices 0->[0]))");
}

TEST_F(IndexedArrayAnalysisTest, DotOpBasic_1) {
  std::string hlo_text = R"( HloModule DotOp ENTRY main { gather_operand = s32[3,4] constant({{1,2,3,4},{5,6,7,8},{9,10,11,12}}) dot_rhs_constant = s32[3,3] constant({{1,2,3},{4,5,6},{7,8,9}}) indices = s32[5] parameter(0) dot_lhs = s32[3,5] gather(gather_operand, indices), offset_dims={0}, collapsed_slice_dims={1}, start_index_map={1}, index_vector_dim=1, slice_sizes={3,1} ROOT dot = s32[5,3] dot(dot_lhs, dot_rhs_constant), lhs_contracting_dims={0}, rhs_contracting_dims={0} } )";
  AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"( (scalar-indexed-const (constant s32[4,3] s32[4,3] { { 84, 99, 114 }, { 96, 114, 132 }, { 108, 129, 150 }, { 120, 144, 168 } }) %indices 0->[1]))");
}

TEST_F(IndexedArrayAnalysisTest, DotOpBasic_2) {
  std::string hlo_text = R"( HloModule DotOp ENTRY main { gather_operand = s32[3,4] constant({{1,2,3,4},{5,6,7,8},{9,10,11,12}}) dot_lhs_constant = s32[4,3] constant({{1,2,3},{4,5,6},{7,8,9},{10,11,12}}) indices = s32[5] parameter(0) dot_rhs = s32[3,5] gather(gather_operand, indices), offset_dims={0}, collapsed_slice_dims={1}, start_index_map={1}, index_vector_dim=1, slice_sizes={3,1} ROOT dot = s32[4,5] dot(dot_lhs_constant, dot_rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )";
  AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"( (scalar-indexed-const (constant s32[4,4] s32[4,4] { { 38, 44, 50, 56 }, { 83, 98, 113, 128 }, { 128, 152, 176, 200 }, { 173, 206, 239, 272 } }) %indices 1->[1]) )");
}

TEST_F(IndexedArrayAnalysisTest, DotOpBasic_3) {
  std::string hlo_text = R"( HloModule DotOp ENTRY main { gather_operand = s32[4,3] constant({{1,2,3},{4,5,6},{7,8,9},{10,11,12}}) dot_lhs_constant = s32[4,3] constant({{1,2,3},{4,5,6},{7,8,9},{10,11,12}}) indices = s32[5] parameter(0) dot_rhs = s32[5,3] gather(gather_operand, indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,3} ROOT dot = s32[4,5] dot(dot_lhs_constant, dot_rhs), lhs_contracting_dims={1}, rhs_contracting_dims={1} } )";
  AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"( (scalar-indexed-const (constant s32[4,4] s32[4,4] { { 14, 32, 50, 68 }, { 32, 77, 122, 167 }, { 50, 122, 194, 266 }, { 68, 167, 266, 365 } }) %indices 1->[0]) )");
}

TEST_F(IndexedArrayAnalysisTest, DotOpWithBatch) {
  std::string hlo_text = R"( HloModule DotOp ENTRY main { gather_operand = s32[2,3,2] constant({{{1,2},{3,4},{5,6}},{{7,8},{9,10},{11,12}}}) dot_lhs_constant = s32[2,2,3] constant({{{1,2,3},{4,5,6}},{{7,8,9},{10,11,12}}}) indices = s32[4] parameter(0) dot_rhs = s32[2,3,4] gather(gather_operand, indices), offset_dims={0,1}, collapsed_slice_dims={2}, start_index_map={2}, index_vector_dim=1, slice_sizes={2,3,1} ROOT dot = s32[2,2,4] dot(dot_lhs_constant, dot_rhs), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0} } )";
  AssertArrayWithConstantsForRootExpressionIs(hlo_text, R"( (scalar-indexed-const (constant s32[2,2,2] s32[2,2,2] { { { 22, 28 }, { 49, 64 } }, { { 220, 244 }, { 301, 334 } } }) %indices 3->[2]) )");
}

// Fold must not apply when the indexed dimension is contracted by the dot.
TEST_F(IndexedArrayAnalysisTest, DotOpNegative) {
  std::string hlo_text = R"( HloModule DotOp ENTRY main { gather_operand = s32[3,4] constant({{1,2,3,4},{5,6,7,8},{9,10,11,12}}) dot_rhs_constant = s32[2,3] constant({{1,2,3},{4,5,6}}) indices = s32[2] parameter(0) dot_lhs = s32[3,2] gather(gather_operand, indices), offset_dims={0}, collapsed_slice_dims={1}, start_index_map={1}, index_vector_dim=1, slice_sizes={3,1} ROOT dot = s32[3,3] dot(dot_lhs, dot_rhs_constant), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )";
  AssertArrayWithConstantsForRootExpressionIs(hlo_text, "%dot");
}

}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/indexed_array_analysis.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/indexed_array_analysis_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
0246551a-2064-4f3b-ad55-2a13cb8a1a45
cpp
tensorflow/tensorflow
all_gather_broadcast_reorder
third_party/xla/xla/service/all_gather_broadcast_reorder.cc
third_party/xla/xla/service/all_gather_broadcast_reorder_test.cc
#include "xla/service/all_gather_broadcast_reorder.h"

#include <cstdint>
#include <vector>

#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"

namespace xla {

// Rewrites all-gather(broadcast(x)) so the all-gather runs on the smaller
// pre-broadcast operand:
//  - gather along a dimension produced from x ("non uniform"): the all-gather
//    is moved below the broadcast directly;
//  - gather along a broadcast-created ("uniform") dimension: x is reshaped to
//    prepend a unit dimension, all-gathered along it, then re-broadcast and
//    reshaped back to the original result shape.
// Returns true iff any instruction was rewritten.
absl::StatusOr<bool> AllGatherBroadcastReorder::Run(
    HloModule *module,
    const absl::flat_hash_set<absl::string_view> &execution_threads) {
  // Layout-constrained all-gathers must not be moved; bail out entirely.
  if (hlo_query::ContainsLayoutConstrainedCollective(*module,
                                                     HloOpcode::kAllGather)) {
    VLOG(1) << "Skip AllGatherBroadcastReorder because the module contains "
               "all-gather with constrained layouts";
    return false;
  }
  // Cloned collectives need fresh channel ids to stay unique module-wide.
  int64_t next_channel_id = hlo_query::NextChannelId(*module);

  bool changed = false;
  for (auto computation : module->computations(execution_threads)) {
    for (HloInstruction *inst : computation->MakeInstructionPostOrder()) {
      // Match: array-shaped all-gather whose operand is a broadcast.
      if (inst->opcode() != HloOpcode::kAllGather || !inst->shape().IsArray() ||
          inst->operand(0)->opcode() != HloOpcode::kBroadcast) {
        continue;
      }
      HloAllGatherInstruction *ag = Cast<HloAllGatherInstruction>(inst);
      HloBroadcastInstruction *bcast =
          Cast<HloBroadcastInstruction>(inst->mutable_operand(0));

      // Output dims that come from the broadcast operand are "non uniform";
      // everything else is replicated ("uniform") data. If inserting the
      // all-gather dim succeeds, it was not an operand-derived dim.
      absl::flat_hash_set<int64_t> non_uniform_dims;
      non_uniform_dims.insert(bcast->dimensions().begin(),
                              bcast->dimensions().end());
      const bool all_gather_along_uniform_dim =
          non_uniform_dims.insert(ag->all_gather_dimension()).second;

      // Total replicated volume; if it is 1 the broadcast is trivial and the
      // rewrite would not save anything.
      int64_t uniform_dim_size = 1;
      for (int64_t i = 0; i < ag->shape().rank(); ++i) {
        if (non_uniform_dims.count(i) == 0) {
          uniform_dim_size *= ag->shape().dimensions(i);
        }
      }
      if (uniform_dim_size == 1) {
        continue;
      }

      HloInstruction *replacement;
      const int64_t ag_dim = ag->all_gather_dimension();

      if (!all_gather_along_uniform_dim) {
        VLOG(2) << "All-gather along non uniform dimension";
        // Map the output all-gather dim back to the broadcast operand's dim.
        auto ag_dim_index = PositionInContainer(bcast->dimensions(), ag_dim);

        // All-gather the (smaller) broadcast operand, scaled along the
        // mapped dimension, then re-apply the original broadcast.
        Shape new_ag_shape = bcast->operand(0)->shape();
        new_ag_shape.set_dimensions(ag_dim_index,
                                    ag->shape().dimensions(ag_dim));
        auto *new_ag =
            Cast<HloAllGatherInstruction>(computation->AddInstruction(
                ag->CloneWithNewOperands(new_ag_shape, bcast->operands())));
        if (ag->channel_id()) {
          new_ag->set_channel_id(next_channel_id++);
        }
        new_ag->set_all_gather_dimension(ag_dim_index);

        replacement = computation->AddInstruction(
            bcast->CloneWithNewOperands(ag->shape(), {new_ag}));
      } else {
        VLOG(2) << "All-gather along uniform dimension";
        HloInstruction *x = bcast->mutable_operand(0);

        // Reshape x to [1, x_dims...] so the all-gather can run along a
        // fresh leading dimension.
        std::vector<int64_t> shape_dims{1};
        absl::Span<const int64_t> x_dims = x->shape().dimensions();
        shape_dims.insert(shape_dims.end(), x_dims.begin(), x_dims.end());
        Shape shape =
            ShapeUtil::MakeShape(x->shape().element_type(), shape_dims);

        HloInstruction *rs0 = computation->AddInstruction(
            HloInstruction::CreateReshape(shape, x));

        // The all-gather multiplies its dimension by this factor; reuse
        // `shape` (mutated in place) as the new all-gather result shape.
        const int64_t ag_factor = ag->shape().dimensions(ag_dim) /
                                  ag->operand(0)->shape().dimensions(ag_dim);
        shape.set_dimensions(0, ag_factor);

        auto *new_ag =
            Cast<HloAllGatherInstruction>(computation->AddInstruction(
                ag->CloneWithNewOperands(shape, {rs0})));
        if (ag->channel_id()) {
          new_ag->set_channel_id(next_channel_id++);
        }
        new_ag->set_all_gather_dimension(0);

        // Broadcast into the original shape with ag_dim split into
        // [ag_factor, original/ag_factor]; original broadcast dims after
        // ag_dim shift right by one to make room.
        std::vector<int64_t> bcast_shape_dims =
            SpanToVector(ag->shape().dimensions());
        bcast_shape_dims[ag_dim] = ag_factor;
        bcast_shape_dims.insert(bcast_shape_dims.begin() + ag_dim + 1,
                                ag->shape().dimensions(ag_dim) / ag_factor);
        Shape bcast_shape =
            ShapeUtil::MakeShape(x->shape().element_type(), bcast_shape_dims);

        std::vector<int64_t> bcast_dims;
        bcast_dims.push_back(ag_dim);
        for (int64_t d : bcast->dimensions()) {
          bcast_dims.push_back(d + (d > ag_dim));
        }

        // Renamed from `bcast` to avoid shadowing the outer broadcast.
        HloInstruction *new_bcast = computation->AddInstruction(
            HloInstruction::CreateBroadcast(bcast_shape, new_ag, bcast_dims));

        // Collapse the split dimension pair back to the original shape.
        replacement = computation->AddInstruction(
            HloInstruction::CreateReshape(ag->shape(), new_bcast));
      }

      TF_RETURN_IF_ERROR(ag->ReplaceAllUsesWith(replacement));
      TF_RETURN_IF_ERROR(computation->RemoveInstructionAndUnusedOperands(ag));
      changed = true;
    }
  }

  return changed;
}
}  // namespace xla
// Unit tests for AllGatherBroadcastReorder. Each test runs the pass on a
// small HLO module and checks whether (and into which pattern) the
// all-gather(broadcast(x)) root was rewritten.
#include "xla/service/all_gather_broadcast_reorder.h"

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace {

namespace m = xla::testing::opcode_matchers;

class AllGatherBroadcastReorderTest : public HloTestBase {
 public:
  // Expected result of running the pass:
  //   NoChange            - pass reports no modification;
  //   NonUniformAGPattern - root becomes broadcast(all-gather(param));
  //   UniformAGPattern    - root becomes
  //                         reshape(broadcast(all-gather(reshape(param)))).
  enum class PassOutput { NoChange, NonUniformAGPattern, UniformAGPattern };

  // Parses `hlo_module`, runs the pass, and verifies both the changed bit
  // and the root instruction's structure.
  void RunPass(absl::string_view hlo_module, PassOutput expected_output) {
    TF_ASSERT_OK_AND_ASSIGN(auto module,
                            ParseAndReturnVerifiedModule(hlo_module));
    auto changed = AllGatherBroadcastReorder().Run(module.get());
    ASSERT_TRUE(changed.ok());
    if (expected_output == PassOutput::NoChange) {
      EXPECT_FALSE(changed.value());
    } else {
      EXPECT_TRUE(changed.value());
      if (expected_output == PassOutput::NonUniformAGPattern) {
        EXPECT_THAT(module->entry_computation()->root_instruction(),
                    m::Broadcast(m::AllGather(m::Parameter())));
      } else {
        EXPECT_THAT(
            module->entry_computation()->root_instruction(),
            m::Reshape(m::Broadcast(m::AllGather(m::Reshape(m::Parameter())))));
      }
    }
  }
};

// Gather dim 3 is produced from the broadcast operand -> simple reorder.
TEST_F(AllGatherBroadcastReorderTest, Simple_GatherAlongNonUniformDim) {
  absl::string_view hlo_string = R"( HloModule m ENTRY main { x = f32[128, 5] parameter(0) bc = f32[5, 4, 8, 128] broadcast(x), dimensions={3, 0} ROOT ag = f32[5, 4, 8, 256] all-gather(bc), dimensions={3}, replica_groups={{0, 1}} } )";
  RunPass(hlo_string, PassOutput::NonUniformAGPattern);
}

// Gather dim 1 is broadcast-created -> reshape/broadcast/reshape pattern.
TEST_F(AllGatherBroadcastReorderTest, Simple_GatherAlongUniformDim) {
  absl::string_view hlo_string = R"( HloModule m ENTRY main { x = f32[128, 5] parameter(0) bc = f32[5, 4, 8, 128] broadcast(x), dimensions={3, 0} ROOT ag = f32[5, 12, 8, 128] all-gather(bc), dimensions={1}, replica_groups={{0, 1, 2}} } )";
  RunPass(hlo_string, PassOutput::UniformAGPattern);
}

// A scalar broadcast: every output dim is uniform.
TEST_F(AllGatherBroadcastReorderTest, Simple_GatherBroadcastScalar) {
  absl::string_view hlo_string = R"( HloModule m ENTRY main { x = f32[] parameter(0) bc = f32[4, 8] broadcast(x), dimensions={} ROOT ag = f32[12, 8] all-gather(bc), dimensions={0}, replica_groups={{0, 1, 2}} } )";
  RunPass(hlo_string, PassOutput::UniformAGPattern);
}

// Realistic case (T5 model): channel id and global device ids present.
TEST_F(AllGatherBroadcastReorderTest, T5Test) {
  absl::string_view hlo_string = R"( HloModule m ENTRY main { x = f32[128] parameter(0) bc = f32[1,4,84,128]{3,2,1,0} broadcast(x), dimensions={3} ROOT ag = f32[8,4,84,128]{3,2,1,0} all-gather(bc), channel_id=6, replica_groups={{0,1,2,3,4,5,6,7}}, dimensions={0}, use_global_device_ids=true } )";
  RunPass(hlo_string, PassOutput::UniformAGPattern);
}

// No broadcast feeding the all-gather -> pattern does not match.
TEST_F(AllGatherBroadcastReorderTest, FailedMatch) {
  absl::string_view hlo_string = R"( HloModule m ENTRY main { x = f32[1,4,84,128] parameter(0) ROOT ag = f32[8,4,84,128]{3,2,1,0} all-gather(x), channel_id=6, replica_groups={{0,1,2,3,4,5,6,7}}, dimensions={0}, use_global_device_ids=true } )";
  RunPass(hlo_string, PassOutput::NoChange);
}

}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_gather_broadcast_reorder.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_gather_broadcast_reorder_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
9aa7f320-8708-44ab-8106-6d86e1782605
cpp
tensorflow/tensorflow
space_to_batch_converter
third_party/xla/xla/service/space_to_batch_converter.cc
third_party/xla/xla/service/space_to_batch_converter_test.cc
#include "xla/service/space_to_batch_converter.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <iterator> #include <map> #include <memory> #include <queue> #include <tuple> #include <utility> #include <vector> #include "absl/algorithm/algorithm.h" #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/log.h" #include "absl/status/statusor.h" #include "absl/types/span.h" #include "xla/debug_options_flags.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/literal.h" #include "xla/literal_util.h" #include "xla/service/hlo_creation_utils.h" #include "xla/service/pattern_matcher.h" #include "xla/service/shape_inference.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/tsl/lib/core/bitmap.h" #include "xla/types.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/status.h" namespace xla { namespace { namespace m = match; constexpr int64_t kNumMappedDims = 3; class ConvolutionVisitor { public: absl::Status PerformSpaceToBatchOnConvolution(HloInstruction* convolution); struct ConvDetails { std::vector<int64_t> spatial_dimensions_to_split; int64_t inherent_low_padding, inherent_high_padding, stride, spatial_size, base_dilation_factor, halo_size, high_padding_for_conv, low_padding_for_conv, kernel_spatial_dim_size, input_dim_size; }; ConvDetails GetConvolutionDetails(HloInstruction* convolution, ConvolutionDimensionNumbers& dim_numbers); std::pair<std::vector<int64_t>, std::vector<int64_t>> GetSpatialDimsToSplit( HloInstruction* old_operand); bool IsForwardWindowDilatedConv(HloInstruction* convolution, ConvolutionDimensionNumbers& dim_numbers); bool CanPropagate(HloInstruction* 
consumer, HloInstruction* producer); bool IsBroadcastTree(HloInstruction* op, HloInstruction* consumer, std::vector<HloInstruction*>& instructions_to_transform); void RewriteBroadcastTree( HloInstruction* producer, std::vector<HloInstruction*>& instructions_to_transform); void PropagateOnBroadcast(HloInstruction* consumer, HloInstruction* producer); bool IsOpcodeNonPropagatable(HloInstruction* consumer); bool SupportedOpForPropagation(HloInstruction* consumer, HloInstruction* producer); bool SupportedDotForPropagation(HloInstruction* consumer, HloInstruction* producer); bool IsBroadcastPropagatable(HloInstruction* broadcast, HloInstruction* old_other_op); absl::StatusOr<bool> Propagate(HloInstruction* consumer, HloInstruction* producer); absl::StatusOr<std::pair<HloInstruction*, std::vector<int64_t>>> SplitSpace( HloInstruction* activations, ConvolutionDimensionNumbers& dim_numbers, int64_t& activations_batch_dim, int64_t high_padding, int64_t low_padding, int64_t spatial_split_size, int64_t num_splits, std::vector<int64_t>* spatial_dimensions_to_split, bool is_backprop = false, bool is_rhs = false); absl::StatusOr<HloInstruction*> PerformSplitSpace( HloInstruction* activations, absl::Span<const int64_t> spatial_dimensions_to_split, int64_t activations_batch_dim, int64_t spatial_split_size, int64_t num_splits); absl::StatusOr<HloInstruction*> TransposeAndMergeBatch( HloInstruction* activations, absl::Span<const int64_t> final_split_spatial_dim_positioning, int64_t activations_batch_dim, int64_t old_batch_size); absl::StatusOr<HloInstruction*> PadAndSplitSpace( HloInstruction* activations, absl::Span<const int64_t> spatial_dimensions_to_split, int64_t activations_batch_dim, int64_t high_padding, int64_t low_padding, int64_t spatial_split_size, int64_t num_splits); absl::StatusOr<HloInstruction*> PropagateOnConstant(HloInstruction* consumer, HloInstruction* producer); absl::Status PropagateOnConv(HloInstruction* convolution); absl::Status 
PropagateOnConcat(HloInstruction* concat); absl::Status PropagateOnReverse(HloInstruction* reverse); absl::Status PropagateOnPad(HloInstruction* pad); absl::Status PropagateOnSlice(HloInstruction* slice); absl::Status PropagateOnBackpropFilterConv(HloInstruction* convolution); bool IsConvSuitableForSpaceToBatch(HloInstruction* convolution); bool IsThisBackPropFilterConv(HloInstruction* convolution); absl::Status PropagateOnUsers(HloInstruction* old_conv); absl::StatusOr<HloInstruction*> SelectValidPortion( HloInstruction* new_instr, HloInstruction* old_instr, HloInstruction* select_val, int64_t new_batch_dim, absl::Span<const int64_t> new_space_dims, int64_t old_batch_dim, absl::Span<const int64_t> old_space_dims); struct SpaceNextToBatchDetails { HloInstruction* instr; std::vector<int64_t> transpose_dims; }; absl::StatusOr<SpaceNextToBatchDetails> BringSpaceNextToBatch( HloInstruction* activations, ConvolutionDimensionNumbers& dim_numbers, int64_t& activations_batch_dim, std::vector<int64_t>* spatial_dimensions_to_split, bool is_backprop = false, bool is_rhs = false); absl::StatusOr<HloInstruction*> ChangeSpatialSizeOnSpaceToBatchedShape( HloInstruction* activations, int64_t batch_dimension, int64_t old_batch_size, absl::Span<const int64_t> spatial_dimensions_to_split, int64_t new_spatial_dim_size, bool increase_spatial_size = false); absl::StatusOr<HloInstruction*> SplitAndTransposeMergedBatch( HloInstruction* activations, int64_t batch_dimension, int64_t old_batch_size, absl::Span<const int64_t> spatial_dimensions); absl::StatusOr<HloInstruction*> BatchToSpace(HloInstruction* old_instr); absl::StatusOr<HloInstruction*> HaloDuplicateWithSlice( HloInstruction* activations, absl::Span<const int64_t> spatial_dimensions_to_split, int64_t activations_batch_dim, int64_t low_padding, int64_t halo_size, HloInstruction* pad_val = nullptr); absl::StatusOr<bool> Run(); const bool changed() const { return changed_; } ~ConvolutionVisitor() = default; explicit 
ConvolutionVisitor(SpaceToBatchController ctrl, HloComputation* computation); int64_t GetFirstChosenSpatialDim(HloInstruction* convolution) { const int64_t dim_count = ctrl_.count_of_dimensions_to_convert; const int64_t end_point = convolution->convolution_dimension_numbers() .input_spatial_dimensions_size() - ctrl_.dimension_from_end_to_convert; return end_point - dim_count + 1; } std::vector<int64_t> GetChosenSpatialDims(HloInstruction* convolution) { const int64_t dim_count = ctrl_.count_of_dimensions_to_convert; const int64_t first_dim = GetFirstChosenSpatialDim(convolution); std::vector<int64_t> dims(dim_count); for (int i = 0; i < dim_count; ++i) { dims[i] = convolution->convolution_dimension_numbers().input_spatial_dimensions( first_dim + i); } return dims; } int64_t DimLookUp(absl::Span<const int64_t> permute_dims, int64_t id) { return permute_dims[id]; } int DimMapper(SpaceToBatchDimMap s) { return static_cast<int>(s); } int64_t ReverseDimLookUp(absl::Span<const int64_t> permute_dims, int64_t id) { return std::distance(permute_dims.begin(), absl::c_find(permute_dims, id)); } HloInstruction* DoesConvolutionFeedReduceWindowOrSelectAndScatter( HloInstruction* instr, int64_t depth); bool DoesConvolutionFeedUnpropagatableOp( HloInstruction* instr, int64_t depth = kUnpropagatableOpSearchDepth); bool IsSpaceToBatchedSpaceSizeSuitable(HloInstruction* instr); private: HloComputation* computation_; absl::flat_hash_set<HloInstruction*> convs_to_visit_; std::vector<HloInstruction*> conv_visitor_list_; HloInstructionSet non_propagatable_instrs_; absl::flat_hash_map<HloInstruction*, HloInstruction*> batch_to_space_map_; absl::flat_hash_map<HloInstruction*, HloInstruction*> old_to_new_instrs_; absl::flat_hash_map<HloInstruction*, std::vector<int64_t>> instr_to_dim_map_; absl::flat_hash_map<HloInstruction*, std::vector<int64_t>> instr_to_dim_permute_map_; absl::flat_hash_map<HloInstruction*, absl::flat_hash_set<HloInstruction*>> broadcast_map_; bool changed_ = false; 
static constexpr int64_t kReduceWindowSearchDepth = 10; static constexpr int64_t kUnpropagatableOpSearchDepth = 3; static constexpr int64_t kMultiplierOnSpaceForBaseDilation = 3; absl::flat_hash_map<std::pair<HloInstruction*, int64_t>, bool> unpropagatability_cache_; SpaceToBatchController ctrl_; }; ConvolutionVisitor::ConvolutionVisitor(SpaceToBatchController ctrl, HloComputation* computation) { ctrl_ = ctrl; computation_ = computation; for (HloInstruction* inst : computation->MakeInstructionPostOrder()) { if (inst->opcode() != HloOpcode::kConvolution) { continue; } auto convolution = inst; if (!IsConvSuitableForSpaceToBatch(convolution)) { VLOG(1) << "Conv not suitable for space-to-batch " << convolution->ToString(); continue; } VLOG(1) << "Conv added to space-to-batch worklist " << convolution->ToString(); convs_to_visit_.insert(convolution); conv_visitor_list_.push_back(convolution); } } std::pair<std::vector<int64_t>, std::vector<int64_t>> ConvolutionVisitor::GetSpatialDimsToSplit(HloInstruction* old_operand) { auto new_operand = old_to_new_instrs_[old_operand]; auto dim_map_val = instr_to_dim_map_[old_operand]; auto permute_dims = instr_to_dim_permute_map_[new_operand]; std::vector<int64_t> old_dims(ctrl_.count_of_dimensions_to_convert), new_dims(ctrl_.count_of_dimensions_to_convert); old_dims[0] = dim_map_val[DimMapper(SpaceToBatchDimMap::kSpace0)]; new_dims[0] = DimLookUp(permute_dims, old_dims[0]); for (int i = 1; i < ctrl_.count_of_dimensions_to_convert; ++i) { old_dims[i] = old_dims[0] + i; new_dims[i] = new_dims[0] + i; } return std::make_pair(old_dims, new_dims); } bool ConvolutionVisitor::IsForwardWindowDilatedConv( HloInstruction* convolution, ConvolutionDimensionNumbers& dim_numbers) { const int64_t window_dilation_factor = convolution->window() .dimensions(GetFirstChosenSpatialDim(convolution)) .window_dilation(); if (window_dilation_factor == 1) { return false; } const int64_t output_spatial_dim = dim_numbers.output_spatial_dimensions( 
      GetFirstChosenSpatialDim(convolution));
  const int64_t kernel_spatial_dim = dim_numbers.kernel_spatial_dimensions(
      GetFirstChosenSpatialDim(convolution));
  return convolution->operand(1)->shape().dimensions(kernel_spatial_dim) <
         convolution->shape().dimensions(output_spatial_dim);
}

// Structural legality checks for the initial space-to-batch rewrite; every
// early return is a disqualifying condition.
bool ConvolutionVisitor::IsConvSuitableForSpaceToBatch(
    HloInstruction* convolution) {
  ConvolutionDimensionNumbers dim_numbers =
      convolution->convolution_dimension_numbers();

  // If there are no spatial dims to choose, reject.
  if (GetFirstChosenSpatialDim(convolution) < 0) {
    return false;
  }

  if (convolution->batch_group_count() != 1) {
    return false;
  }

  if (convolution->window()
          .dimensions(GetFirstChosenSpatialDim(convolution))
          .window_dilation() != 1) {
    if (!IsForwardWindowDilatedConv(convolution, dim_numbers)) {
      return false;
    }
  }

  const ConvDetails c = GetConvolutionDetails(convolution, dim_numbers);

  const int64_t low_pad = convolution->window()
                              .dimensions(GetFirstChosenSpatialDim(convolution))
                              .padding_low();

  // Base-dilated (transposed-style) convs are only handled in a narrow set
  // of stride/padding configurations.
  if (c.base_dilation_factor != 1) {
    if (!ctrl_.enable_propagations_on_base_dilations) {
      return false;
    }
    if (c.stride != 1) {
      return false;
    }
    if (low_pad == 0) {
      if (c.kernel_spatial_dim_size != 1) {
        return false;
      }
    } else if (low_pad != c.base_dilation_factor - 1 &&
               low_pad != c.base_dilation_factor) {
      return false;
    }
  }

  int64_t activations_batch_dim = dim_numbers.input_batch_dimension();

  const int64_t old_batch_size =
      convolution->operand(0)->shape().dimensions(activations_batch_dim);

  if (old_batch_size > ctrl_.limit_on_batch_size) {
    return false;
  }

  VLOG(1) << "spatial size " << c.spatial_size << " halo size " << c.halo_size;

  // The halo must fit inside one split portion of the spatial dim.
  if (c.halo_size > CeilOfRatio(c.spatial_size, ctrl_.number_of_splits)) {
    return false;
  }

  // Demand a minimum spatial size for the base-dilation-with-low-pad case.
  if (c.base_dilation_factor > 1 &&
      c.inherent_low_padding == c.base_dilation_factor) {
    if (c.spatial_size <
        kMultiplierOnSpaceForBaseDilation * ctrl_.number_of_splits) {
      return false;
    }
  }

  VLOG(1) << "Legal space-to-batch convolution " << convolution->ToString();
  return true;
}

// Heuristic: treats the conv as a backprop-filter conv when the already
// rewritten operand's old batch dim lines up with the conv's contracting
// (feature) dimension.
bool ConvolutionVisitor::IsThisBackPropFilterConv(HloInstruction* convolution) {
  auto activations = convolution->mutable_operand(0);
  auto kernel = convolution->mutable_operand(1);
  auto dim_numbers = convolution->convolution_dimension_numbers();

  if (!old_to_new_instrs_.contains(kernel) &&
      !old_to_new_instrs_.contains(activations)) {
    return false;
  }

  if (old_to_new_instrs_.contains(kernel)) {
    auto dim_map_val_op_0 = instr_to_dim_map_[kernel];
    const int64_t old_batch_dim =
        dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)];
    if (convolution->convolution_dimension_numbers()
            .kernel_input_feature_dimension() != old_batch_dim) {
      return false;
    }
  }

  if (old_to_new_instrs_.contains(activations)) {
    auto dim_map_val_op_0 = instr_to_dim_map_[activations];
    const int64_t old_batch_dim =
        dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)];
    if (dim_numbers.input_feature_dimension() != old_batch_dim) {
      return false;
    }
  }

  return true;
}

// Exchanges `halo_size` elements between adjacent batch splits of each split
// spatial dim by slicing neighbor data, padding at the boundaries, and
// concatenating onto each split portion.
absl::StatusOr<HloInstruction*> ConvolutionVisitor::HaloDuplicateWithSlice(
    HloInstruction* activations,
    absl::Span<const int64_t> spatial_dimensions_to_split,
    int64_t activations_batch_dim, int64_t low_padding, int64_t halo_size,
    HloInstruction* pad_val) {
  const int64_t spatial_dim_count = spatial_dimensions_to_split.size();
  const int64_t additional_batch_size =
      IPow<int64_t>(ctrl_.number_of_splits, spatial_dim_count);
  const int64_t original_batch_size =
      activations->shape().dimensions(activations_batch_dim) /
      additional_batch_size;

  const int64_t spatial_split_size =
      activations->shape().dimensions(spatial_dimensions_to_split[0]);
  const int64_t batch_size = ctrl_.number_of_splits;

  // Unmerge the batch so each split gets its own (batch, space) dim pair.
  TF_ASSIGN_OR_RETURN(
      activations, SplitAndTransposeMergedBatch(
                       activations, activations_batch_dim, original_batch_size,
                       spatial_dimensions_to_split));

  const int64_t rank = activations->shape().rank();

  VLOG(1) << "In HaloDuplicateWithSlice with activations "
          << activations->ToString() << " batch_size " << batch_size
          << " spatial_split_size " << spatial_split_size << " low_padding "
          << low_padding << " halo size " << halo_size;

  CHECK_LE(std::abs(halo_size - low_padding), spatial_split_size);

  for (int64_t i = 0; i < spatial_dimensions_to_split.size(); ++i) {
    // After SplitAndTransposeMergedBatch, each split's (batch, space) pair
    // sits at these fixed offsets past the batch dim.
    int64_t spatial_dimension_to_split = activations_batch_dim + 2 * (i + 1);
    int64_t remapped_batch_dimension = spatial_dimension_to_split - 1;
    HloInstruction* first_slice = nullptr;

    std::vector<int64_t> strides(rank, 1);
    HloInstruction* padding =
        pad_val == nullptr
            ? activations->AddInstruction(HloInstruction::CreateConstant(
                  LiteralUtil::Zero(activations->shape().element_type())))
            : pad_val;

    if (low_padding > 0) {
      // Take the tail of the previous split (shifted by one along the split
      // batch dim via an edge pad) as the left halo.
      std::vector<int64_t> start_indices(rank, 0),
          end_indices(activations->shape().dimensions().begin(),
                      activations->shape().dimensions().end());
      start_indices[spatial_dimension_to_split] =
          spatial_split_size - low_padding;
      end_indices[remapped_batch_dimension] = batch_size - 1;
      end_indices[spatial_dimension_to_split] = spatial_split_size;

      TF_ASSIGN_OR_RETURN(first_slice,
                          MakeSliceHlo(activations, start_indices, end_indices,
                                       strides, &activations->metadata(),
                                       &activations->frontend_attributes()));
      VLOG(1) << "first slice " << first_slice->ToString();

      PaddingConfig padding_config =
          MakeNoPaddingConfig(first_slice->shape().dimensions_size());
      padding_config.mutable_dimensions(remapped_batch_dimension)
          ->set_edge_padding_low(1);

      TF_ASSIGN_OR_RETURN(first_slice,
                          MakePadHlo(first_slice, padding, padding_config,
                                     &first_slice->metadata(),
                                     &first_slice->frontend_attributes()));
    }

    HloInstruction* halo_region = nullptr;
    if (halo_size - low_padding > 0) {
      // Take the head of the next split (shifted the other way) as the
      // right halo.
      std::vector<int64_t> start_indices_halo(rank, 0),
          end_indices_halo(activations->shape().dimensions().begin(),
                           activations->shape().dimensions().end());

      start_indices_halo[remapped_batch_dimension] = 1;
      end_indices_halo[spatial_dimension_to_split] = halo_size - low_padding;

      TF_ASSIGN_OR_RETURN(
          halo_region,
          MakeSliceHlo(activations, start_indices_halo, end_indices_halo,
                       strides, &activations->metadata(),
                       &activations->frontend_attributes()));
      VLOG(1) << "halo_region " << halo_region->ToString();

      PaddingConfig padding_config_halo =
          MakeNoPaddingConfig(halo_region->shape().dimensions_size());
      padding_config_halo.mutable_dimensions(remapped_batch_dimension)
          ->set_edge_padding_high(1);

      TF_ASSIGN_OR_RETURN(halo_region,
                          MakePadHlo(halo_region, padding, padding_config_halo,
                                     &halo_region->metadata(),
                                     &halo_region->frontend_attributes()));
    }

    // Trim the activations when there is padding to absorb but no halo.
    if ((halo_size == 0 && low_padding != 0) || low_padding < 0) {
      std::vector<int64_t> start_indices_activations_cut(rank, 0),
          end_indices_activations_cut(activations->shape().dimensions().begin(),
                                      activations->shape().dimensions().end());

      if (low_padding > 0) {
        end_indices_activations_cut[spatial_dimension_to_split] =
            spatial_split_size - low_padding;
      } else {
        start_indices_activations_cut[spatial_dimension_to_split] =
            0 - low_padding;
        end_indices_activations_cut[spatial_dimension_to_split] =
            spatial_split_size;
      }

      TF_ASSIGN_OR_RETURN(
          activations,
          MakeSliceHlo(activations, start_indices_activations_cut,
                       end_indices_activations_cut, strides,
                       &activations->metadata(),
                       &activations->frontend_attributes()));
    }

    if (first_slice != nullptr) {
      TF_ASSIGN_OR_RETURN(
          activations,
          MakeConcatHlo({first_slice, activations}, spatial_dimension_to_split,
                        &activations->metadata(),
                        &activations->frontend_attributes()));
    }

    if (halo_region != nullptr) {
      TF_ASSIGN_OR_RETURN(
          activations,
          MakeConcatHlo({activations, halo_region}, spatial_dimension_to_split,
                        &activations->metadata(),
                        &activations->frontend_attributes()));
    }
  }

  // Re-merge the split batch dims back into a single batch dimension.
  TF_ASSIGN_OR_RETURN(
      activations,
      TransposeAndMergeBatch(activations, spatial_dimensions_to_split,
                             activations_batch_dim, original_batch_size));

  VLOG(1) << "HaloDuplicated activations " << activations->ToString();
  return activations;
}

absl::StatusOr<ConvolutionVisitor::SpaceNextToBatchDetails>
ConvolutionVisitor::BringSpaceNextToBatch(
    HloInstruction* activations, ConvolutionDimensionNumbers& dim_numbers,
    int64_t& activations_batch_dim,
    std::vector<int64_t>* spatial_dimensions_to_split, bool is_backprop,
    bool is_rhs) {
  // The chosen spatial dims must be contiguous.
  for (int64_t i = 1; i < spatial_dimensions_to_split->size(); ++i) {
    CHECK_EQ(spatial_dimensions_to_split->at(i),
             spatial_dimensions_to_split->at(i - 1) + 1)
        << "Spatial dimensions are not contiguous";
  }

  int64_t spatial_dimension_to_split = spatial_dimensions_to_split->at(0);

  std::vector<int64_t> transpose_dims(activations->shape().rank());
  if (spatial_dimension_to_split == activations_batch_dim + 1) {
    // Already adjacent: identity permutation, no transpose needed.
    absl::c_iota(transpose_dims, 0);
  } else {
    // Build a permutation that moves the batch dim directly before the first
    // spatial dim, renumbering every dimension in dim_numbers accordingly.
    ConvolutionDimensionNumbers new_dim_numbers = dim_numbers;
    int64_t pushed_counter = 0;
    int64_t new_batch_dim, new_spatial_dim;
    int64_t dim_counter = 0;
    if (is_rhs) {
      // Kernel (RHS) side: only reached for backprop-filter convs.
      CHECK(is_backprop);
      for (int i = 0; i < activations->shape().rank(); ++i) {
        if (i == activations_batch_dim) {
          continue;
        }
        if (i == spatial_dimension_to_split) {
          // Insert the batch dim right before this spatial dim.
          transpose_dims[dim_counter++] = activations_batch_dim;
          new_batch_dim = pushed_counter;
          pushed_counter++;
          new_spatial_dim = pushed_counter;
        }

        if (i == dim_numbers.kernel_output_feature_dimension()) {
          new_dim_numbers.set_kernel_output_feature_dimension(pushed_counter);
        } else {
          auto it = absl::c_find(dim_numbers.kernel_spatial_dimensions(), i);
          if (it != dim_numbers.kernel_spatial_dimensions().end()) {
            int64_t j = it - dim_numbers.kernel_spatial_dimensions().begin();
            new_dim_numbers.set_kernel_spatial_dimensions(j, pushed_counter);
          }
        }
        transpose_dims[dim_counter++] = i;
        pushed_counter++;
      }

      activations_batch_dim = new_batch_dim;
      spatial_dimension_to_split = new_spatial_dim;
      TF_ASSIGN_OR_RETURN(activations,
                          MakeTransposeHlo(activations, transpose_dims));

      new_dim_numbers.set_kernel_input_feature_dimension(activations_batch_dim);
    } else {
      // Activations (LHS) side.
      for (int i = 0; i < activations->shape().rank(); ++i) {
        if (i == activations_batch_dim) {
          continue;
        }
        if (i == spatial_dimension_to_split) {
          // Insert the batch dim right before this spatial dim.
          transpose_dims[dim_counter++] = activations_batch_dim;
          new_batch_dim = pushed_counter;
          pushed_counter++;
          new_spatial_dim = pushed_counter;
        }

        if (is_backprop && i == dim_numbers.input_batch_dimension()) {
          new_dim_numbers.set_input_batch_dimension(pushed_counter);
        } else if (i == dim_numbers.input_feature_dimension()) {
          new_dim_numbers.set_input_feature_dimension(pushed_counter);
        } else {
          auto it = absl::c_find(dim_numbers.input_spatial_dimensions(), i);
          if (it != dim_numbers.input_spatial_dimensions().end()) {
            int64_t j = it - dim_numbers.input_spatial_dimensions().begin();
            new_dim_numbers.set_input_spatial_dimensions(j, pushed_counter);
          }
        }
        transpose_dims[dim_counter++] = i;
        pushed_counter++;
      }

      activations_batch_dim = new_batch_dim;
      spatial_dimension_to_split = new_spatial_dim;
      TF_ASSIGN_OR_RETURN(activations,
                          MakeTransposeHlo(activations, transpose_dims));

      // For backprop the feature dim plays the batch role.
      if (is_backprop) {
        new_dim_numbers.set_input_feature_dimension(activations_batch_dim);
      } else {
        new_dim_numbers.set_input_batch_dimension(activations_batch_dim);
      }
    }

    dim_numbers = new_dim_numbers;
  }

  // Report the (possibly shifted) contiguous spatial dims back to the caller.
  for (int64_t i = 0; i < spatial_dimensions_to_split->size(); ++i) {
    (*spatial_dimensions_to_split)[i] = spatial_dimension_to_split + i;
  }

  return SpaceNextToBatchDetails{activations, transpose_dims};
}

// Reshapes the merged batch dim into (old_batch, splits per spatial dim...)
// and, when several spatial dims were split, transposes so each split dim
// sits next to its spatial dim.
absl::StatusOr<HloInstruction*>
ConvolutionVisitor::SplitAndTransposeMergedBatch(
    HloInstruction* activations, int64_t batch_dimension,
    int64_t old_batch_size, absl::Span<const int64_t> spatial_dimensions) {
  CHECK_EQ(batch_dimension + 1, spatial_dimensions[0]);
  std::vector<int64_t> new_dimensions(activations->shape().dimensions().begin(),
                                      activations->shape().dimensions().end());

  const int64_t new_batch_size =
      activations->shape().dimensions(batch_dimension);

  VLOG(3) << "Decreasing the spatial size while propagating new_batch_size "
          << new_batch_size << " old_batch_size " << old_batch_size;

  new_dimensions[batch_dimension] = old_batch_size;

  const int64_t spatial_dim_count = spatial_dimensions.size();
  // One extra `number_of_splits` dim per split spatial dimension.
  for (int64_t i = 0; i < spatial_dim_count; ++i) {
    new_dimensions.insert(new_dimensions.begin() + spatial_dimensions[0],
                          ctrl_.number_of_splits);
  }
  TF_ASSIGN_OR_RETURN(HloInstruction * batch_split_activations,
                      MakeReshapeHlo(new_dimensions, activations));

  if (spatial_dim_count > 1) {
    // Interleave split dims with their spatial dims:
    // (B, S1', S2', S1, S2) -> (B, S1', S1, S2', S2).
    std::vector<int64_t> transpose_dims(new_dimensions.size());
    absl::c_iota(transpose_dims, 0);
    std::vector<int64_t> trans_dims(new_dimensions.size());
    absl::c_iota(trans_dims, 0);

    int64_t start_batch_dim_position = batch_dimension + 1;
    int64_t start_space_dim_position = batch_dimension + 2;

    for (int i = 0; i < spatial_dim_count; ++i) {
      transpose_dims[start_batch_dim_position + 2 * i] =
          batch_dimension + spatial_dim_count - i;
      transpose_dims[start_space_dim_position + 2 * i] =
          batch_dimension + spatial_dim_count + 1 + i;
    }

    TF_ASSIGN_OR_RETURN(
        batch_split_activations,
        MakeTransposeHlo(batch_split_activations, transpose_dims));
  }
  return batch_split_activations;
}

// Changes the per-split spatial size of a space-to-batched shape: unmerges
// the batch, collapses (split, space) back to the full space, pads or slices
// it to the new size, then re-splits the space into the batch.
absl::StatusOr<HloInstruction*>
ConvolutionVisitor::ChangeSpatialSizeOnSpaceToBatchedShape(
    HloInstruction* activations, int64_t batch_dimension,
    int64_t old_batch_size, absl::Span<const int64_t> spatial_dimensions,
    int64_t new_spatial_dim_size, bool increase_spatial_size) {
  CHECK_EQ(batch_dimension + 1, spatial_dimensions[0]);
  std::vector<int64_t> new_dimensions(activations->shape().dimensions().begin(),
                                      activations->shape().dimensions().end());

  const int64_t spatial_dim_count = spatial_dimensions.size();
  const int64_t spatial_dim_size =
      activations->shape().dimensions(spatial_dimensions[0]);
  const int64_t reshaped_space_size = spatial_dim_size * ctrl_.number_of_splits;

  TF_ASSIGN_OR_RETURN(
      HloInstruction * batch_split_activations,
      SplitAndTransposeMergedBatch(activations, batch_dimension,
                                   old_batch_size, spatial_dimensions));

  // Collapse each (split, space) pair into one full-size spatial dim.
  std::vector<int64_t> batch_space_collapse_reshape_dims(
      batch_split_activations->shape().dimensions().begin(),
      batch_split_activations->shape().dimensions().end());

  batch_space_collapse_reshape_dims.erase(
      batch_space_collapse_reshape_dims.begin() + spatial_dimensions[0],
      batch_space_collapse_reshape_dims.begin() + spatial_dimensions[0] +
          spatial_dim_count);

  for (auto spatial_dimension : spatial_dimensions) {
    batch_space_collapse_reshape_dims[spatial_dimension] = reshaped_space_size;
  }

  TF_ASSIGN_OR_RETURN(HloInstruction * batch_space_collapsed_reshape,
                      MakeReshapeHlo(batch_space_collapse_reshape_dims,
                                     batch_split_activations));

  VLOG(3) << "First reshape done";

  const int64_t rank = activations->shape().rank();

  if (increase_spatial_size) {
    // Grow: pad the collapsed spatial dim up to the requested size.
    PaddingConfig padding_config = MakeNoPaddingConfig(
        batch_space_collapsed_reshape->shape().dimensions_size());
    for (auto spatial_dimension : spatial_dimensions) {
      padding_config.mutable_dimensions(spatial_dimension)
          ->set_edge_padding_high(new_spatial_dim_size *
                                      ctrl_.number_of_splits -
                                  reshaped_space_size);
      padding_config.mutable_dimensions(spatial_dimension)
          ->set_edge_padding_low(0);
    }
    HloInstruction* padding = activations->AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::Zero(
            batch_space_collapsed_reshape->shape().element_type())));

    TF_ASSIGN_OR_RETURN(
        batch_space_collapsed_reshape,
        MakePadHlo(batch_space_collapsed_reshape, padding, padding_config,
                   &batch_space_collapsed_reshape->metadata(),
                   &batch_space_collapsed_reshape->frontend_attributes()));
  } else {
    // Shrink: slice the collapsed spatial dim down to the requested size.
    std::vector<int64_t> start_indices(rank, 0),
        end_indices(batch_space_collapsed_reshape->shape().dimensions().begin(),
                    batch_space_collapsed_reshape->shape().dimensions().end()),
        strides(rank, 1);
    for (auto spatial_dimension : spatial_dimensions) {
      end_indices[spatial_dimension] =
          new_spatial_dim_size * ctrl_.number_of_splits;
    }

    TF_ASSIGN_OR_RETURN(
        batch_space_collapsed_reshape,
        MakeSliceHlo(batch_space_collapsed_reshape, start_indices, end_indices,
                     strides, &batch_space_collapsed_reshape->metadata(),
                     &batch_space_collapsed_reshape->frontend_attributes()));
  }

  TF_ASSIGN_OR_RETURN(
      HloInstruction * activations_new,
      PerformSplitSpace(batch_space_collapsed_reshape, spatial_dimensions,
                        batch_dimension, new_spatial_dim_size,
                        ctrl_.number_of_splits));

  VLOG(3) << "Size decreased activations " <<
      activations_new->ToString();

  return activations_new;
}

// Drives the pass: rewrites every worklist conv, then resolves leftover
// non-propagatable instructions (either by a late propagation or by
// converting their rewritten operands back with BatchToSpace).
absl::StatusOr<bool> ConvolutionVisitor::Run() {
  for (auto conv : conv_visitor_list_) {
    // Skip convs whose users cannot absorb the transform (when configured).
    if (ctrl_.disable_starting_on_small_chains &&
        DoesConvolutionFeedUnpropagatableOp(conv)) {
      VLOG(1) << "Giving up on conv " << conv->ToString()
              << " because it feeds an unpropagatable op";
      convs_to_visit_.erase(conv);
    }
    if (convs_to_visit_.count(conv) > 0) {
      TF_CHECK_OK(PerformSpaceToBatchOnConvolution(conv));
      changed_ = true;
    }
  }
  conv_visitor_list_.clear();
  convs_to_visit_.clear();
  // Iterate through all instructions that we could not propagate through, and
  // turn their operands from batch-to-space as needed.
  for (auto instr : non_propagatable_instrs_) {
    if (instr->opcode() == HloOpcode::kConvolution) {
      VLOG(1) << "Instr " << instr->ToString();
    }
    // Try a late propagation for convs that became eligible after rewrites.
    if (instr->opcode() == HloOpcode::kConvolution &&
        !IsConvSuitableForSpaceToBatch(instr)) {
      HloInstruction* producer = nullptr;
      if (old_to_new_instrs_.contains(instr->mutable_operand(0))) {
        producer = instr->mutable_operand(0);
      } else if (old_to_new_instrs_.contains(instr->mutable_operand(1))) {
        producer = instr->mutable_operand(1);
      }
      if (producer) {
        if (CanPropagate(instr, producer)) {
          bool needs_further_propagation;
          TF_ASSIGN_OR_RETURN(needs_further_propagation,
                              Propagate(instr, producer));
          TF_CHECK_OK(computation_->ReplaceInstruction(
              instr, old_to_new_instrs_[instr]));
          continue;
        }
      }
    }
    VLOG(1) << "Could not eventually propagate through " << instr->ToString();
    // Otherwise, convert every rewritten operand back to the old layout.
    absl::flat_hash_map<int64_t, HloInstruction*> operand_map;
    for (int64_t i = 0; i < instr->operand_count(); ++i) {
      if (old_to_new_instrs_.count(instr->mutable_operand(i))) {
        TF_ASSIGN_OR_RETURN(operand_map[i],
                            BatchToSpace(instr->mutable_operand(i)));
      }
    }
    for (auto entry : operand_map) {
      TF_CHECK_OK(instr->ReplaceOperandWith(entry.first, entry.second));
    }
  }
  non_propagatable_instrs_.clear();
  return changed_;
}

// Elementwise ops we are willing to propagate through; excludes opcodes that
// are elementwise-like but carry extra semantics (fusion, rng, copy, ...).
bool IsTrivialElementwise(HloInstruction* hlo) {
  if (hlo->opcode() == HloOpcode::kFusion || hlo->opcode() == HloOpcode::kRng ||
      hlo->opcode() == HloOpcode::kCopy ||
      hlo->opcode() == HloOpcode::kConstant ||
      hlo->opcode() == HloOpcode::kIota ||
      hlo->opcode() == HloOpcode::kMap) {
    return false;
  }
  return hlo->IsElementwise();
}

// Decides whether the space-to-batch layout can flow from `producer` into
// `consumer` (NOTE: body continues beyond this chunk).
bool ConvolutionVisitor::CanPropagate(HloInstruction* consumer,
                                      HloInstruction* producer) {
  if (IsTrivialElementwise(consumer)) {
    VLOG(2) << "Doing propagation check on elementwise op: "
            << consumer->ToString();

    HloInstruction* pivot_operand = nullptr;
    for (int64_t i = 0; i < consumer->operand_count(); ++i) {
      auto old_producer = consumer->mutable_operand(i);
      std::vector<HloInstruction*> to_transform;
      const bool broadcast_or_constant =
          (old_producer->opcode() == HloOpcode::kConstant) ||
          (old_producer->opcode() == HloOpcode::kBroadcast &&
           IsBroadcastPropagatable(old_producer, producer)) ||
          (consumer->IsElementwiseBinary() &&
           old_producer->opcode() == HloOpcode::kBroadcast &&
           IsBroadcastTree(old_producer, producer, to_transform));

      if (!old_to_new_instrs_.contains(old_producer) &&
          !broadcast_or_constant) {
        VLOG(1) << "Cannot propagate on elementwise op "
                << consumer->ToString() << " because operand "
                << old_producer->ToString() << " isn't ready ";
        return false;
      } else {
        if (broadcast_or_constant) {
          VLOG(2) << "Skipping on " << old_producer->ToString();
          continue;
        }

        CHECK(old_to_new_instrs_.contains(old_producer));

        CHECK(instr_to_dim_map_.contains(old_producer));
        if (pivot_operand == nullptr) {
          pivot_operand = old_producer;
          VLOG(2) << "Elementwise op: pivot " << old_producer->ToString();
        } else {
          // All non-skipped operands must agree with the pivot's batch/space
          // dim mapping, permutation, and shape.
          if (instr_to_dim_map_[pivot_operand]
                               [DimMapper(SpaceToBatchDimMap::kBatch)] !=
                  instr_to_dim_map_[old_producer]
                                   [DimMapper(SpaceToBatchDimMap::kBatch)] ||
              instr_to_dim_map_[pivot_operand]
                               [DimMapper(SpaceToBatchDimMap::kSpace0)] !=
                  instr_to_dim_map_[old_producer]
                                   [DimMapper(SpaceToBatchDimMap::kSpace0)]) {
            VLOG(2) << "Elementwise op: checking for shape equivalence "
                    << consumer->ToString()
                    << " failed due to changed batch space ordering ";
            return false;
          }
          auto pivot_new_instr = old_to_new_instrs_[pivot_operand];
          auto pivot_permute_dims = instr_to_dim_permute_map_[pivot_new_instr];
          auto new_instr =
old_to_new_instrs_[old_producer]; auto permute_dims = instr_to_dim_permute_map_[new_instr]; for (int j = 0; j < pivot_permute_dims.size(); ++j) { if (pivot_permute_dims[j] != permute_dims[j]) { VLOG(2) << "Elementwise op: checking for shape equivalence " << consumer->ToString() << " failed due to permuted dimensions "; return false; } if (pivot_new_instr->shape().dimensions(j) != new_instr->shape().dimensions(j)) { if (!((consumer->IsElementwiseBinary() || consumer->opcode() == HloOpcode::kSelect) && j == instr_to_dim_map_[pivot_operand][DimMapper( SpaceToBatchDimMap::kSpace0)])) { VLOG(2) << "Elementwise op: checking for shape equivalence " << consumer->ToString() << " failed due to changed shape sizes "; return false; } } } } } } } if (consumer->opcode() == HloOpcode::kConcatenate) { for (int64_t i = 0; i < consumer->operand_count(); ++i) { if (!instr_to_dim_map_.contains(consumer->mutable_operand(i))) { return false; } } auto pivot_operand = consumer->mutable_operand(0); auto pivot_new_instr = old_to_new_instrs_[pivot_operand]; auto pivot_permute_dims = instr_to_dim_permute_map_[pivot_new_instr]; for (int64_t i = 1; i < consumer->operand_count(); ++i) { auto new_instr = old_to_new_instrs_[consumer->mutable_operand(i)]; auto permute_dims = instr_to_dim_permute_map_[new_instr]; for (int j = 0; j < pivot_permute_dims.size(); ++j) { if (pivot_permute_dims[j] != permute_dims[j]) { VLOG(2) << "Concat op: checking for shape equivalence " << consumer->ToString() << " failed due to permuted dimensions "; return false; } if (pivot_new_instr->shape().dimensions(j) != new_instr->shape().dimensions(j)) { VLOG(2) << "Concat op: checking for shape equivalence " << consumer->ToString() << " failed due to changed shape sizes "; return false; } } } return true; } if (consumer->opcode() == HloOpcode::kConvolution) { if (!ConsumeFuel("space-to-batch-converter", [&] { return "Skipping space-to-batch propagation because fuel over\n"; })) { return false; } auto 
are_conv_dims_compatible = [&](const ConvolutionDimensionNumbers dim_numbers, std::vector<int64_t>& dim_map, bool check_lhs) { if (check_lhs) { if (dim_numbers.input_spatial_dimensions( GetFirstChosenSpatialDim(consumer)) != dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)]) { return false; } for (int i = 0; i < dim_numbers.input_spatial_dimensions().size(); ++i) { if (dim_numbers.input_spatial_dimensions(i) == dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] || dim_numbers.input_spatial_dimensions(i) == dim_map[DimMapper(SpaceToBatchDimMap::kFeature)]) { return false; } } } else { if (dim_numbers.kernel_spatial_dimensions( GetFirstChosenSpatialDim(consumer)) != dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)]) { return false; } for (int i = 0; i < dim_numbers.kernel_spatial_dimensions().size(); ++i) { if (dim_numbers.kernel_spatial_dimensions(i) == dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] || dim_numbers.kernel_spatial_dimensions(i) == dim_map[DimMapper(SpaceToBatchDimMap::kFeature)]) { return false; } } } return true; }; VLOG(1) << "Checking if conv is supported for propagation " << consumer->ToString(); bool found_good_non_window_dilated_conv = true; if (IsConvSuitableForSpaceToBatch(consumer)) { if (!old_to_new_instrs_.contains(consumer->mutable_operand(0))) { found_good_non_window_dilated_conv = false; } ConvolutionDimensionNumbers dim_numbers = consumer->convolution_dimension_numbers(); ConvDetails c = GetConvolutionDetails(consumer, dim_numbers); auto retval = GetSpatialDimsToSplit(consumer->mutable_operand(0)); std::vector<int64_t> new_spatial_dims = retval.second; auto new_activations = old_to_new_instrs_[consumer->mutable_operand(0)]; if (new_activations->shape().dimensions(retval.second[0]) < c.inherent_low_padding) { return false; } auto dim_map_val_op_0 = instr_to_dim_map_[consumer->mutable_operand(0)]; if (!are_conv_dims_compatible(consumer->convolution_dimension_numbers(), dim_map_val_op_0, true)) { found_good_non_window_dilated_conv = false; } if 
(consumer->convolution_dimension_numbers().input_batch_dimension() != dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)]) { found_good_non_window_dilated_conv = false; } if (found_good_non_window_dilated_conv) { return true; } } if (!ctrl_.enable_propagations_on_window_dilations) { return false; } if (!IsThisBackPropFilterConv(consumer)) { return false; } if (GetFirstChosenSpatialDim(consumer) < 0) { return false; } if (consumer->window() .dimensions(GetFirstChosenSpatialDim(consumer)) .stride() != 1) { return false; } if (consumer->feature_group_count() != 1) { return false; } VLOG(2) << "Checking for backprop filter conv propagatability"; CHECK_EQ(consumer->operand_count(), 2); auto activations = consumer->mutable_operand(0); auto kernel = consumer->mutable_operand(1); auto win_dims = consumer->window().dimensions(GetFirstChosenSpatialDim(consumer)); const int64_t rhs_dilation = win_dims.window_dilation(); const int64_t lhs_dilation = win_dims.base_dilation(); if (lhs_dilation != 1) { return false; } if (rhs_dilation == 1 && !ctrl_.enable_propagations_on_trivial_window_dilations) { if (!old_to_new_instrs_.contains(kernel) || !old_to_new_instrs_.contains(activations)) { return false; } } if (!old_to_new_instrs_.contains(kernel)) { const int64_t rhs_batch = kernel->shape().dimensions(consumer->convolution_dimension_numbers() .kernel_input_feature_dimension()); auto dim_map_val_op_0 = instr_to_dim_map_[activations]; const int64_t old_batch_dim = dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)]; const int64_t old_space_dim = dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kSpace0)]; auto first_operand = old_to_new_instrs_[activations]; auto permute_dims_first_operand = instr_to_dim_permute_map_[first_operand]; const int64_t new_batch_dim = DimLookUp(permute_dims_first_operand, old_batch_dim); const int64_t new_space_dim = DimLookUp(permute_dims_first_operand, old_space_dim); const int64_t lhs_batch = first_operand->shape().dimensions(new_batch_dim); if 
(first_operand->shape().dimensions(new_space_dim) % rhs_dilation != 0) { return false; } if (rhs_batch * ctrl_.number_of_splits != lhs_batch) { return false; } if (!are_conv_dims_compatible(consumer->convolution_dimension_numbers(), dim_map_val_op_0, true)) { return false; } VLOG(2) << "Backprop filter conv ready for propagation: activations ready, " " kernel will be space-to-batched"; return true; } if (!old_to_new_instrs_.contains(activations)) { const int64_t lhs_batch = activations->shape().dimensions( consumer->convolution_dimension_numbers().input_feature_dimension()); auto dim_map_val_op_1 = instr_to_dim_map_[consumer->mutable_operand(1)]; const int64_t old_batch_dim = dim_map_val_op_1[DimMapper(SpaceToBatchDimMap::kBatch)]; auto second_operand = old_to_new_instrs_[kernel]; auto permute_dims_second_operand = instr_to_dim_permute_map_[second_operand]; const int64_t new_batch_dim = DimLookUp(permute_dims_second_operand, old_batch_dim); const int64_t rhs_batch = second_operand->shape().dimensions(new_batch_dim); if (rhs_batch != ctrl_.number_of_splits * lhs_batch) { return false; } if (!are_conv_dims_compatible(consumer->convolution_dimension_numbers(), dim_map_val_op_1, false)) { return false; } VLOG(2) << "Backprop filter conv ready for propagation: kernel ready, " " activations will be space-to-batched"; return true; } auto first_operand = old_to_new_instrs_[activations]; auto dim_map_val_op_0 = instr_to_dim_map_[activations]; auto second_operand = old_to_new_instrs_[kernel]; auto dim_map_val_op_1 = instr_to_dim_map_[kernel]; auto permute_dims_first_operand = instr_to_dim_permute_map_[first_operand]; auto permute_dims_second_operand = instr_to_dim_permute_map_[second_operand]; const int64_t new_batch_dim_operand_0 = DimLookUp(permute_dims_first_operand, dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)]); const int64_t new_space_dim_operand_0 = DimLookUp(permute_dims_first_operand, dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kSpace0)]); const int64_t 
new_batch_dim_operand_1 = DimLookUp(permute_dims_second_operand, dim_map_val_op_1[DimMapper(SpaceToBatchDimMap::kBatch)]); const int64_t new_space_dim_operand_1 = DimLookUp(permute_dims_second_operand, dim_map_val_op_1[DimMapper(SpaceToBatchDimMap::kSpace0)]); if (first_operand->shape().dimensions(new_batch_dim_operand_0) != second_operand->shape().dimensions(new_batch_dim_operand_1)) { VLOG(2) << "Backprop filter conv not ready for propagation because batch " "dimensions don't line up"; return false; } if (first_operand->shape().dimensions(new_space_dim_operand_0) > rhs_dilation * second_operand->shape().dimensions(new_space_dim_operand_1)) { VLOG(2) << "Backprop filter conv not ready for propagation because of " "dilation factor mismatch"; return false; } if (!are_conv_dims_compatible(consumer->convolution_dimension_numbers(), dim_map_val_op_0, true)) { return false; } if (!are_conv_dims_compatible(consumer->convolution_dimension_numbers(), dim_map_val_op_1, false)) { return false; } VLOG(2) << "Backprop filter conv ready for propagation"; return true; } if (consumer->opcode() == HloOpcode::kReduceWindow || consumer->opcode() == HloOpcode::kReduce) { for (int64_t i = 0; i < consumer->operand_count(); ++i) { auto old_producer = consumer->mutable_operand(i); if (i == 0 && !old_to_new_instrs_.contains(old_producer)) { return false; } } if (consumer->opcode() == HloOpcode::kReduceWindow) { return IsSpaceToBatchedSpaceSizeSuitable(consumer); } } if (consumer->opcode() == HloOpcode::kSelectAndScatter) { for (int64_t i = 0; i < consumer->operand_count(); ++i) { auto old_producer = consumer->mutable_operand(i); if (i < 2 && !old_to_new_instrs_.contains(old_producer)) { return false; } } auto first_operand = old_to_new_instrs_[consumer->mutable_operand(0)]; auto dim_map_val_op_0 = instr_to_dim_map_[consumer->mutable_operand(0)]; auto second_operand = old_to_new_instrs_[consumer->mutable_operand(1)]; auto permute_dims_first_operand = 
instr_to_dim_permute_map_[first_operand]; auto permute_dims_second_operand = instr_to_dim_permute_map_[second_operand]; if (permute_dims_first_operand != permute_dims_second_operand) { VLOG(2) << "Can't propagate through select and scatter due to " "permutation mismatch"; return false; } const int64_t old_batch_dim = dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kBatch)]; const int64_t old_space_dim = dim_map_val_op_0[DimMapper(SpaceToBatchDimMap::kSpace0)]; const int64_t new_batch_dim = DimLookUp(permute_dims_first_operand, old_batch_dim); const int64_t new_space_dim = DimLookUp(permute_dims_first_operand, old_space_dim); if (first_operand->shape().dimensions(new_batch_dim) != second_operand->shape().dimensions(new_batch_dim)) { VLOG(2) << "Can't propagate through select and scatter due to dim mismatch"; return false; } const int64_t stride = consumer->window().dimensions(old_space_dim).stride(); const int64_t pad_high = consumer->window().dimensions(old_space_dim).padding_high(); const int64_t pad_low = consumer->window().dimensions(old_space_dim).padding_low(); if ((first_operand->shape().dimensions(new_space_dim) + pad_high + pad_low) / stride != second_operand->shape().dimensions(new_space_dim)) { VLOG(2) << "Can't propagate through select and scatter due to stride " "mismatch"; return false; } return IsSpaceToBatchedSpaceSizeSuitable(consumer); } return true; } void ConvolutionVisitor::PropagateOnBroadcast(HloInstruction* consumer, HloInstruction* producer) { auto new_producer = old_to_new_instrs_[producer]; auto permute_dims = instr_to_dim_permute_map_[new_producer]; auto dim_map_val = instr_to_dim_map_[producer]; const int64_t old_batch_dim = dim_map_val[DimMapper(SpaceToBatchDimMap::kBatch)]; const int64_t old_space_dim = dim_map_val[DimMapper(SpaceToBatchDimMap::kSpace0)]; auto orig_broadcast_dims = consumer->dimensions(); bool batch_is_broadcasted = absl::c_linear_search(orig_broadcast_dims, old_batch_dim); const int64_t new_batch_dim = 
DimLookUp(permute_dims, old_batch_dim); const int64_t new_space_dim = DimLookUp(permute_dims, old_space_dim); bool map_found = broadcast_map_.contains(consumer); if (map_found) { for (auto previous_broadcast : broadcast_map_[consumer]) { if (ShapeUtil::CompatibleIgnoringElementType(previous_broadcast->shape(), new_producer->shape())) { return; } } } std::vector<int64_t> final_shape_dims( new_producer->shape().dimensions().begin(), new_producer->shape().dimensions().end()); if (batch_is_broadcasted) { final_shape_dims[new_batch_dim] = producer->shape().dimensions(old_batch_dim); final_shape_dims[new_space_dim] *= ctrl_.number_of_splits; } std::vector<int64_t> broadcast_dims; const auto& dimensions = consumer->dimensions(); broadcast_dims.reserve(dimensions.size()); for (auto j : dimensions) { broadcast_dims.push_back(DimLookUp(permute_dims, j)); } auto new_broadcast = MakeBroadcastHlo( consumer->mutable_operand(0), broadcast_dims, final_shape_dims, &consumer->metadata(), &consumer->frontend_attributes()); VLOG(1) << "Created broadcast " << new_broadcast->ToString(); if (batch_is_broadcasted) { new_broadcast = MakeReshapeHlo(new_producer->shape().dimensions(), new_broadcast) .value(); VLOG(2) << "Created reshape of broadcast " << new_broadcast->ToString(); } if (!map_found) { absl::flat_hash_set<HloInstruction*> set_of_broadcasts; broadcast_map_[consumer] = set_of_broadcasts; } broadcast_map_[consumer].insert(new_broadcast); } void ConvolutionVisitor::RewriteBroadcastTree( HloInstruction* producer, std::vector<HloInstruction*>& instructions_to_transform) { CHECK(old_to_new_instrs_.contains(producer)); for (auto instr : instructions_to_transform) { if (instr->opcode() == HloOpcode::kBroadcast) { PropagateOnBroadcast(instr, producer); } else if (IsTrivialElementwise(instr)) { Propagate(instr, instr->mutable_operand(0)).value(); } else { LOG(FATAL) << "Unsupported opcode in RewriteBroadcastTree"; } } } bool ConvolutionVisitor::IsBroadcastTree( HloInstruction* op, 
HloInstruction* consumer, std::vector<HloInstruction*>& instructions_to_transform) { if (op->opcode() == HloOpcode::kBroadcast) { if (IsBroadcastPropagatable(op, consumer)) { instructions_to_transform.push_back(op); return true; } else { return false; } } if (Match(op, m::ConstantScalar())) { return true; } if (!IsTrivialElementwise(op)) { return false; } for (int64_t i = 0; i < op->operand_count(); ++i) { if (!IsBroadcastTree(op->mutable_operand(i), consumer, instructions_to_transform)) { return false; } } instructions_to_transform.push_back(op); return true; } bool ConvolutionVisitor::IsBroadcastPropagatable(HloInstruction* broadcast, HloInstruction* old_other_op) { CHECK_EQ(broadcast->opcode(), HloOpcode::kBroadcast); CHECK(instr_to_dim_map_.contains(old_other_op)); auto result = instr_to_dim_map_[old_other_op]; const int64_t space_dim = result[DimMapper(SpaceToBatchDimMap::kSpace0)]; auto broadcast_dims = broadcast->dimensions(); return !absl::c_linear_search(broadcast_dims, space_dim); } bool ConvolutionVisitor::IsOpcodeNonPropagatable(HloInstruction* consumer) { switch (consumer->opcode()) { case HloOpcode::kCustomCall: return true; default: return false; } } bool ConvolutionVisitor::SupportedDotForPropagation(HloInstruction* consumer, HloInstruction* producer) { if (consumer->opcode() != HloOpcode::kDot) { return false; } auto operand = consumer->mutable_operand(0); if (operand != producer || !instr_to_dim_map_.contains(operand)) { return false; } const auto& dnums = consumer->dot_dimension_numbers(); const auto& contracting_dims = dnums.lhs_contracting_dimensions(); const auto& batch_dims = dnums.lhs_batch_dimensions(); auto result = instr_to_dim_map_[operand]; const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)]; const int64_t old_space_dim = result[DimMapper(SpaceToBatchDimMap::kSpace0)]; const int64_t old_feature_dim = result[DimMapper(SpaceToBatchDimMap::kFeature)]; if (consumer->operand(1)->shape().rank() == batch_dims.size() + 
contracting_dims.size()) { return false; } bool found = false; for (auto dim : batch_dims) { if (dim == old_batch_dim || dim == old_space_dim) { return false; } if (dim == old_feature_dim) { found = true; } } if (!found) { return false; } for (auto dim : contracting_dims) { if (dim == old_batch_dim || dim == old_space_dim) { return false; } } return true; } bool ConvolutionVisitor::SupportedOpForPropagation(HloInstruction* consumer, HloInstruction* producer) { if (IsOpcodeNonPropagatable(consumer)) { return false; } if (IsTrivialElementwise(consumer)) { for (int64_t i = 0; i < consumer->operand_count(); ++i) { if (consumer->operand(i)->opcode() == HloOpcode::kBroadcast) { if (!IsBroadcastPropagatable(consumer->mutable_operand(i), producer)) { VLOG(2) << "Could not propagate through broadcast"; return false; } } } return true; } if (consumer->opcode() == HloOpcode::kConvolution) { return true; } if (consumer->opcode() == HloOpcode::kConcatenate) { HloInstruction* pivot_operand = nullptr; for (int64_t i = 0; i < consumer->operand_count(); ++i) { if (instr_to_dim_map_.contains(consumer->mutable_operand(i))) { pivot_operand = consumer->mutable_operand(i); break; } } if (pivot_operand == nullptr) { VLOG(1) << "Concat: Dim map not found on any operand"; return false; } auto result = instr_to_dim_map_[pivot_operand]; const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)]; const int64_t old_space_dim = result[DimMapper(SpaceToBatchDimMap::kSpace0)]; if (consumer->concatenate_dimension() == old_batch_dim || consumer->concatenate_dimension() == old_space_dim) { return false; } return true; } if (consumer->opcode() == HloOpcode::kReverse) { auto operand_0 = consumer->mutable_operand(0); if (!instr_to_dim_map_.contains(operand_0)) { return false; } auto result = instr_to_dim_map_[operand_0]; const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)]; const int64_t old_space_dim = result[DimMapper(SpaceToBatchDimMap::kSpace0)]; for (auto 
dim : consumer->dimensions()) { if (dim == old_batch_dim || dim == old_space_dim) { return false; } } return true; } if (consumer->opcode() == HloOpcode::kTranspose) { return true; } if (consumer->opcode() == HloOpcode::kPad) { auto operand_0 = consumer->mutable_operand(0); if (!instr_to_dim_map_.contains(operand_0)) { return false; } auto result = instr_to_dim_map_[operand_0]; const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)]; const int64_t old_space_dim = result[DimMapper(SpaceToBatchDimMap::kSpace0)]; auto does_dim_have_padding = [](PaddingConfig padding_config, int64_t dim) { return padding_config.dimensions(dim).edge_padding_low() != 0 || padding_config.dimensions(dim).edge_padding_high() != 0 || padding_config.dimensions(dim).interior_padding() != 0; }; if (does_dim_have_padding(consumer->padding_config(), old_batch_dim) || does_dim_have_padding(consumer->padding_config(), old_space_dim)) { return false; } return true; } if (consumer->opcode() == HloOpcode::kSlice) { auto operand = consumer->mutable_operand(0); if (!instr_to_dim_map_.contains(operand)) { return false; } auto result = instr_to_dim_map_[operand]; const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)]; const int64_t old_space_dim = result[DimMapper(SpaceToBatchDimMap::kSpace0)]; if (consumer->shape().dimensions(old_batch_dim) != operand->shape().dimensions(old_batch_dim)) { return false; } if (consumer->shape().dimensions(old_space_dim) != operand->shape().dimensions(old_space_dim)) { return false; } return true; } if (SupportedDotForPropagation(consumer, producer)) { return true; } if (consumer->opcode() == HloOpcode::kReduce) { if (consumer->shape().IsTuple()) { return false; } auto reduce_dims = consumer->dimensions(); auto result = instr_to_dim_map_[consumer->mutable_operand(0)]; const int64_t batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)]; const int64_t space_dim = result[DimMapper(SpaceToBatchDimMap::kSpace0)]; if 
(!absl::c_linear_search(reduce_dims, batch_dim) && !absl::c_linear_search(reduce_dims, space_dim)) { return true; } return absl::c_linear_search(reduce_dims, batch_dim) && absl::c_linear_search(reduce_dims, space_dim); } if (consumer->opcode() == HloOpcode::kReduceWindow && consumer->shape().IsTuple()) { return false; } if (consumer->opcode() == HloOpcode::kReduceWindow || consumer->opcode() == HloOpcode::kSelectAndScatter) { auto first_operand = consumer->mutable_operand(0); auto window = consumer->window(); if (instr_to_dim_map_.count(first_operand) <= 0) { VLOG(1) << "Dim map not found on windowed operand. Window dim count " << window.dimensions().size(); return false; } auto result = instr_to_dim_map_[first_operand]; const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)]; const int64_t old_space_dim = result[DimMapper(SpaceToBatchDimMap::kSpace0)]; if (window.dimensions(old_batch_dim).size() != 1) { return false; } if (window.dimensions(old_space_dim).padding_low() != 0) { return false; } if (window.dimensions(old_space_dim).base_dilation() != 1 || window.dimensions(old_space_dim).window_dilation() != 1) { return false; } if (window.dimensions(old_batch_dim).base_dilation() != 1 || window.dimensions(old_batch_dim).window_dilation() != 1) { return false; } if (window.dimensions(old_space_dim).padding_high() > window.dimensions(old_space_dim).size()) { return false; } if (old_to_new_instrs_.count(first_operand) <= 0) { return false; } auto new_operand = old_to_new_instrs_[first_operand]; auto permute_dims = instr_to_dim_permute_map_[new_operand]; if (consumer->opcode() == HloOpcode::kSelectAndScatter) { const int64_t new_space_dim = DimLookUp(permute_dims, old_space_dim); if (new_operand->shape().dimensions(new_space_dim) % window.dimensions(old_space_dim).stride() != 0) { return false; } if (!ShapeUtil::ElementIsFloating(consumer->shape())) { return false; } auto scatter_comp = consumer->scatter(); if 
(!Match(scatter_comp->root_instruction(), m::AddAnyOrder(m::Parameter(0), m::Parameter(1)))) { return false; } auto select_comp = consumer->select(); if (!Match(select_comp->root_instruction(), m::Compare(m::Parameter(0), m::Parameter(1)) .WithComparisonDirection(ComparisonDirection::kGe)) && !Match(select_comp->root_instruction(), m::Compare(m::Parameter(1), m::Parameter(0)) .WithComparisonDirection(ComparisonDirection::kGe))) { return false; } if (consumer->window().dimensions(old_space_dim).padding_low() != 0) { return false; } } return true; } return false; } absl::StatusOr<bool> ConvolutionVisitor::Propagate(HloInstruction* consumer, HloInstruction* producer) { auto computation = consumer->parent(); if (IsTrivialElementwise(consumer)) { auto dim_map_val = instr_to_dim_map_[producer]; auto new_consumer = computation->AddInstruction(consumer->Clone()); bool is_pivot_producer_modified = false; if (consumer->IsElementwiseBinary() || consumer->opcode() == HloOpcode::kSelect) { int64_t pivot_operand_number = -1; HloInstruction* pivot_operand = nullptr; for (int i = 0; i < consumer->operand_count(); ++i) { if (consumer->operand(i)->opcode() == HloOpcode::kBroadcast) { continue; } auto operand = consumer->mutable_operand(i); if (old_to_new_instrs_.contains(operand)) { if (pivot_operand_number == -1 || old_to_new_instrs_[pivot_operand]->shape().dimensions() < old_to_new_instrs_[operand]->shape().dimensions()) { is_pivot_producer_modified = true; pivot_operand_number = i; pivot_operand = consumer->mutable_operand(pivot_operand_number); } } } if (pivot_operand_number != -1) { producer = pivot_operand; } } for (int64_t i = 0; i < consumer->operand_count(); ++i) { std::vector<HloInstruction*> instructions_to_transform; if (consumer->operand(i)->opcode() == HloOpcode::kBroadcast) { auto broadcast = consumer->mutable_operand(i); PropagateOnBroadcast(broadcast, producer); HloInstruction* new_broadcast = nullptr; auto new_producer = old_to_new_instrs_[producer]; for (auto 
previous_broadcast : broadcast_map_[broadcast]) { if (ShapeUtil::CompatibleIgnoringElementType( previous_broadcast->shape(), new_producer->shape())) { new_broadcast = previous_broadcast; break; } } CHECK_NE(new_broadcast, nullptr); TF_CHECK_OK( new_consumer->ReplaceOperandWithDifferentShape(i, new_broadcast)); } else if (old_to_new_instrs_.contains(consumer->mutable_operand(i))) { HloInstruction* operand_to_use = nullptr; auto result = instr_to_dim_map_[producer]; const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)]; const int64_t old_space_dim = result[DimMapper(SpaceToBatchDimMap::kSpace0)]; const int64_t old_batch_size = producer->shape().dimensions(old_batch_dim); HloInstruction* new_instr = old_to_new_instrs_[consumer->mutable_operand(i)]; HloInstruction* pivot_new_instr = old_to_new_instrs_[producer]; auto permute_dims = instr_to_dim_permute_map_[new_instr]; const int64_t batch_dim = DimLookUp(permute_dims, old_batch_dim); const int64_t space_dim = DimLookUp(permute_dims, old_space_dim); const int64_t batch_size = new_instr->shape().dimensions(batch_dim); if (new_instr->shape().dimensions(space_dim) != pivot_new_instr->shape().dimensions(space_dim)) { CHECK_EQ(batch_dim + 1, space_dim); std::vector<int64_t> new_dimensions( new_instr->shape().dimensions().begin(), new_instr->shape().dimensions().end()); new_dimensions[space_dim] *= (batch_size / old_batch_size); new_dimensions[batch_dim] = old_batch_size; TF_ASSIGN_OR_RETURN(HloInstruction * reshape, MakeReshapeHlo(new_dimensions, new_instr)); const int64_t pivot_space_size = pivot_new_instr->shape().dimensions(space_dim) * batch_size / old_batch_size; CHECK(pivot_space_size > new_dimensions[space_dim] || !is_pivot_producer_modified); PaddingConfig padding_config = MakeNoPaddingConfig(reshape->shape().dimensions_size()); padding_config.mutable_dimensions(space_dim)->set_edge_padding_high( pivot_space_size - new_dimensions[space_dim]); 
padding_config.mutable_dimensions(space_dim)->set_edge_padding_low(0); HloInstruction* padding = consumer->AddInstruction(HloInstruction::CreateConstant( LiteralUtil::Zero(reshape->shape().element_type()))); TF_ASSIGN_OR_RETURN( HloInstruction * padded_operand, MakePadHlo(reshape, padding, padding_config, &reshape->metadata(), &reshape->frontend_attributes())); TF_ASSIGN_OR_RETURN( operand_to_use, MakeReshapeHlo(pivot_new_instr->shape().dimensions(), padded_operand)); } else { operand_to_use = old_to_new_instrs_[consumer->mutable_operand(i)]; } TF_CHECK_OK( new_consumer->ReplaceOperandWithDifferentShape(i, operand_to_use)); } else if (consumer->IsElementwiseBinary() && consumer->mutable_operand(i)->opcode() == HloOpcode::kBroadcast && IsBroadcastTree(consumer->mutable_operand(i), producer, instructions_to_transform)) { RewriteBroadcastTree(producer, instructions_to_transform); TF_CHECK_OK(new_consumer->ReplaceOperandWithDifferentShape( i, old_to_new_instrs_[consumer->mutable_operand(i)])); } else if (consumer->operand(i)->opcode() == HloOpcode::kConstant) { TF_ASSIGN_OR_RETURN( auto new_constant, PropagateOnConstant(consumer->mutable_operand(i), producer)); TF_CHECK_OK( new_consumer->ReplaceOperandWithDifferentShape(i, new_constant)); } } auto old_type = new_consumer->mutable_shape()->element_type(); *(new_consumer->mutable_shape()) = old_to_new_instrs_[producer]->shape(); new_consumer->mutable_shape()->set_element_type(old_type); old_to_new_instrs_[consumer] = new_consumer; instr_to_dim_map_[consumer] = std::vector<int64_t>(dim_map_val); CHECK(instr_to_dim_permute_map_.contains(old_to_new_instrs_[producer])); instr_to_dim_permute_map_[new_consumer] = std::vector<int64_t>( instr_to_dim_permute_map_[old_to_new_instrs_[producer]]); VLOG(2) << " new_consumer " << new_consumer->ToString() << " old_to_new_instrs_[producer] " << old_to_new_instrs_[producer]->ToString() << " permute dims " << instr_to_dim_permute_map_.count(new_consumer); return true; } if 
(consumer->opcode() == HloOpcode::kConvolution) { if (IsConvSuitableForSpaceToBatch(consumer)) { TF_CHECK_OK(PropagateOnConv(consumer)); return true; } else { TF_CHECK_OK(PropagateOnBackpropFilterConv(consumer)); return false; } } if (consumer->opcode() == HloOpcode::kConcatenate) { TF_CHECK_OK(PropagateOnConcat(consumer)); return true; } if (consumer->opcode() == HloOpcode::kReverse) { TF_CHECK_OK(PropagateOnReverse(consumer)); return true; } if (consumer->opcode() == HloOpcode::kDot) { auto dim_map_val = instr_to_dim_map_[producer]; const int64_t old_batch_dim = dim_map_val[DimMapper(SpaceToBatchDimMap::kBatch)]; const int64_t old_space_dim = dim_map_val[DimMapper(SpaceToBatchDimMap::kSpace0)]; int64_t new_batch_dim = -1; int64_t new_space_dim = -1; int64_t outer = 0; for (int64_t i = 0; i < producer->shape().rank(); ++i) { if (absl::c_linear_search( consumer->dot_dimension_numbers().lhs_batch_dimensions(), i) || absl::c_linear_search( consumer->dot_dimension_numbers().lhs_contracting_dimensions(), i)) { continue; } if (i == old_batch_dim) { new_batch_dim = outer + consumer->dot_dimension_numbers().lhs_batch_dimensions_size(); } if (i == old_space_dim) { new_batch_dim = outer + consumer->dot_dimension_numbers().lhs_batch_dimensions_size(); } ++outer; } std::vector<int64_t> dim_map(kNumMappedDims); dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] = new_batch_dim; dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] = new_space_dim; dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] = consumer->shape().rank() - 1; instr_to_dim_map_[consumer] = dim_map; auto new_consumer = computation->AddInstruction(consumer->Clone()); new_consumer->mutable_shape()->mutable_dimensions()[new_batch_dim] = producer->shape().dimensions(old_batch_dim); new_consumer->mutable_shape()->mutable_dimensions()[new_space_dim] = producer->shape().dimensions(old_space_dim); old_to_new_instrs_[consumer] = new_consumer; return true; } if (consumer->opcode() == HloOpcode::kPad) { 
TF_CHECK_OK(PropagateOnPad(consumer)); return true; } if (consumer->opcode() == HloOpcode::kSlice) { TF_CHECK_OK(PropagateOnSlice(consumer)); return true; } if (consumer->opcode() == HloOpcode::kReduce) { auto reduce_dims = consumer->dimensions(); auto dim_map_val = instr_to_dim_map_[consumer->mutable_operand(0)]; auto first_operand = old_to_new_instrs_[consumer->mutable_operand(0)]; auto permute_dims = instr_to_dim_permute_map_[first_operand]; const int64_t old_batch_dim = dim_map_val[DimMapper(SpaceToBatchDimMap::kBatch)]; const int64_t space_dim = dim_map_val[DimMapper(SpaceToBatchDimMap::kSpace0)]; const int64_t new_batch_dim = DimLookUp(permute_dims, old_batch_dim); const int64_t new_space_dim = DimLookUp(permute_dims, space_dim); std::vector<int64_t> changed_dims(consumer->dimensions().size()); if (!absl::c_linear_search(reduce_dims, old_batch_dim) && !absl::c_linear_search(reduce_dims, space_dim)) { for (int64_t i = 0; i < consumer->dimensions().size(); ++i) { changed_dims[i] = DimLookUp(permute_dims, consumer->dimensions(i)); } int64_t new_output_batch_dim = new_batch_dim; int64_t new_output_space_dim = new_space_dim; for (int64_t i = 0; i < consumer->dimensions().size(); ++i) { if (changed_dims[i] < new_batch_dim) { new_output_batch_dim--; } if (changed_dims[i] < new_space_dim) { new_output_space_dim--; } } int64_t old_output_batch_dim = old_batch_dim; int64_t old_output_space_dim = space_dim; for (int64_t i = 0; i < consumer->dimensions().size(); ++i) { if (reduce_dims[i] < old_batch_dim) { old_output_batch_dim--; } if (reduce_dims[i] < space_dim) { old_output_space_dim--; } } HloInstruction* new_consumer = nullptr; TF_ASSIGN_OR_RETURN( new_consumer, MakeReduceHlo(first_operand, consumer->mutable_operand(1), changed_dims, consumer->called_computations()[0])); VLOG(3) << " new_output_batch_dim " << new_output_batch_dim << " size " << first_operand->shape().dimensions(new_batch_dim) << " new_output_space_dim " << new_output_space_dim << " size " << 
first_operand->shape().dimensions(new_space_dim); std::vector<int64_t> dim_map(kNumMappedDims); dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] = old_output_batch_dim; dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] = old_output_space_dim; dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] = -1; instr_to_dim_map_[consumer] = dim_map; const int64_t rank = first_operand->shape().rank(); const int64_t output_rank = new_consumer->shape().rank(); std::vector<int64_t> old_reduce_output_to_input(output_rank); int dim_number_to_assign_old = 0; for (int64_t i = 0; i < rank; ++i) { if (auto it = absl::c_find(reduce_dims, i); it != reduce_dims.end()) { continue; } old_reduce_output_to_input[dim_number_to_assign_old++] = i; } std::vector<int64_t> new_reduce_output_to_input(output_rank); int dim_number_to_assign_new = 0; for (int64_t i = 0; i < rank; ++i) { if (auto it = absl::c_find(changed_dims, i); it != changed_dims.end()) { continue; } new_reduce_output_to_input[dim_number_to_assign_new++] = i; } std::vector<int64_t> new_permute_dims(output_rank); for (int64_t i = 0; i < output_rank; ++i) { new_permute_dims[i] = std::distance( new_reduce_output_to_input.begin(), absl::c_find( new_reduce_output_to_input, DimLookUp(permute_dims, old_reduce_output_to_input[i]))); } instr_to_dim_permute_map_[new_consumer] = new_permute_dims; old_to_new_instrs_[consumer] = new_consumer; return true; } HloInstruction* new_consumer = computation->AddInstruction(consumer->Clone()); auto retval = GetSpatialDimsToSplit(consumer->mutable_operand(0)); std::vector<int64_t> old_spatial_dims = retval.first; std::vector<int64_t> new_spatial_dims = retval.second; TF_ASSIGN_OR_RETURN( first_operand, SelectValidPortion(first_operand, consumer->mutable_operand(0), consumer->mutable_operand(1), new_batch_dim, new_spatial_dims, old_batch_dim, old_spatial_dims)); for (int64_t i = 0; i < new_consumer->dimensions().size(); ++i) { changed_dims[i] = DimLookUp(permute_dims, new_consumer->dimensions(i)); } 
*(new_consumer->mutable_dimensions()) = changed_dims; TF_CHECK_OK( new_consumer->ReplaceOperandWithDifferentShape(0, first_operand)); old_to_new_instrs_[consumer] = new_consumer; instr_to_dim_map_[consumer] = std::vector<int64_t>(dim_map_val); return false; } if (consumer->opcode() == HloOpcode::kTranspose) { auto first_operand = old_to_new_instrs_[consumer->mutable_operand(0)]; auto new_consumer = computation->AddInstruction(first_operand->Clone()); old_to_new_instrs_[consumer] = new_consumer; auto dim_map_val = instr_to_dim_map_[consumer->mutable_operand(0)]; const int64_t old_batch_dim = dim_map_val[DimMapper(SpaceToBatchDimMap::kBatch)]; const int64_t old_space_dim = dim_map_val[DimMapper(SpaceToBatchDimMap::kSpace0)]; const int64_t old_feature_dim = dim_map_val[DimMapper(SpaceToBatchDimMap::kFeature)]; int64_t new_batch_dim, new_space_dim, new_feature_dim; std::vector<int64_t> new_dimensions(consumer->dimensions().size()); for (int64_t ctr = 0; ctr < consumer->dimensions().size(); ++ctr) { int64_t dim = consumer->dimensions(ctr); if (dim == old_batch_dim) { new_batch_dim = ctr; } if (dim == old_space_dim) { new_space_dim = ctr; } if (dim == old_feature_dim) { new_feature_dim = ctr; } } std::vector<int64_t> dim_map(kNumMappedDims); dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] = new_batch_dim; dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] = new_feature_dim; dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] = new_space_dim; instr_to_dim_map_[consumer] = dim_map; std::vector<int64_t> new_permute_dims(consumer->dimensions().size()); auto permute_dims = instr_to_dim_permute_map_[first_operand]; for (int64_t i = 0; i < consumer->dimensions().size(); ++i) { new_permute_dims[i] = DimLookUp(permute_dims, consumer->dimensions(i)); } instr_to_dim_permute_map_[new_consumer] = new_permute_dims; return true; } if (consumer->opcode() == HloOpcode::kReduceWindow || consumer->opcode() == HloOpcode::kSelectAndScatter) { bool is_select_and_scatter = consumer->opcode() == 
HloOpcode::kSelectAndScatter; auto first_operand = old_to_new_instrs_[consumer->mutable_operand(0)]; auto init_val = is_select_and_scatter ? consumer->mutable_operand(2) : consumer->mutable_operand(1); auto dim_map_val = instr_to_dim_map_[consumer->mutable_operand(0)]; auto retval = GetSpatialDimsToSplit(consumer->mutable_operand(0)); std::vector<int64_t> old_spatial_dims = retval.first; std::vector<int64_t> new_spatial_dims = retval.second; const int64_t old_batch_dim = dim_map_val[DimMapper(SpaceToBatchDimMap::kBatch)]; const int64_t old_space_dim = old_spatial_dims[0]; auto permute_dims = instr_to_dim_permute_map_[first_operand]; const int64_t new_batch_dim = DimLookUp(permute_dims, old_batch_dim); const int64_t new_space_dim = new_spatial_dims[0]; auto new_shape = first_operand->shape(); auto old_shape = consumer->mutable_operand(0)->shape(); const int64_t new_space_size = new_shape.dimensions(new_space_dim); const int64_t stride = consumer->window().dimensions(old_space_dim).stride(); auto pad_val = is_select_and_scatter ? 
consumer->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::MinValue( consumer->operand(2)->shape().element_type()))) : init_val; TF_ASSIGN_OR_RETURN( first_operand, SelectValidPortion(first_operand, consumer->mutable_operand(0), pad_val, new_batch_dim, new_spatial_dims, old_batch_dim, old_spatial_dims)); const int64_t extra_space = new_space_size % stride; if (extra_space) { CHECK_EQ(consumer->opcode(), HloOpcode::kReduceWindow); const int64_t old_batch_size = old_shape.dimensions(old_batch_dim); const int64_t old_space_size = old_shape.dimensions(old_space_dim); if ((new_space_size - extra_space) * old_batch_size * ctrl_.number_of_splits >= old_batch_size * old_space_size) { TF_ASSIGN_OR_RETURN( first_operand, ChangeSpatialSizeOnSpaceToBatchedShape( first_operand, new_batch_dim, old_batch_size, new_spatial_dims, new_space_size - extra_space)); } else { TF_ASSIGN_OR_RETURN( first_operand, ChangeSpatialSizeOnSpaceToBatchedShape( first_operand, new_batch_dim, old_batch_size, new_spatial_dims, new_space_size + stride - extra_space, true)); } } const int64_t window_size = consumer->window().dimensions(old_space_dim).size(); const int64_t last_overlap_point = ((new_space_size - 1) / stride) * stride; VLOG(1) << "last_overlap_point " << last_overlap_point << " window_size " << window_size << " new_space_size " << new_space_size; const int64_t halo_size = last_overlap_point + window_size - new_space_size; if (halo_size > 0) { TF_ASSIGN_OR_RETURN( first_operand, HaloDuplicateWithSlice(first_operand, new_spatial_dims, new_batch_dim, 0, halo_size, init_val)); } Window new_win; for (int64_t i = 0; i < consumer->window().dimensions().size(); ++i) { auto dim = ReverseDimLookUp(permute_dims, i); new_win.add_dimensions(); new_win.mutable_dimensions(i)->set_stride( consumer->window().dimensions(dim).stride()); new_win.mutable_dimensions(i)->set_size( consumer->window().dimensions(dim).size()); if (i == old_space_dim) { new_win.mutable_dimensions(i)->set_padding_high(0); 
new_win.mutable_dimensions(i)->set_padding_low(0); } else { new_win.mutable_dimensions(i)->set_padding_high( consumer->window().dimensions(dim).padding_high()); new_win.mutable_dimensions(i)->set_padding_low( consumer->window().dimensions(dim).padding_low()); } new_win.mutable_dimensions(i)->set_window_dilation( consumer->window().dimensions(dim).window_dilation()); new_win.mutable_dimensions(i)->set_base_dilation( consumer->window().dimensions(dim).base_dilation()); new_win.mutable_dimensions(i)->set_window_reversal( consumer->window().dimensions(dim).window_reversal()); } new_shape = first_operand->shape(); HloInstruction* new_consumer = nullptr; if (is_select_and_scatter) { auto second_operand = old_to_new_instrs_[consumer->mutable_operand(1)]; auto select_comp = consumer->select(); auto scatter_comp = consumer->scatter(); TF_ASSIGN_OR_RETURN( auto new_select_and_scatter_shape, ShapeInference::InferSelectAndScatterShape( new_shape, select_comp->ComputeProgramShape(), new_win, second_operand->shape(), init_val->shape(), scatter_comp->ComputeProgramShape())); new_consumer = computation_->AddInstruction( HloInstruction::CreateSelectAndScatter( new_select_and_scatter_shape, first_operand, select_comp, new_win, second_operand, init_val, scatter_comp), &consumer->metadata(), &consumer->frontend_attributes()); TF_CHECK_OK( new_consumer->ReplaceOperandWithDifferentShape(0, first_operand)); TF_CHECK_OK( new_consumer->ReplaceOperandWithDifferentShape(1, second_operand)); VLOG(2) << "New select and scatter " << new_consumer->ToString(); if (halo_size > 0) { const int64_t rank = new_consumer->shape().rank(); const int64_t batch_size = new_consumer->shape().dimensions(new_batch_dim); std::vector<int64_t> start_indices(rank, 0), end_indices(new_consumer->shape().dimensions().begin(), new_consumer->shape().dimensions().end()), strides(rank, 1); start_indices[new_space_dim] = new_space_size; end_indices[new_space_dim] = new_space_size + halo_size; end_indices[new_batch_dim] = 
batch_size - 1; TF_ASSIGN_OR_RETURN( HloInstruction * bottom, MakeSliceHlo(new_consumer, start_indices, end_indices, strides, &consumer->metadata(), &consumer->frontend_attributes())); std::vector<int64_t> start_indices_top(rank, 0), end_indices_top(new_consumer->shape().dimensions().begin(), new_consumer->shape().dimensions().end()); end_indices_top[new_space_dim] = halo_size; start_indices_top[new_batch_dim] = 1; TF_ASSIGN_OR_RETURN( HloInstruction * top, MakeSliceHlo(new_consumer, start_indices_top, end_indices_top, strides, &consumer->metadata(), &consumer->frontend_attributes())); HloInstruction* default_fill = MakeBroadcastHlo( init_val, {}, top->shape().dimensions(), &init_val->metadata(), &init_val->frontend_attributes()); TF_ASSIGN_OR_RETURN( HloInstruction * bottom_compare, MakeCompareHlo(ComparisonDirection::kNe, bottom, default_fill, &bottom->metadata(), &bottom->frontend_attributes())); TF_ASSIGN_OR_RETURN( HloInstruction * bottom_taken, MakeSelectHlo(bottom_compare, bottom, default_fill, nullptr, &bottom_compare->metadata(), &bottom_compare->frontend_attributes())); TF_ASSIGN_OR_RETURN( HloInstruction * top_compare, MakeCompareHlo(ComparisonDirection::kNe, top, default_fill, &top->metadata(), &top->frontend_attributes())); TF_ASSIGN_OR_RETURN(HloInstruction * top_taken, MakeSelectHlo(top_compare, top, bottom_taken, nullptr, &top_compare->metadata(), &top_compare->frontend_attributes())); TF_ASSIGN_OR_RETURN(HloInstruction * both_compare, MakeBinaryHlo(HloOpcode::kAnd, top_compare, bottom_compare, &consumer->metadata(), &consumer->frontend_attributes())); TF_ASSIGN_OR_RETURN( HloInstruction * both_added, MakeBinaryHlo(HloOpcode::kAdd, top, bottom, &consumer->metadata(), &consumer->frontend_attributes())); TF_ASSIGN_OR_RETURN( HloInstruction * final_selection, MakeSelectHlo(both_compare, both_added, top_taken, nullptr, &both_compare->metadata(), &both_compare->frontend_attributes())); PaddingConfig padding_config = 
MakeNoPaddingConfig(final_selection->shape().dimensions_size()); padding_config.mutable_dimensions(new_batch_dim) ->set_edge_padding_low(1); padding_config.mutable_dimensions(new_space_dim) ->set_edge_padding_high(new_space_size); HloInstruction* padding = computation_->AddInstruction( HloInstruction::CreateConstant( LiteralUtil::Zero(final_selection->shape().element_type())), &consumer->metadata(), &consumer->frontend_attributes()); TF_ASSIGN_OR_RETURN( final_selection, MakePadHlo(final_selection, padding, padding_config, &final_selection->metadata(), &final_selection->frontend_attributes())); tsl::core::Bitmap b(batch_size * (new_space_size + halo_size)); for (int k = 0; k < batch_size * (new_space_size + halo_size); ++k) { const int64_t space_index = k % (new_space_size + halo_size); const int64_t batch_index = (k / (new_space_size + halo_size)); if (batch_index < 1 || space_index >= halo_size) { b.set(k); } else { b.clear(k); } } auto arg_literal = LiteralUtil::CreateR1(b); VLOG(4) << "Slice mask created: arg literal " << arg_literal.ToString(); HloInstruction* slice_mask = computation_->AddInstruction( HloInstruction::CreateConstant(std::move(arg_literal)), &consumer->metadata(), &consumer->frontend_attributes()); std::vector<int64_t> slice_mask_reshape_dims(2); slice_mask_reshape_dims[0] = batch_size; slice_mask_reshape_dims[1] = (new_space_size + halo_size); TF_ASSIGN_OR_RETURN( HloInstruction * slice_mask_reshaped, MakeReshapeHlo(slice_mask_reshape_dims, slice_mask)); HloInstruction* shape_mask = MakeBroadcastHlo( slice_mask_reshaped, {new_batch_dim, new_space_dim}, final_selection->shape().dimensions(), &slice_mask->metadata(), &slice_mask->frontend_attributes()); TF_ASSIGN_OR_RETURN( new_consumer, MakeSelectHlo(shape_mask, new_consumer, final_selection, nullptr, &shape_mask->metadata(), &shape_mask->frontend_attributes())); } auto previous_shape = old_to_new_instrs_[consumer->mutable_operand(0)]->shape(); std::vector<int64_t> 
      start_indices(previous_shape.rank(), 0),
          end_indices(previous_shape.dimensions().begin(),
                      previous_shape.dimensions().end()),
          strides(previous_shape.rank(), 1);
      // Slice the halo-extended select-and-scatter result back down to the
      // shape of the space-to-batched operand.
      TF_ASSIGN_OR_RETURN(new_consumer,
                          MakeSliceHlo(new_consumer, start_indices, end_indices,
                                       strides, &consumer->metadata(),
                                       &consumer->frontend_attributes()));
    } else {
      // Plain reduce-window: rebuild it on the space-to-batched operand with
      // the permuted window `new_win`.
      auto reduce_comp = consumer->to_apply();
      TF_ASSIGN_OR_RETURN(auto new_reduce_window_shape,
                          ShapeInference::InferReduceWindowShape(
                              new_shape, init_val->shape(), new_win));
      new_consumer = computation_->AddInstruction(
          HloInstruction::CreateReduceWindow(new_reduce_window_shape,
                                             first_operand, init_val, new_win,
                                             reduce_comp),
          &consumer->metadata(), &consumer->frontend_attributes());
      TF_CHECK_OK(
          new_consumer->ReplaceOperandWithDifferentShape(0, first_operand));
      VLOG(1) << "New reduce window " << new_consumer->ToString();
    }
    // Record the rewrite and propagate the operand's dim mapping / permutation.
    old_to_new_instrs_[consumer] = new_consumer;
    instr_to_dim_map_[consumer] = std::vector<int64_t>(dim_map_val);
    instr_to_dim_permute_map_[new_consumer] = std::vector<int64_t>(
        instr_to_dim_permute_map_[old_to_new_instrs_[consumer->mutable_operand(
            0)]]);
    return true;
  }

  // Propagation was attempted on an opcode no branch above handles; this is a
  // logic error in the caller's SupportedOpForPropagation gating.
  LOG(FATAL) << "Trying to propagate through an unsupported instruction "
             << consumer->ToString();
  return true;
}

// Overwrites the positions of `new_instr` (the space-to-batched form of
// `old_instr`) that fall outside the valid extent of the old spatial
// dimension(s) with `select_val`, by building a constant bit-mask,
// broadcasting it over the new shape, and selecting between `new_instr` and a
// broadcast of `select_val`.
absl::StatusOr<HloInstruction*> ConvolutionVisitor::SelectValidPortion(
    HloInstruction* new_instr, HloInstruction* old_instr,
    HloInstruction* select_val, int64_t new_batch_dim,
    absl::Span<const int64_t> new_space_dims, int64_t old_batch_dim,
    absl::Span<const int64_t> old_space_dims) {
  auto new_shape = new_instr->shape();
  auto old_shape = old_instr->shape();
  VLOG(1) << "In SelectValidPortion new_batch_dim " << new_batch_dim
          << " new_space_dim " << new_space_dims[0] << " old_batch_dim "
          << old_batch_dim << " old_space_dim " << old_space_dims[0];
  const int64_t new_batch_size = new_shape.dimensions(new_batch_dim);
  const int64_t new_space_size = new_shape.dimensions(new_space_dims[0]);
  const int64_t old_batch_size = old_shape.dimensions(old_batch_dim);
  const int64_t old_space_size = old_shape.dimensions(old_space_dims[0]);
  // The new batch dimension holds old batch x split factors, so it must be an
  // exact multiple of the old batch size.
  CHECK_EQ(new_batch_size % old_batch_size, 0)
      << " New batch size " << new_batch_size << " old batch size "
      << old_batch_size;
  const int64_t num_splits = ctrl_.number_of_splits;
  const int64_t spatial_dim_count = new_space_dims.size();
  // Enumerate every (old batch, split indices, space indices) position in
  // mixed-radix form; a position is valid when its reconstructed old spatial
  // index (batch_index * new_space_size + space_index) is within the old
  // spatial extent.
  std::vector<int64_t> bounds(2 + spatial_dim_count, new_space_size);
  bounds[0] = old_batch_size;
  bounds[1] = IPow<int64_t>(num_splits, spatial_dim_count);
  const int64_t total_new_space =
      IPow<int64_t>(new_space_size, spatial_dim_count);
  tsl::core::Bitmap b(new_batch_size * total_new_space);
  for (int k = 0; k < new_batch_size * total_new_space; ++k) {
    auto radix = ToMixedRadix(k, bounds);
    bool out_of_bounds = false;
    int64_t batch_residue = 1;
    for (int i = 0; i < spatial_dim_count; ++i) {
      const int64_t space_index = radix[2 + i];
      const int64_t batch_index = (radix[1] / batch_residue) % num_splits;
      batch_residue *= num_splits;
      if (batch_index * new_space_size + space_index >= old_space_size) {
        out_of_bounds = true;
      }
    }
    if (!out_of_bounds) {
      b.set(k);
    } else {
      b.clear(k);
    }
  }
  auto arg_literal = LiteralUtil::CreateR1(b);
  VLOG(4) << "Slice mask created: arg literal " << arg_literal.ToString();
  HloInstruction* slice_mask = computation_->AddInstruction(
      HloInstruction::CreateConstant(std::move(arg_literal)),
      &old_instr->metadata(), &old_instr->frontend_attributes());
  // Reshape the flat mask to (new_batch, space, space, ...) and broadcast it
  // across the remaining dimensions of new_instr.
  std::vector<int64_t> slice_mask_reshape_dims(1 + spatial_dim_count,
                                               new_space_size);
  slice_mask_reshape_dims[0] = new_batch_size;
  TF_ASSIGN_OR_RETURN(HloInstruction * slice_mask_reshaped,
                      MakeReshapeHlo(slice_mask_reshape_dims, slice_mask));
  std::vector<int64_t> broadcast_dims(new_space_dims.begin(),
                                      new_space_dims.end());
  broadcast_dims.insert(broadcast_dims.begin(), new_batch_dim);
  HloInstruction* shape_mask = MakeBroadcastHlo(
      slice_mask_reshaped, broadcast_dims, new_instr->shape().dimensions(),
      &slice_mask_reshaped->metadata(),
      &slice_mask_reshaped->frontend_attributes());
  VLOG(1) << "Shape mask made " << shape_mask->ToString();
  // "zeroes" is actually a broadcast of the caller-chosen fill value
  // (select_val), which need not be zero.
  HloInstruction* zeroes = MakeBroadcastHlo(
      select_val, {}, new_instr->shape().dimensions(), &select_val->metadata(),
      &select_val->frontend_attributes());
  TF_ASSIGN_OR_RETURN(new_instr,
                      MakeSelectHlo(shape_mask, new_instr, zeroes, nullptr,
                                    &shape_mask->metadata(),
                                    &shape_mask->frontend_attributes()));
  return new_instr;
}

// Converts the space-to-batched counterpart of `old_instr` back to the
// original batch/space layout: un-merge the batch, reshape batch splits back
// into the spatial dimension(s), slice off padding, and undo the dimension
// permutation. Results are memoized in batch_to_space_map_.
absl::StatusOr<HloInstruction*> ConvolutionVisitor::BatchToSpace(
    HloInstruction* old_instr) {
  if (batch_to_space_map_.count(old_instr)) {
    CHECK_NE(batch_to_space_map_[old_instr], nullptr);
    return batch_to_space_map_[old_instr];
  }
  auto result = instr_to_dim_map_[old_instr];
  const int64_t old_batch_dim = result[DimMapper(SpaceToBatchDimMap::kBatch)];
  const int64_t old_space_dim = result[DimMapper(SpaceToBatchDimMap::kSpace0)];
  const int64_t old_batch_size = old_instr->shape().dimensions(old_batch_dim);
  CHECK(old_to_new_instrs_.contains(old_instr));
  auto new_instr = old_to_new_instrs_[old_instr];
  VLOG(2) << "old_batch_dim " << old_batch_dim << " old_space_dim "
          << old_space_dim << " old_instr " << old_instr->ToString()
          << "\n new_instr " << new_instr->ToString() << " permute dims "
          << instr_to_dim_permute_map_.count(new_instr) << " old_batch_size "
          << old_batch_size;
  CHECK(instr_to_dim_permute_map_.contains(new_instr));
  auto permute_dims = instr_to_dim_permute_map_[new_instr];
  const int64_t batch_dim = DimLookUp(permute_dims, old_batch_dim);
  const int64_t space_dim = DimLookUp(permute_dims, old_space_dim);
  const int64_t spatial_dim_size = new_instr->shape().dimensions(space_dim);
  // The converted spatial dims are contiguous starting at space_dim.
  std::vector<int64_t> split_spatial_dimensions(
      ctrl_.count_of_dimensions_to_convert);
  absl::c_iota(split_spatial_dimensions, space_dim);
  TF_ASSIGN_OR_RETURN(new_instr,
                      SplitAndTransposeMergedBatch(
                          new_instr, batch_dim, old_batch_size,
                          split_spatial_dimensions));
  // Collapse each (split, space) pair back into a single enlarged spatial dim.
  std::vector<int64_t> new_dimensions(new_instr->shape().dimensions().begin(),
                                      new_instr->shape().dimensions().end());
  new_dimensions.erase(new_dimensions.begin() + split_spatial_dimensions[0],
                       new_dimensions.begin() + split_spatial_dimensions[0] +
                           ctrl_.count_of_dimensions_to_convert);
  for (auto spatial_dimension : split_spatial_dimensions) {
    new_dimensions[spatial_dimension] =
        spatial_dim_size * ctrl_.number_of_splits;
  }
  TF_ASSIGN_OR_RETURN(HloInstruction * reshape,
                      MakeReshapeHlo(new_dimensions, new_instr));
  VLOG(1) << "Batch to space reshape " << reshape->ToString();
  const int64_t rank = old_instr->shape().rank();
  // Slice away the spatial padding introduced by space-to-batch so each
  // spatial dim is restored to its original extent.
  std::vector<int64_t> start_indices(rank, 0),
      end_indices(new_dimensions.begin(), new_dimensions.end()),
      strides(rank, 1);
  for (auto spatial_dimension : split_spatial_dimensions) {
    end_indices[spatial_dimension] =
        old_instr->shape().dimensions(old_space_dim);
  }
  TF_ASSIGN_OR_RETURN(
      HloInstruction * output_slice,
      MakeSliceHlo(reshape, start_indices, end_indices, strides,
                   &reshape->metadata(), &reshape->frontend_attributes()));
  VLOG(1) << "Batch to space slice " << output_slice->ToString();
  // Undo the dimension permutation recorded for the space-to-batched instr.
  std::vector<int64_t> transpose_dims(permute_dims);
  TF_ASSIGN_OR_RETURN(HloInstruction * output_transpose,
                      MakeTransposeHlo(output_slice, transpose_dims));
  old_instr->SetupDerivedInstruction(output_transpose);
  batch_to_space_map_[old_instr] = output_transpose;
  return output_transpose;
}

// Pushes the space-to-batch transform from `old_conv` through its transitive
// users via a BFS worklist. Users that support propagation are rewritten in
// place; unsupported users (and the computation root) are fed a BatchToSpace
// of the producer instead.
absl::Status ConvolutionVisitor::PropagateOnUsers(HloInstruction* old_conv) {
  std::queue<std::pair<HloInstruction*, HloInstruction*>> propagation_worklist;
  if (old_conv->user_count() == 0) {
    // No users: the conv is (presumably) the root — materialize batch-to-space
    // and replace it directly.
    TF_ASSIGN_OR_RETURN(HloInstruction * batch_to_space,
                        BatchToSpace(old_conv));
    VLOG(1) << "Replacing the root instruction to "
            << batch_to_space->ToString();
    TF_CHECK_OK(computation_->ReplaceInstruction(old_conv, batch_to_space));
    VLOG(1) << "Replacement successful";
    return absl::OkStatus();
  }
  int64_t iteration_count = 0;
  propagation_worklist.push(
      std::make_pair(old_conv, old_conv->mutable_operand(0)));
  while (!propagation_worklist.empty()) {
    auto top = propagation_worklist.front();
    auto node = top.first;
    auto parent = top.second;
    VLOG(1) << "Traversing for propagation operating on " << node->ToString();
    propagation_worklist.pop();
    // Skip nodes already rewritten (the seed conv itself is allowed through
    // on the first iteration).
    if (old_to_new_instrs_.count(node) > 0 && iteration_count != 0) {
      continue;
    }
    bool needs_further_propagation = true;
    if (iteration_count != 0) {
      TF_ASSIGN_OR_RETURN(needs_further_propagation, Propagate(node, parent));
    }
    iteration_count++;
    if (node->parent()->root_instruction() == node) {
      // The root must keep its original layout, so either the rewrite already
      // matches (no further propagation) or we insert a batch-to-space.
      if (!needs_further_propagation) {
        VLOG(1) << "Replacing the root instruction to "
                << old_to_new_instrs_[node]->ToString();
        TF_CHECK_OK(
            computation_->ReplaceInstruction(node, old_to_new_instrs_[node]));
        continue;
      }
      TF_ASSIGN_OR_RETURN(HloInstruction * batch_to_space, BatchToSpace(node));
      VLOG(1) << "Replacing the root instruction to "
              << batch_to_space->ToString();
      TF_CHECK_OK(computation_->ReplaceInstruction(node, batch_to_space));
    } else {
      if (!needs_further_propagation) {
        TF_CHECK_OK(
            computation_->ReplaceInstruction(node, old_to_new_instrs_[node]));
        continue;
      }
      // Partition users into those we can keep propagating through and those
      // that need the result converted back first.
      HloInstructionSet unsupported_users;
      for (auto user : node->users()) {
        if (!SupportedOpForPropagation(user, node)) {
          VLOG(1) << "Unsupported op found " << user->ToString();
          unsupported_users.insert(user);
          continue;
        }
        if (CanPropagate(user, node)) {
          non_propagatable_instrs_.erase(user);
          propagation_worklist.push(std::make_pair(user, node));
        } else {
          non_propagatable_instrs_.insert(user);
        }
      }
      if (!unsupported_users.empty()) {
        // One shared batch-to-space feeds every unsupported user's operand
        // slots that referenced `node`.
        TF_ASSIGN_OR_RETURN(HloInstruction * batch_to_space,
                            BatchToSpace(node));
        for (auto user : unsupported_users) {
          for (int64_t i = 0; i < user->operand_count(); ++i) {
            if (user->operand(i) == node) {
              TF_CHECK_OK(user->ReplaceOperandWith(i, batch_to_space));
            }
          }
        }
      }
    }
  }
  return absl::OkStatus();
}

// Propagates space-to-batch through a convolution whose activations have
// already been space-to-batched.
absl::Status ConvolutionVisitor::PropagateOnConv(HloInstruction* convolution) {
  auto activations_old = convolution->mutable_operand(0);
  CHECK(old_to_new_instrs_.contains(activations_old));
  auto activations_new = old_to_new_instrs_[activations_old];
  auto permute_dims = instr_to_dim_permute_map_[activations_new];
  auto original_conv_dims =
      convolution->convolution_dimension_numbers();
  auto old_new_dims = GetSpatialDimsToSplit(activations_old);
  std::vector<int64_t> old_spatial_dims = old_new_dims.first;
  std::vector<int64_t> new_spatial_dims = old_new_dims.second;

  // Re-express the convolution dimension numbers in the coordinate system of
  // the (permuted) space-to-batched activations.
  auto permuted_conv_dims_numbers = original_conv_dims;
  int64_t activations_batch_dim =
      DimLookUp(permute_dims, original_conv_dims.input_batch_dimension());
  int64_t activations_feature_dim =
      DimLookUp(permute_dims, original_conv_dims.input_feature_dimension());
  permuted_conv_dims_numbers.set_input_batch_dimension(activations_batch_dim);
  permuted_conv_dims_numbers.set_input_feature_dimension(
      activations_feature_dim);
  for (int64_t i = 0; i < original_conv_dims.input_spatial_dimensions_size();
       ++i) {
    permuted_conv_dims_numbers.set_input_spatial_dimensions(
        i, DimLookUp(permute_dims,
                     original_conv_dims.input_spatial_dimensions(i)));
  }
  const int64_t old_batch_dim = original_conv_dims.input_batch_dimension();
  const int64_t old_batch_size =
      activations_old->shape().dimensions(old_batch_dim);
  ConvDetails c =
      GetConvolutionDetails(convolution, permuted_conv_dims_numbers);
  VLOG(1) << "Propagating on conv activations_batch_dim "
          << activations_batch_dim << " spatial_dimension_to_split "
          << c.spatial_dimensions_to_split[0] << " old_batch_size "
          << old_batch_size;
  TF_ASSIGN_OR_RETURN(
      auto retval,
      BringSpaceNextToBatch(activations_new, permuted_conv_dims_numbers,
                            activations_batch_dim, &new_spatial_dims));
  activations_new = retval.instr;
  std::vector<int64_t> trans_dims = retval.transpose_dims;
  CHECK(!trans_dims.empty());
  // Zero out the out-of-bounds positions so halo/window reads stay correct.
  auto select_val = computation_->AddInstruction(
      HloInstruction::CreateConstant(
          LiteralUtil::Zero(activations_new->shape().element_type())),
      &convolution->metadata(), &convolution->frontend_attributes());
  TF_ASSIGN_OR_RETURN(
      activations_new,
      SelectValidPortion(activations_new, activations_old, select_val,
                         activations_batch_dim, new_spatial_dims,
                         old_batch_dim, old_spatial_dims));
  auto new_dim_numbers = permuted_conv_dims_numbers;
  // Choose a per-split spatial size large enough to produce the required
  // output offsets and to cover the halo.
  const int64_t num_splits = ctrl_.number_of_splits;
  const int64_t output_offsets = convolution->shape().dimensions(
      permuted_conv_dims_numbers.output_spatial_dimensions(
          GetFirstChosenSpatialDim(convolution)));
  const int64_t output_offsets_per_split =
      CeilOfRatio(output_offsets, num_splits);
  int64_t spatial_split_size =
      CeilOfRatio(output_offsets_per_split, c.base_dilation_factor) * c.stride;
  VLOG(1) << "spatial size " << c.spatial_size << " halo size " << c.halo_size
          << " spatial_split_size " << spatial_split_size;
  // Grow the split size (in stride increments) until the splits plus halo
  // cover the full spatial extent.
  while (spatial_split_size * num_splits + c.halo_size - c.spatial_size < 0 ||
         spatial_split_size < c.halo_size - c.inherent_low_padding) {
    spatial_split_size += c.stride;
  }
  VLOG(1) << "Modified spatial_split_size " << spatial_split_size;
  const int64_t new_space_size =
      activations_new->shape().dimensions(new_spatial_dims[0]);
  int64_t slice_size = spatial_split_size + c.halo_size;
  if (spatial_split_size > new_space_size) {
    // Incoming splits are too small: pad them up to the needed size.
    TF_ASSIGN_OR_RETURN(
        activations_new,
        ChangeSpatialSizeOnSpaceToBatchedShape(
            activations_new, activations_batch_dim, old_batch_size,
            new_spatial_dims, spatial_split_size, true));
  } else {
    if (spatial_split_size < new_space_size) {
      VLOG(3)
          << "Decreasing the spatial size while propagating spatial_split_size "
          << spatial_split_size << " new_space_size " << new_space_size;
      // Shrinking is only materialized when stride/base-dilation forbid
      // reusing the larger extent; otherwise keep the extent and just widen
      // the halo slice.
      if (new_space_size % c.stride != 0 || c.base_dilation_factor != 1) {
        TF_ASSIGN_OR_RETURN(
            activations_new,
            ChangeSpatialSizeOnSpaceToBatchedShape(
                activations_new, activations_batch_dim, old_batch_size,
                new_spatial_dims, spatial_split_size));
      } else {
        const int64_t additional_space_present = spatial_split_size % c.stride;
        spatial_split_size = new_space_size;
        slice_size =
            spatial_split_size +
            std::max(c.kernel_spatial_dim_size - c.stride -
                         additional_space_present,
                     static_cast<int64_t>(0));
      }
    }
  }
  // Duplicate the halo region across neighboring splits so each split sees
  // the data its window overlaps.
  TF_ASSIGN_OR_RETURN(
      activations_new,
      HaloDuplicateWithSlice(
          activations_new, new_spatial_dims, activations_batch_dim,
          c.base_dilation_factor != 1 && c.inherent_low_padding != 0
              ? (c.inherent_low_padding == c.base_dilation_factor ? 1 : 0)
              : c.inherent_low_padding,
          slice_size - spatial_split_size));
  // Build the output dimension order: batch immediately before the first
  // chosen spatial dim, feature last; dim_translator (an ordered map) yields
  // the transpose back to the original layout.
  const int64_t rank = (convolution->shape().rank());
  std::vector<int64_t> transpose_dims(rank);
  int dim_count = 0;
  std::map<int64_t, int64_t> dim_translator;
  for (int j = 0;
       j < permuted_conv_dims_numbers.output_spatial_dimensions_size(); ++j) {
    if (j == GetFirstChosenSpatialDim(convolution)) {
      dim_translator[permuted_conv_dims_numbers.output_batch_dimension()] =
          dim_count;
      new_dim_numbers.set_output_batch_dimension(dim_count++);
    }
    dim_translator[permuted_conv_dims_numbers.output_spatial_dimensions(j)] =
        dim_count;
    new_dim_numbers.set_output_spatial_dimensions(j, dim_count);
    dim_count++;
  }
  dim_translator[permuted_conv_dims_numbers.output_feature_dimension()] =
      dim_count;
  new_dim_numbers.set_output_feature_dimension(dim_count);
  int p = 0;
  for (const auto& entry : dim_translator) {
    transpose_dims[p] = entry.second;
    p++;
  }
  // Use the precomputed paddings on each converted spatial window dim.
  auto new_window = convolution->window();
  const int64_t first_dim = GetFirstChosenSpatialDim(convolution);
  for (int i = 0; i < ctrl_.count_of_dimensions_to_convert; ++i) {
    new_window.mutable_dimensions(first_dim + i)
        ->set_padding_high(c.high_padding_for_conv);
    new_window.mutable_dimensions(first_dim + i)
        ->set_padding_low(c.low_padding_for_conv);
  }
  TF_ASSIGN_OR_RETURN(
      HloInstruction * new_conv,
      MakeConvolveHlo(
          activations_new, convolution->mutable_operand(1),
          convolution->feature_group_count(), convolution->batch_group_count(),
          new_window, new_dim_numbers, convolution->precision_config(),
          convolution->shape().element_type()));
  convolution->SetupDerivedInstruction(new_conv);
  old_to_new_instrs_[convolution] = new_conv;
  VLOG(1) << "Space-to-batched convolution " << new_conv->ToString();
  // Record the old-layout dim mapping for the rewritten convolution.
  std::vector<int64_t> dim_map(kNumMappedDims);
  dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] =
      original_conv_dims.output_batch_dimension();
  dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] =
      original_conv_dims.output_feature_dimension();
  dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] =
      original_conv_dims.output_spatial_dimensions(
          GetFirstChosenSpatialDim(convolution));
  instr_to_dim_map_[convolution] = dim_map;
  instr_to_dim_permute_map_[new_conv] = std::vector<int64_t>(transpose_dims);
  convs_to_visit_.erase(convolution);
  return absl::OkStatus();
}

// Propagates space-to-batch through a concatenate: all operands are assumed
// to have been rewritten already, and the concat dimension is remapped via the
// first operand's permutation.
absl::Status ConvolutionVisitor::PropagateOnConcat(HloInstruction* concat) {
  auto first_operand = old_to_new_instrs_[concat->mutable_operand(0)];
  auto permute_dims = instr_to_dim_permute_map_[first_operand];
  const int64_t new_concat_dim =
      DimLookUp(permute_dims, concat->concatenate_dimension());
  std::vector<HloInstruction*> new_operands(concat->operand_count());
  for (int64_t i = 0; i < concat->operand_count(); ++i) {
    new_operands[i] = old_to_new_instrs_[concat->mutable_operand(i)];
  }
  TF_ASSIGN_OR_RETURN(
      HloInstruction * new_concat,
      MakeConcatHlo(new_operands, new_concat_dim, &concat->metadata(),
                    &concat->frontend_attributes()));
  // Inherit mapping/permutation bookkeeping from the first operand.
  old_to_new_instrs_[concat] = new_concat;
  instr_to_dim_map_[concat] =
      std::vector<int64_t>(instr_to_dim_map_[concat->mutable_operand(0)]);
  instr_to_dim_permute_map_[new_concat] =
      std::vector<int64_t>(instr_to_dim_permute_map_[first_operand]);
  return absl::OkStatus();
}

// Propagates space-to-batch through a reverse by remapping each reversed
// dimension through the operand's permutation.
absl::Status ConvolutionVisitor::PropagateOnReverse(HloInstruction* reverse) {
  auto first_operand = old_to_new_instrs_[reverse->mutable_operand(0)];
  auto permute_dims = instr_to_dim_permute_map_[first_operand];
  std::vector<int64_t> new_reverse_dimensions(reverse->dimensions().size());
  int dim_count = 0;
  for (auto dim : reverse->dimensions()) {
    new_reverse_dimensions[dim_count++] = DimLookUp(permute_dims, dim);
  }
  TF_ASSIGN_OR_RETURN(HloInstruction * new_reverse,
                      MakeReverseHlo(first_operand, new_reverse_dimensions));
  old_to_new_instrs_[reverse] = new_reverse;
  instr_to_dim_map_[reverse] =
      std::vector<int64_t>(instr_to_dim_map_[reverse->mutable_operand(0)]);
  instr_to_dim_permute_map_[new_reverse] =
      std::vector<int64_t>(instr_to_dim_permute_map_[first_operand]);
  return absl::OkStatus();
}

// Propagates space-to-batch through a pad: the padding config is re-indexed
// into the new (permuted) dimension order; the pad value operand is reused
// as-is.
absl::Status ConvolutionVisitor::PropagateOnPad(HloInstruction* pad) {
  auto first_operand = old_to_new_instrs_[pad->mutable_operand(0)];
  auto permute_dims = instr_to_dim_permute_map_[first_operand];
  PaddingConfig padding_config;
  for (int i = 0; i < pad->shape().rank(); ++i) {
    auto dimension = padding_config.add_dimensions();
    const int64_t old_dim = ReverseDimLookUp(permute_dims, i);
    auto old_padding = pad->padding_config().dimensions(old_dim);
    dimension->set_edge_padding_low(old_padding.edge_padding_low());
    dimension->set_edge_padding_high(old_padding.edge_padding_high());
    dimension->set_interior_padding(old_padding.interior_padding());
  }
  HloInstruction* padding = pad->mutable_operand(1);
  TF_ASSIGN_OR_RETURN(auto new_pad,
                      MakePadHlo(first_operand, padding, padding_config,
                                 &first_operand->metadata(),
                                 &first_operand->frontend_attributes()));
  old_to_new_instrs_[pad] = new_pad;
  instr_to_dim_map_[pad] =
      std::vector<int64_t>(instr_to_dim_map_[pad->mutable_operand(0)]);
  instr_to_dim_permute_map_[new_pad] =
      std::vector<int64_t>(instr_to_dim_permute_map_[first_operand]);
  return absl::OkStatus();
}

// Propagates space-to-batch through a slice. Dimensions that the original
// slice left untouched stay full-size in the new layout; sliced dimensions
// reuse the original start/stride/limit re-indexed through the permutation.
absl::Status ConvolutionVisitor::PropagateOnSlice(HloInstruction* slice) {
  auto operand = old_to_new_instrs_[slice->mutable_operand(0)];
  auto permute_dims = instr_to_dim_permute_map_[operand];
  DimensionVector starts(slice->shape().rank());
  DimensionVector limits(slice->shape().rank());
  DimensionVector strides(slice->shape().rank());
  for (int i = 0; i < slice->shape().rank(); ++i) {
    const int64_t old_dim = ReverseDimLookUp(permute_dims, i);
    if (slice->shape().dimensions(old_dim) ==
        slice->operand(0)->shape().dimensions(old_dim)) {
      // Not sliced in the original: take the whole (possibly resized) dim.
      starts[i] = 0;
      strides[i] = 1;
      limits[i] = operand->shape().dimensions(i);
      continue;
    }
    starts[i] = slice->slice_starts(old_dim);
    strides[i] = slice->slice_strides(old_dim);
    limits[i] = slice->slice_limits(old_dim);
  }
  TF_ASSIGN_OR_RETURN(
      auto new_slice,
      MakeSliceHlo(operand, starts, limits, strides, &operand->metadata(),
                   &operand->frontend_attributes()));
  old_to_new_instrs_[slice] = new_slice;
  instr_to_dim_map_[slice] =
      std::vector<int64_t>(instr_to_dim_map_[slice->mutable_operand(0)]);
  instr_to_dim_permute_map_[new_slice] =
      std::vector<int64_t>(instr_to_dim_permute_map_[operand]);
  return absl::OkStatus();
}

// After the spatial dims have been reshaped into (split, space) pairs,
// interleaves the split factors next to the batch dimension (transposing only
// when more than one spatial dim was split) and collapses batch x splits into
// a single merged batch dimension.
absl::StatusOr<HloInstruction*> ConvolutionVisitor::TransposeAndMergeBatch(
    HloInstruction* activations,
    absl::Span<const int64_t> final_split_spatial_dim_positioning,
    int64_t activations_batch_dim, int64_t old_batch_size) {
  const int64_t spatial_dim_count = final_split_spatial_dim_positioning.size();
  if (final_split_spatial_dim_positioning.size() > 1) {
    int64_t start_batch_dim_position = activations_batch_dim + 1;
    int64_t start_space_dim_position =
        start_batch_dim_position + spatial_dim_count;
    // Gather all split factors in front of all space dims (splits in reverse
    // order) so they can be folded into the batch with one reshape.
    std::vector<int64_t> trans_dims(activations->shape().dimensions_size());
    absl::c_iota(trans_dims, 0);
    for (int i = 0; i < spatial_dim_count; ++i) {
      trans_dims[start_batch_dim_position + i] =
          start_batch_dim_position + (spatial_dim_count - 1 - i) * 2;
      trans_dims[start_space_dim_position + i] =
          start_batch_dim_position + i * 2 + 1;
    }
    TF_ASSIGN_OR_RETURN(activations, MakeTransposeHlo(activations, trans_dims));
  }
  std::vector<int64_t> batch_collapse_reshape_dims(
      activations->shape().dimensions().begin(),
      activations->shape().dimensions().end());
  const int64_t collapsed_batch_size =
      old_batch_size * IPow<int64_t>(ctrl_.number_of_splits, spatial_dim_count);
  batch_collapse_reshape_dims.erase(
      batch_collapse_reshape_dims.begin() + activations_batch_dim,
      batch_collapse_reshape_dims.begin() + activations_batch_dim +
          spatial_dim_count);
  batch_collapse_reshape_dims[activations_batch_dim] = collapsed_batch_size;
  TF_ASSIGN_OR_RETURN(HloInstruction * batch_collapsed_reshape,
                      MakeReshapeHlo(batch_collapse_reshape_dims, activations));
  return batch_collapsed_reshape;
}

// Splits each chosen spatial dimension into (num_splits, spatial_split_size)
// via a reshape, then merges the split factors into the batch dimension.
absl::StatusOr<HloInstruction*> ConvolutionVisitor::PerformSplitSpace(
    HloInstruction* activations,
    absl::Span<const int64_t> spatial_dimensions_to_split,
    int64_t activations_batch_dim, int64_t spatial_split_size,
    int64_t num_splits) {
  const int64_t old_batch_size =
      activations->shape().dimensions(activations_batch_dim);
  std::vector<int64_t> reshape_dimensions(
      activations->shape().dimensions().begin(),
      activations->shape().dimensions().end());
  for (auto spatial_dimension_to_split : spatial_dimensions_to_split) {
    reshape_dimensions[spatial_dimension_to_split] = spatial_split_size;
  }
  // Insert the split factor in front of each spatial dim; `counter` accounts
  // for earlier insertions shifting later positions.
  int counter = 0;
  for (auto spatial_dimension_to_split : spatial_dimensions_to_split) {
    reshape_dimensions.insert(
        reshape_dimensions.begin() + (spatial_dimension_to_split + counter),
        num_splits);
    counter++;
  }
  TF_ASSIGN_OR_RETURN(HloInstruction * batch_increased_reshape,
                      MakeReshapeHlo(reshape_dimensions, activations));
  return TransposeAndMergeBatch(batch_increased_reshape,
                                spatial_dimensions_to_split,
                                activations_batch_dim, old_batch_size);
}

// Optionally pads the chosen spatial dimensions (zero pad value), then splits
// them into the batch via PerformSplitSpace.
absl::StatusOr<HloInstruction*> ConvolutionVisitor::PadAndSplitSpace(
    HloInstruction* activations,
    absl::Span<const int64_t> spatial_dimensions_to_split,
    int64_t activations_batch_dim, int64_t high_padding, int64_t low_padding,
    int64_t spatial_split_size, int64_t num_splits) {
  const int64_t old_batch_size =
      activations->shape().dimensions(activations_batch_dim);
  if (high_padding || low_padding) {
    PaddingConfig padding_config =
        MakeNoPaddingConfig(activations->shape().dimensions_size());
    for (auto spatial_dimension_to_split : spatial_dimensions_to_split) {
      padding_config.mutable_dimensions(spatial_dimension_to_split)
          ->set_edge_padding_high(high_padding);
      padding_config.mutable_dimensions(spatial_dimension_to_split)
          ->set_edge_padding_low(low_padding);
    }
    HloInstruction* padding = computation_->AddInstruction(
        HloInstruction::CreateConstant(
            LiteralUtil::Zero(activations->shape().element_type())),
        &activations->metadata(), &activations->frontend_attributes());
    TF_ASSIGN_OR_RETURN(activations,
                        MakePadHlo(activations, padding, padding_config,
                                   &activations->metadata(),
                                   &activations->frontend_attributes()));
  }
  VLOG(1) << "Initial padded activations shape "
          << activations->shape().ToString() << " old_batch_size "
          << old_batch_size << " activations_batch_dim "
          << activations_batch_dim;
  return PerformSplitSpace(activations, spatial_dimensions_to_split,
                           activations_batch_dim, spatial_split_size,
                           num_splits);
}

// Full space-to-batch pipeline for one operand: move the spatial dims next to
// the batch dim, then pad-and-split. Returns the rewritten instruction and
// the transpose applied, so callers can track the permutation. Note that
// `activations_batch_dim` and `spatial_dimensions_to_split` are in/out
// parameters updated by BringSpaceNextToBatch.
absl::StatusOr<std::pair<HloInstruction*, std::vector<int64_t>>>
ConvolutionVisitor::SplitSpace(
    HloInstruction* activations, ConvolutionDimensionNumbers& dim_numbers,
    int64_t& activations_batch_dim, int64_t high_padding, int64_t low_padding,
    int64_t spatial_split_size, int64_t num_splits,
    std::vector<int64_t>* spatial_dimensions_to_split, bool is_backprop,
    bool is_rhs) {
  TF_ASSIGN_OR_RETURN(
      auto retval,
      BringSpaceNextToBatch(activations, dim_numbers, activations_batch_dim,
                            spatial_dimensions_to_split, is_backprop, is_rhs));
  activations = retval.instr;
  std::vector<int64_t> transpose_dims = retval.transpose_dims;
  TF_ASSIGN_OR_RETURN(
      auto new_activations,
      PadAndSplitSpace(activations, *spatial_dimensions_to_split,
                       activations_batch_dim, high_padding, low_padding,
                       spatial_split_size, num_splits));
  return std::make_pair(new_activations, transpose_dims);
}

// Space-to-batches a constant `consumer` so it lines up with the already
// rewritten `producer`: first apply the inverse of the producer's dimension
// permutation, then pad-and-split to match the producer's new spatial extent.
absl::StatusOr<HloInstruction*> ConvolutionVisitor::PropagateOnConstant(
    HloInstruction* consumer, HloInstruction* producer) {
  CHECK(old_to_new_instrs_.contains(producer));
  HloInstruction* new_producer = old_to_new_instrs_[producer];
  auto prod_transpose_dims = instr_to_dim_permute_map_[new_producer];
  std::vector<int64_t> reversed_transpose_dims(prod_transpose_dims.size());
  for (int64_t i = 0; i < prod_transpose_dims.size(); ++i) {
    reversed_transpose_dims[i] = ReverseDimLookUp(prod_transpose_dims, i);
  }
  TF_ASSIGN_OR_RETURN(consumer,
                      MakeTransposeHlo(consumer, reversed_transpose_dims));
  auto retval = GetSpatialDimsToSplit(producer);
  std::vector<int64_t> old_spatial_dims = retval.first;
  std::vector<int64_t> new_spatial_dims = retval.second;
  auto dim_map =
instr_to_dim_map_[producer]; const int64_t old_batch_dim = dim_map[DimMapper(SpaceToBatchDimMap::kBatch)]; const int64_t old_space_dim = old_spatial_dims[0]; const int64_t new_batch_dim = DimLookUp(prod_transpose_dims, old_batch_dim); const int64_t new_space_dim = new_spatial_dims[0]; const int64_t old_batch_size = producer->shape().dimensions(old_batch_dim); const int64_t new_batch_size = old_batch_size * ctrl_.number_of_splits; const int64_t high_padding = (new_batch_size * new_producer->shape().dimensions(new_space_dim) - old_batch_size * producer->shape().dimensions(old_space_dim)) / old_batch_size; auto new_consumer = PadAndSplitSpace( consumer, new_spatial_dims, new_batch_dim, high_padding, 0, new_producer->shape().dimensions(new_space_dim), ctrl_.number_of_splits); return new_consumer; } absl::Status ConvolutionVisitor::PropagateOnBackpropFilterConv( HloInstruction* convolution) { auto activations_old = convolution->mutable_operand(0); const int64_t rhs_dilation = convolution->window() .dimensions(GetFirstChosenSpatialDim(convolution)) .window_dilation(); auto original_conv_dims = convolution->convolution_dimension_numbers(); std::vector<int64_t> old_split_spatial_dims( ctrl_.dimension_from_end_to_convert), old_split_kernel_spatial_dims(ctrl_.dimension_from_end_to_convert); for (int i = 0; i < ctrl_.dimension_from_end_to_convert; ++i) { old_split_spatial_dims[i] = original_conv_dims.input_spatial_dimensions( GetFirstChosenSpatialDim(convolution) + i); old_split_kernel_spatial_dims[i] = original_conv_dims.kernel_spatial_dimensions( GetFirstChosenSpatialDim(convolution) + i); } auto kernel_old = convolution->mutable_operand(1); const int64_t old_kernel_split_dim_size = kernel_old->shape().dimensions(old_split_kernel_spatial_dims[0]); int64_t old_split_dim_size = activations_old->shape().dimensions(old_split_spatial_dims[0]); int64_t old_batch_dim = original_conv_dims.input_feature_dimension(); int64_t kernel_old_batch_dim = 
      original_conv_dims.kernel_input_feature_dimension();
  const int64_t old_batch_size =
      activations_old->shape().dimensions(old_batch_dim);

  // At least one operand must already have been space-to-batched.
  CHECK(old_to_new_instrs_.contains(kernel_old) ||
        old_to_new_instrs_.contains(activations_old));

  HloInstruction* activations_new = nullptr;
  HloInstruction* kernel_new = nullptr;
  bool activations_locally_space_to_batched = false;
  bool kernel_locally_space_to_batched = false;
  std::vector<int64_t> permute_dims_kernel, permute_dims;

  if (old_to_new_instrs_.contains(activations_old)) {
    activations_new = old_to_new_instrs_[activations_old];
    permute_dims = instr_to_dim_permute_map_[activations_new];
  }

  if (old_to_new_instrs_.contains(kernel_old)) {
    kernel_new = old_to_new_instrs_[kernel_old];
    permute_dims_kernel = instr_to_dim_permute_map_[kernel_new];
  }

  // If only the kernel was converted, locally space-to-batch the activations
  // now so the two operands agree.
  if (!old_to_new_instrs_.contains(activations_old)) {
    kernel_new = old_to_new_instrs_[kernel_old];
    permute_dims_kernel = instr_to_dim_permute_map_[kernel_new];

    VLOG(1) << "Space-to-batching activations to enable space-to-depth";

    const int64_t new_kernel_space_dim =
        DimLookUp(permute_dims_kernel, old_split_kernel_spatial_dims[0]);

    const int64_t new_kernel_split_dim_size =
        kernel_new->shape().dimensions(new_kernel_space_dim);
    // The activations' split size must cover the dilated kernel extent.
    const int64_t needed_spatial_size =
        rhs_dilation * new_kernel_split_dim_size;
    const int64_t pad_size =
        needed_spatial_size * ctrl_.number_of_splits - old_split_dim_size;
    ConvolutionDimensionNumbers tmp_dim_numbers;
    tmp_dim_numbers = original_conv_dims;
    TF_ASSIGN_OR_RETURN(
        auto retval,
        SplitSpace(activations_old, tmp_dim_numbers, old_batch_dim, pad_size,
                   0, needed_spatial_size, ctrl_.number_of_splits,
                   &old_split_spatial_dims, true));

    activations_new = retval.first;

    std::vector<int64_t> reversed_transpose_dims(retval.second.size());
    for (int64_t i = 0; i < retval.second.size(); ++i) {
      reversed_transpose_dims[i] = ReverseDimLookUp(retval.second, i);
    }
    permute_dims = reversed_transpose_dims;
    VLOG(3) << "New Activations " << retval.first->ToString();

    activations_locally_space_to_batched = true;
  } else if (!old_to_new_instrs_.contains(kernel_old)) {
    // Symmetric case: only the activations were converted, so locally
    // space-to-batch the kernel.
    activations_new = old_to_new_instrs_[activations_old];
    permute_dims = instr_to_dim_permute_map_[activations_new];

    VLOG(1) << "Space-to-batching kernel to enable space-to-depth";

    const int64_t new_space_dim =
        DimLookUp(permute_dims, old_split_spatial_dims[0]);
    const int64_t new_split_dim_size =
        activations_new->shape().dimensions(new_space_dim);
    const int64_t needed_spatial_size =
        CeilOfRatio(new_split_dim_size, rhs_dilation);
    int64_t old_kernel_split_dim_size =
        kernel_old->shape().dimensions(old_split_kernel_spatial_dims[0]);
    const int64_t pad_size = needed_spatial_size * ctrl_.number_of_splits -
                             old_kernel_split_dim_size;

    ConvolutionDimensionNumbers tmp_dim_numbers;
    tmp_dim_numbers = original_conv_dims;
    TF_ASSIGN_OR_RETURN(
        auto retval,
        SplitSpace(kernel_old, tmp_dim_numbers, kernel_old_batch_dim, pad_size,
                   0, needed_spatial_size, ctrl_.number_of_splits,
                   &old_split_kernel_spatial_dims, true, true));
    kernel_new = retval.first;

    std::vector<int64_t> reversed_transpose_dims(retval.second.size());
    for (int64_t i = 0; i < retval.second.size(); ++i) {
      reversed_transpose_dims[i] = ReverseDimLookUp(retval.second, i);
    }
    permute_dims_kernel = reversed_transpose_dims;
    VLOG(3) << "New kernel " << retval.first->ToString();

    kernel_locally_space_to_batched = true;
  }

  CHECK_NE(activations_new, nullptr);
  CHECK_NE(kernel_new, nullptr);

  // A fresh spatial dimension will be appended at the end of the shape.
  const int64_t new_spatial_dimension =
      activations_new->shape().dimensions_size();

  auto permuted_conv_dims_numbers = original_conv_dims;

  // Batch and feature swap roles in backprop-filter convs; map both through
  // the activation permutation.
  int64_t activations_batch_dim =
      DimLookUp(permute_dims, original_conv_dims.input_feature_dimension());
  int64_t activations_feature_dim =
      DimLookUp(permute_dims, original_conv_dims.input_batch_dimension());

  const int64_t previous_spatial_dim_count =
      original_conv_dims.input_spatial_dimensions_size();
  for (int64_t i = 0; i < previous_spatial_dim_count; ++i) {
    permuted_conv_dims_numbers.set_input_spatial_dimensions(
        i, DimLookUp(permute_dims,
                     original_conv_dims.input_spatial_dimensions(i)));
    permuted_conv_dims_numbers.set_kernel_spatial_dimensions(
        i, DimLookUp(permute_dims_kernel,
                     original_conv_dims.kernel_spatial_dimensions(i)));
  }

  permuted_conv_dims_numbers.add_input_spatial_dimensions(
      new_spatial_dimension);
  permuted_conv_dims_numbers.add_kernel_spatial_dimensions(
      new_spatial_dimension);
  permuted_conv_dims_numbers.add_output_spatial_dimensions(
      new_spatial_dimension);

  // Swap the newly added output spatial dim with the previously chosen one so
  // the chosen slot refers to the new dimension.
  const int64_t previous_chosen_spatial_dim_in_output =
      permuted_conv_dims_numbers.output_spatial_dimensions(
          GetFirstChosenSpatialDim(convolution));
  permuted_conv_dims_numbers.set_output_spatial_dimensions(
      GetFirstChosenSpatialDim(convolution), new_spatial_dimension);
  permuted_conv_dims_numbers.set_output_spatial_dimensions(
      previous_spatial_dim_count, previous_chosen_spatial_dim_in_output);

  const int64_t kernel_input_feature_dim = DimLookUp(
      permute_dims_kernel, original_conv_dims.kernel_input_feature_dimension());

  const int64_t kernel_output_feature_dim =
      DimLookUp(permute_dims_kernel,
                original_conv_dims.kernel_output_feature_dimension());

  permuted_conv_dims_numbers.set_kernel_input_feature_dimension(
      kernel_input_feature_dim);
  permuted_conv_dims_numbers.set_kernel_output_feature_dimension(
      kernel_output_feature_dim);

  std::vector<int64_t> spatial_dimensions_to_split(
      ctrl_.count_of_dimensions_to_convert);
  const int64_t first_dim_to_split = GetFirstChosenSpatialDim(convolution);
  for (int64_t i = 0; i < ctrl_.count_of_dimensions_to_convert; ++i) {
    spatial_dimensions_to_split[i] =
        permuted_conv_dims_numbers.input_spatial_dimensions(first_dim_to_split +
                                                            i);
  }

  const int64_t kernel_spatial_dimension_to_split =
      permuted_conv_dims_numbers.kernel_spatial_dimensions(
          GetFirstChosenSpatialDim(convolution));

  int64_t new_split_dim_size =
      activations_new->shape().dimensions(spatial_dimensions_to_split[0]);

  const int64_t kernel_new_split_dim_size =
      kernel_new->shape().dimensions(kernel_spatial_dimension_to_split);

  permuted_conv_dims_numbers.set_input_batch_dimension(activations_feature_dim);
  permuted_conv_dims_numbers.set_input_feature_dimension(activations_batch_dim);

  VLOG(1) << "Propagating on conv activations_batch_dim "
          << activations_batch_dim << " spatial_dimension_to_split "
          << spatial_dimensions_to_split[0] << " old_batch_size "
          << old_batch_size << " new_split_dim_size " << new_split_dim_size;

  TF_ASSIGN_OR_RETURN(
      auto retval,
      BringSpaceNextToBatch(activations_new, permuted_conv_dims_numbers,
                            activations_batch_dim,
                            &spatial_dimensions_to_split, true));

  int64_t spatial_dimension_to_split = spatial_dimensions_to_split[0];

  std::vector<int64_t> transpose_dims = retval.transpose_dims;
  CHECK(!transpose_dims.empty());
  activations_new = retval.instr;

  VLOG(1) << "Activations_new post BringSpaceNextToBatch "
          << activations_new->ToString();
  VLOG(1) << "activations_batch_dim " << activations_batch_dim
          << " activations_feature_dim " << activations_feature_dim;

  // Grow the activations' split dim (if needed) to match the dilated kernel.
  const int64_t expected_split_dim_size =
      rhs_dilation * kernel_new_split_dim_size;
  if (new_split_dim_size != expected_split_dim_size) {
    CHECK_LT(new_split_dim_size, expected_split_dim_size);
    new_split_dim_size = expected_split_dim_size;
    TF_ASSIGN_OR_RETURN(
        activations_new,
        ChangeSpatialSizeOnSpaceToBatchedShape(
            activations_new, activations_batch_dim, old_batch_size,
            spatial_dimensions_to_split, new_split_dim_size, true));
  }

  spatial_dimension_to_split = spatial_dimensions_to_split[0];
  auto select_val = computation_->AddInstruction(
      HloInstruction::CreateConstant(
          LiteralUtil::Zero(activations_new->shape().element_type())),
      &activations_new->metadata(), &activations_new->frontend_attributes());

  // Mask out padded garbage with zeros — but only for operands that were
  // converted earlier (locally converted ones already have correct padding).
  if (!activations_locally_space_to_batched) {
    TF_ASSIGN_OR_RETURN(
        activations_new,
        SelectValidPortion(activations_new, activations_old, select_val,
                           activations_batch_dim, spatial_dimensions_to_split,
                           old_batch_dim, old_split_spatial_dims));
  }
  if (!kernel_locally_space_to_batched) {
    VLOG(3) << "Selecting the valid kernel area";
    std::vector<int64_t> new_kernel_split_spatial_dims(
        ctrl_.dimension_from_end_to_convert);

    new_kernel_split_spatial_dims[0] = kernel_spatial_dimension_to_split;

    TF_ASSIGN_OR_RETURN(
        kernel_new,
        SelectValidPortion(kernel_new, kernel_old, select_val,
                           kernel_input_feature_dim,
                           new_kernel_split_spatial_dims,
                           original_conv_dims.kernel_input_feature_dimension(),
                           old_split_kernel_spatial_dims));
  }

  auto new_dim_numbers = permuted_conv_dims_numbers;

  VLOG(2) << "New dim numbers " << new_dim_numbers.DebugString();

  const int64_t inherent_low_padding =
      convolution->window()
          .dimensions(GetFirstChosenSpatialDim(convolution))
          .padding_low();

  const int64_t inherent_high_padding =
      convolution->window()
          .dimensions(GetFirstChosenSpatialDim(convolution))
          .padding_high();

  std::vector<HloInstruction*> activations_chunks;

  // Materialize the low padding as halo-shifted copies of the activations.
  for (int64_t i = 0; i < inherent_low_padding; ++i) {
    HloInstruction* activations_to_use = nullptr;
    if (i == 0) {
      activations_to_use = activations_new;
    } else {
      activations_to_use = activations_chunks.back();
    }
    TF_ASSIGN_OR_RETURN(
        HloInstruction * activations_slice,
        HaloDuplicateWithSlice(activations_to_use, spatial_dimensions_to_split,
                               activations_batch_dim, 1, 0));
    activations_chunks.push_back(activations_slice);
  }
  // Reverse so the chunks appear in ascending spatial offset order.
  absl::c_reverse(activations_chunks);

  const int64_t expanded_kernel =
      old_kernel_split_dim_size * rhs_dilation - (rhs_dilation - 1);
  const int64_t overlap_count =
      old_split_dim_size - expanded_kernel + 1 +
      (inherent_low_padding < 0 ? inherent_low_padding : 0) +
      (inherent_high_padding < 0 ? inherent_high_padding : 0);
  VLOG(1) << "overlap_count " << overlap_count << " inherent_low_padding "
          << inherent_low_padding << " inherent_high_padding "
          << inherent_high_padding;

  const int64_t total_overlap_count =
      overlap_count + (inherent_low_padding > 0 ? inherent_low_padding : 0) +
      (inherent_high_padding > 0 ?
                                   inherent_high_padding : 0);

  // One chunk per valid output position (overlap of kernel with input).
  for (int64_t i = 0; i < overlap_count; ++i) {
    HloInstruction* activations_to_use = nullptr;
    HloInstruction* activations_slice = nullptr;
    if (i == 0) {
      activations_to_use = activations_new;
      if (inherent_low_padding < 0) {
        // Negative low padding: start by slicing that much off the front.
        TF_ASSIGN_OR_RETURN(
            activations_slice,
            HaloDuplicateWithSlice(
                activations_to_use, spatial_dimensions_to_split,
                activations_batch_dim, inherent_low_padding, 0));
      } else {
        activations_slice = activations_to_use;
      }
    } else {
      activations_to_use = activations_chunks.back();
      TF_ASSIGN_OR_RETURN(activations_slice,
                          HaloDuplicateWithSlice(
                              activations_to_use, spatial_dimensions_to_split,
                              activations_batch_dim, -1, 0));
    }
    activations_chunks.push_back(activations_slice);
  }

  // Remaining high padding not already accounted for by overlap/low-padding
  // chunks is materialized as extra shifted chunks.
  int64_t high_padding_to_materialize = 0;
  if (inherent_high_padding > 0) {
    high_padding_to_materialize =
        std::max(total_overlap_count -
                     (std::max(overlap_count, static_cast<int64_t>(0)) +
                      std::max(inherent_low_padding, static_cast<int64_t>(0))),
                 static_cast<int64_t>(0));
  }

  for (int64_t i = 0; i < high_padding_to_materialize; ++i) {
    HloInstruction* activations_to_use = nullptr;
    activations_to_use = activations_chunks.back();
    TF_ASSIGN_OR_RETURN(
        HloInstruction * activations_slice,
        HaloDuplicateWithSlice(activations_to_use, spatial_dimensions_to_split,
                               activations_batch_dim, -1, 0));
    activations_chunks.push_back(activations_slice);
  }

  // Give each chunk a trailing size-1 dim so they can be concatenated along
  // the new spatial dimension.
  for (int64_t i = 0; i < activations_chunks.size(); ++i) {
    std::vector<int64_t> input_sizes(
        activations_chunks[i]->shape().dimensions().begin(),
        activations_chunks[i]->shape().dimensions().end());
    input_sizes.push_back(1);
    TF_ASSIGN_OR_RETURN(activations_chunks[i],
                        MakeReshapeHlo(input_sizes, activations_chunks[i]));
    VLOG(1) << "new_spatial_dimension " << new_spatial_dimension << " slice "
            << activations_chunks[i]->ToString();
  }

  TF_ASSIGN_OR_RETURN(
      activations_new,
      MakeConcatHlo(absl::MakeSpan(activations_chunks), new_spatial_dimension,
                    &activations_old->metadata(),
                    &activations_old->frontend_attributes()));

  // Match the kernel to the new rank with a trailing size-1 spatial dim.
  std::vector<int64_t> kernel_sizes(kernel_new->shape().dimensions().begin(),
                                    kernel_new->shape().dimensions().end());
  kernel_sizes.push_back(1);
  TF_ASSIGN_OR_RETURN(kernel_new, MakeReshapeHlo(kernel_sizes, kernel_new));

  auto new_window = convolution->window();
  // Negative high padding trims the excess introduced by rhs dilation.
  new_window.mutable_dimensions(GetFirstChosenSpatialDim(convolution))
      ->set_padding_high(-(rhs_dilation - 1));
  new_window.mutable_dimensions(GetFirstChosenSpatialDim(convolution))
      ->set_padding_low(0);
  new_window.mutable_dimensions(GetFirstChosenSpatialDim(convolution))
      ->set_size(CeilOfRatio(new_split_dim_size, rhs_dilation));

  // A vanilla window dimension for the newly added spatial dim.
  auto window_dim = new_window.add_dimensions();
  window_dim->set_base_dilation(1);
  window_dim->set_size(1);
  int64_t stride = 1;
  // When low padding alone exceeds the total overlap, only a single overlap
  // position is valid; encode that by striding across all chunks.
  if (inherent_low_padding > total_overlap_count) {
    stride = activations_chunks.size();
  }
  window_dim->set_stride(stride);
  window_dim->set_padding_low(0);
  window_dim->set_padding_high(0);
  window_dim->set_window_reversal(false);
  window_dim->set_window_dilation(1);

  TF_ASSIGN_OR_RETURN(
      HloInstruction * new_conv,
      MakeConvolveHlo(
          activations_new, kernel_new, convolution->feature_group_count(),
          convolution->batch_group_count(), new_window, new_dim_numbers,
          convolution->precision_config(),
          convolution->shape().element_type()));
  convolution->SetupDerivedInstruction(new_conv);

  VLOG(2) << "New backprop filter convolution " << new_conv->ToString();

  std::vector<int64_t> output_sizes(new_conv->shape().dimensions().begin(),
                                    new_conv->shape().dimensions().end());

  // Drop the (now trivial) chosen output spatial dimension.
  output_sizes.erase(output_sizes.begin() +
                     new_dim_numbers.output_spatial_dimensions(
                         GetFirstChosenSpatialDim(convolution)));

  TF_ASSIGN_OR_RETURN(new_conv, MakeReshapeHlo(output_sizes, new_conv));
  old_to_new_instrs_[convolution] = new_conv;
  VLOG(1) << "Space-to-featured convolution " << new_conv->ToString();

  std::vector<int64_t> dim_map(kNumMappedDims);
  dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] =
      original_conv_dims.output_batch_dimension();
  dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] =
      original_conv_dims.output_feature_dimension();
  dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] =
      original_conv_dims.output_spatial_dimensions(
          GetFirstChosenSpatialDim(convolution));
  instr_to_dim_map_[convolution] = dim_map;

  // The result already has the canonical layout: identity permutation.
  std::vector<int64_t> trans_dims(convolution->shape().dimensions_size());
  absl::c_iota(trans_dims, 0);
  instr_to_dim_permute_map_[new_conv] = trans_dims;

  return absl::OkStatus();
}

// Depth-bounded search through users for a reduce-window or
// select-and-scatter consumer; returns it, or nullptr if none is reachable.
HloInstruction*
ConvolutionVisitor::DoesConvolutionFeedReduceWindowOrSelectAndScatter(
    HloInstruction* instr, int64_t depth = kReduceWindowSearchDepth) {
  if (depth == 0) {
    return nullptr;
  }

  for (auto user : instr->users()) {
    if (user->opcode() == HloOpcode::kReduceWindow ||
        user->opcode() == HloOpcode::kSelectAndScatter) {
      return user;
    }
    // Do not search through other convs/pads/transposes/dots.
    if (user->opcode() == HloOpcode::kConvolution ||
        user->opcode() == HloOpcode::kPad ||
        user->opcode() == HloOpcode::kTranspose ||
        user->opcode() == HloOpcode::kDot) {
      continue;
    }
    auto ret =
        DoesConvolutionFeedReduceWindowOrSelectAndScatter(user, depth - 1);
    if (ret != nullptr) {
      return ret;
    }
  }
  return nullptr;
}

// Memoized, depth-bounded check for users that space-to-batch cannot
// propagate through.
bool ConvolutionVisitor::DoesConvolutionFeedUnpropagatableOp(
    HloInstruction* instr, int64_t depth) {
  auto key = std::make_pair(instr, depth);
  if (unpropagatability_cache_.contains(key)) {
    return unpropagatability_cache_[key];
  }

  if (depth == 0 || instr->user_count() == 0) {
    unpropagatability_cache_[key] = false;
    return false;
  }

  for (auto user : instr->users()) {
    if (IsOpcodeNonPropagatable(user)) {
      unpropagatability_cache_[key] = true;
      return true;
    }

    int64_t depth_to_use = depth;
    // Only convolutions and dots consume depth budget.
    if (user->opcode() == HloOpcode::kConvolution ||
        user->opcode() == HloOpcode::kDot) {
      depth_to_use--;
    }

    if (DoesConvolutionFeedUnpropagatableOp(user, depth_to_use)) {
      unpropagatability_cache_[key] = true;
      return true;
    }
  }

  unpropagatability_cache_[key] = false;
  return false;
}

// True iff the space-to-batched operand's space dimension is still at least
// as large as the consumer's window in that dimension.
bool ConvolutionVisitor::IsSpaceToBatchedSpaceSizeSuitable(
    HloInstruction* instr) {
  CHECK(instr->opcode() == HloOpcode::kSelectAndScatter ||
        instr->opcode() == HloOpcode::kReduceWindow);
  auto old_producer =
      instr->mutable_operand(0);

  auto dim_map_val_op = instr_to_dim_map_[old_producer];
  const int64_t old_space_dim =
      dim_map_val_op[DimMapper(SpaceToBatchDimMap::kSpace0)];
  auto first_operand = old_to_new_instrs_[old_producer];
  auto permute_dims_first_operand = instr_to_dim_permute_map_[first_operand];
  const int64_t new_space_dim =
      DimLookUp(permute_dims_first_operand, old_space_dim);

  const int64_t window_size = instr->window().dimensions(old_space_dim).size();

  if (first_operand->shape().dimensions(new_space_dim) < window_size) {
    return false;
  }

  return true;
}

// Collects the window/padding/size facts about `convolution` that the
// space-to-batch transformation needs.
ConvolutionVisitor::ConvDetails ConvolutionVisitor::GetConvolutionDetails(
    HloInstruction* convolution, ConvolutionDimensionNumbers& dim_numbers) {
  auto activations = convolution->mutable_operand(0);

  auto kernel = convolution->mutable_operand(1);
  const auto& kernel_shape = kernel->shape();
  const int64_t kernel_spatial_dim = dim_numbers.kernel_spatial_dimensions(
      GetFirstChosenSpatialDim(convolution));
  int64_t kernel_spatial_dim_size = kernel_shape.dimensions(kernel_spatial_dim);

  if (IsForwardWindowDilatedConv(convolution, dim_numbers)) {
    const int64_t window_dilation_factor =
        convolution->window()
            .dimensions(GetFirstChosenSpatialDim(convolution))
            .window_dilation();
    // Effective kernel extent under window dilation.
    kernel_spatial_dim_size =
        (kernel_spatial_dim_size - 1) * (window_dilation_factor - 1) +
        kernel_spatial_dim_size;
  }

  std::vector<int64_t> spatial_dimensions_to_split =
      GetChosenSpatialDims(convolution);
  const int64_t spatial_dimension_to_split = spatial_dimensions_to_split[0];

  const int64_t input_dim_size =
      activations->shape().dimensions(spatial_dimension_to_split);

  const int64_t inherent_low_padding =
      convolution->window()
          .dimensions(GetFirstChosenSpatialDim(convolution))
          .padding_low();
  const int64_t inherent_high_padding =
      convolution->window()
          .dimensions(GetFirstChosenSpatialDim(convolution))
          .padding_high();

  const int64_t stride = convolution->window()
                             .dimensions(GetFirstChosenSpatialDim(convolution))
                             .stride();

  const int64_t base_dilation_factor =
      convolution->window()
          .dimensions(GetFirstChosenSpatialDim(convolution))
          .base_dilation();

  bool is_base_dilated = base_dilation_factor > 1;
  // With base dilation, low padding is handled specially (not folded in).
  const int64_t spatial_size = input_dim_size +
                               (is_base_dilated ? 0 : inherent_low_padding) +
                               inherent_high_padding;

  const int64_t last_overlap = base_dilation_factor == inherent_low_padding
                                   ? kernel_spatial_dim_size
                                   : kernel_spatial_dim_size - 1;
  const int64_t halo_size = is_base_dilated
                                ? last_overlap / base_dilation_factor
                                : kernel_spatial_dim_size - 1;

  const int64_t high_padding_for_base_dilation =
      inherent_low_padding == 0 ? base_dilation_factor - 1
                                : last_overlap % base_dilation_factor;

  const int64_t high_padding_for_conv =
      is_base_dilated ? high_padding_for_base_dilation : 0;

  const int64_t low_padding_for_conv =
      is_base_dilated && (base_dilation_factor != inherent_low_padding)
          ? inherent_low_padding
          : 0;

  return ConvDetails{spatial_dimensions_to_split,
                     inherent_low_padding,
                     inherent_high_padding,
                     stride,
                     spatial_size,
                     base_dilation_factor,
                     halo_size,
                     high_padding_for_conv,
                     low_padding_for_conv,
                     kernel_spatial_dim_size,
                     input_dim_size};
}

// Performs the initial space-to-batch conversion on `convolution`, then
// triggers propagation to its users.
absl::Status ConvolutionVisitor::PerformSpaceToBatchOnConvolution(
    HloInstruction* convolution) {
  if (!ConsumeFuel("space-to-batch-converter", [&] {
        return "Skipping space-to-batch propagation because fuel over\n";
      })) {
    return absl::OkStatus();
  }
  VLOG(1) << "Handling conv " << convolution->ToString();

  ConvolutionDimensionNumbers dim_numbers =
      convolution->convolution_dimension_numbers();

  ConvDetails c = GetConvolutionDetails(convolution, dim_numbers);

  int64_t activations_batch_dim = dim_numbers.input_batch_dimension();

  auto activations = convolution->mutable_operand(0);

  VLOG(1) << "spatial size " << c.spatial_size;

  // A spatial size smaller than twice the split count cannot benefit.
  if (c.spatial_size < 2 * ctrl_.number_of_splits) {
    return absl::OkStatus();
  }

  auto original_conv = convolution;

  const int64_t output_spatial_dim = dim_numbers.output_spatial_dimensions(
      GetFirstChosenSpatialDim(convolution));
  const int64_t output_offsets =
      convolution->shape().dimensions(output_spatial_dim);
  const int64_t output_offsets_per_split =
      CeilOfRatio(output_offsets, ctrl_.number_of_splits);

  int64_t spatial_split_size =
      CeilOfRatio(output_offsets_per_split, c.base_dilation_factor) * c.stride;

  // Grow the split size until the splits cover the whole (padded) space.
  while (spatial_split_size * ctrl_.number_of_splits - c.spatial_size < 0) {
    spatial_split_size += c.stride;
  }

  auto reduce_window_or_select_and_scatter =
      DoesConvolutionFeedReduceWindowOrSelectAndScatter(convolution);

  if (reduce_window_or_select_and_scatter != nullptr &&
      reduce_window_or_select_and_scatter->shape().IsArray() &&
      reduce_window_or_select_and_scatter->shape().rank() ==
          convolution->shape().rank()) {
    VLOG(2)
        << "DoesConvolutionFeedReduceWindowOrSelectAndScatter returned true";
    // Keep the split size compatible with the downstream window's stride so
    // propagation through the reduce-window stays possible.
    const int64_t win_stride =
        std::max(reduce_window_or_select_and_scatter->window()
                     .dimensions(output_spatial_dim)
                     .stride(),
                 static_cast<int64_t>(1));
    CHECK_NE(win_stride, 0)
        << "Bad op " << reduce_window_or_select_and_scatter->ToString();
    CHECK_NE(c.stride, 0) << "Bad op " << convolution->ToString();
    while ((spatial_split_size / c.stride) % win_stride != 0) {
      spatial_split_size += c.stride;
    }
  }

  const int64_t slice_size = spatial_split_size + c.halo_size;

  const int64_t low_pad_to_handle_base_dilation =
      (c.base_dilation_factor > 1 &&
       c.base_dilation_factor == c.inherent_low_padding) ? 1 : 0;

  // Pad spatial dim.
  int64_t pad_size =
      spatial_split_size * ctrl_.number_of_splits - c.spatial_size;

  bool handle_low_pad_in_first_reshape = false;
  if (pad_size > low_pad_to_handle_base_dilation) {
    pad_size -= low_pad_to_handle_base_dilation;
    handle_low_pad_in_first_reshape = true;
  }

  VLOG(1) << "spatial_split_size " << spatial_split_size << " stride "
          << c.stride << " slice_size " << slice_size;
  VLOG(1) << "spatial_dimension_to_split " << c.spatial_dimensions_to_split[0]
          << " num_splits " << ctrl_.number_of_splits
          << " kernel_spatial_dim_size " << c.kernel_spatial_dim_size;
  std::vector<int64_t> spatial_dimensions_to_split =
      c.spatial_dimensions_to_split;
  TF_ASSIGN_OR_RETURN(
      auto retval,
      SplitSpace(
          activations, dim_numbers, activations_batch_dim,
          c.inherent_high_padding + pad_size,
          c.base_dilation_factor == 1 ? c.inherent_low_padding
          : handle_low_pad_in_first_reshape ? low_pad_to_handle_base_dilation
                                            : 0,
          spatial_split_size, ctrl_.number_of_splits,
          &spatial_dimensions_to_split));
  HloInstruction* batch_increased_reshape = retval.first;
  convolution->SetupDerivedInstruction(batch_increased_reshape);

  VLOG(1) << "First reshape done " << batch_increased_reshape->ToString();

  TF_ASSIGN_OR_RETURN(
      activations,
      HaloDuplicateWithSlice(
          batch_increased_reshape, spatial_dimensions_to_split,
          activations_batch_dim,
          handle_low_pad_in_first_reshape ?
                                            0 : low_pad_to_handle_base_dilation,
          c.halo_size));

  VLOG(1) << "Batch merge done " << activations->ToString();

  // Build the new dimension numbers: the output batch dimension slides in
  // front of the chosen output spatial dimension.
  auto new_dim_numbers = dim_numbers;

  const int64_t rank = convolution->shape().rank();
  std::vector<int64_t> transpose_dims(rank);
  int dim_count = 0;
  std::map<int64_t, int64_t> dim_translator;

  for (int j = 0; j < dim_numbers.output_spatial_dimensions_size(); ++j) {
    if (j == GetFirstChosenSpatialDim(convolution)) {
      dim_translator[dim_numbers.output_batch_dimension()] = dim_count;
      new_dim_numbers.set_output_batch_dimension(dim_count++);
    }
    dim_translator[dim_numbers.output_spatial_dimensions(j)] = dim_count;
    new_dim_numbers.set_output_spatial_dimensions(j, dim_count);
    dim_count++;
  }

  dim_translator[dim_numbers.output_feature_dimension()] = dim_count;
  new_dim_numbers.set_output_feature_dimension(dim_count);
  int p = 0;
  // dim_translator (a std::map) iterates in old-dim order; its values form
  // the permutation that maps old dims to new positions.
  for (const auto& entry : dim_translator) {
    transpose_dims[p] = entry.second;
    p++;
  }
  VLOG(1) << "New dim numbers " << new_dim_numbers.DebugString()
          << " batch dim " << new_dim_numbers.input_batch_dimension();
  auto new_window = convolution->window();
  const int64_t first_dim = GetFirstChosenSpatialDim(convolution);
  for (int i = 0; i < ctrl_.count_of_dimensions_to_convert; ++i) {
    new_window.mutable_dimensions(first_dim + i)
        ->set_padding_high(c.high_padding_for_conv);
    new_window.mutable_dimensions(first_dim + i)
        ->set_padding_low(c.low_padding_for_conv);
  }
  TF_ASSIGN_OR_RETURN(
      HloInstruction * new_conv,
      MakeConvolveHlo(
          activations, convolution->mutable_operand(1),
          convolution->feature_group_count(), convolution->batch_group_count(),
          new_window, new_dim_numbers, convolution->precision_config(),
          convolution->shape().element_type(), &convolution->metadata(),
          &convolution->frontend_attributes()));
  convolution->SetupDerivedInstruction(new_conv);

  batch_to_space_map_[convolution->mutable_operand(0)] =
      convolution->mutable_operand(0);

  VLOG(1) << "Space-to-batched convolution " << new_conv->ToString();

  std::vector<int64_t> new_output_split_spatial_dims(
      ctrl_.count_of_dimensions_to_convert),
      old_output_split_spatial_dims(ctrl_.count_of_dimensions_to_convert);
  for (int i = 0; i < ctrl_.count_of_dimensions_to_convert; ++i) {
    old_output_split_spatial_dims[i] =
        dim_numbers.output_spatial_dimensions(first_dim + i);
    new_output_split_spatial_dims[i] =
        new_dim_numbers.output_spatial_dimensions(first_dim + i);
  }

  const int64_t output_batch_dim = new_dim_numbers.output_batch_dimension();

  // Mask out the padded area of the output with zeros.
  auto select_val = computation_->AddInstruction(
      HloInstruction::CreateConstant(
          LiteralUtil::Zero(new_conv->shape().element_type())),
      &convolution->metadata(), &convolution->frontend_attributes());

  TF_ASSIGN_OR_RETURN(
      new_conv,
      SelectValidPortion(new_conv, original_conv, select_val, output_batch_dim,
                         new_output_split_spatial_dims,
                         dim_numbers.output_batch_dimension(),
                         old_output_split_spatial_dims));
  old_to_new_instrs_[original_conv] = new_conv;

  std::vector<int64_t> dim_map(kNumMappedDims);
  dim_map[DimMapper(SpaceToBatchDimMap::kBatch)] =
      dim_numbers.output_batch_dimension();
  dim_map[DimMapper(SpaceToBatchDimMap::kFeature)] =
      dim_numbers.output_feature_dimension();
  dim_map[DimMapper(SpaceToBatchDimMap::kSpace0)] =
      dim_numbers.output_spatial_dimensions(
          GetFirstChosenSpatialDim(convolution));
  instr_to_dim_map_[original_conv] = dim_map;

  instr_to_dim_permute_map_[new_conv] = std::vector<int64_t>(transpose_dims);
  // The conv is now converted; it is no longer non-propagatable.
  if (non_propagatable_instrs_.count(convolution) > 0) {
    non_propagatable_instrs_.erase(convolution);
  }
  TF_CHECK_OK(PropagateOnUsers(original_conv));

  return absl::OkStatus();
}

}  // namespace

// Pass entry point: runs the ConvolutionVisitor on every non-fusion
// computation of the module.
absl::StatusOr<bool> SpaceToBatchConverter::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  XLA_VLOG_LINES(
      2, "SpaceToBatchConverter::Run(), before:\n" + module->ToString());
  bool changed = false;
  for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
    ConvolutionVisitor visitor(ctrl_, comp);
    if (visitor.Run().value()) {
      changed = true;
    }
    VLOG(1) << "Done operating on computation";
  }
  XLA_VLOG_LINES(2,
                 "SpaceToBatchConverter::Run(), after:\n" + module->ToString());
  return changed;
}

}  // namespace xla
// Unit tests for the SpaceToBatchConverter pass. Each test parses an HLO
// module, runs the pass with a SpaceToBatchController, and checks whether the
// pass fired and what shape the rewritten graph takes.
#include "xla/service/space_to_batch_converter.h"

#include <memory>
#include <string>

#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"

namespace xla {
namespace {

using SpaceToBatchConverterTest = HloTestBase;
namespace op = testing::opcode_matchers;

// Batch-1 convolution is space-to-batched; the rewritten graph ends in
// transpose(slice(reshape(reshape(...)))) and the inner convolution gains a
// batch dimension larger than 1.
TEST_F(SpaceToBatchConverterTest, SimpleBatch1) {
  std::string hlo_string = R"(
  HloModule module
ENTRY computation {
  %p0 = bf16[1,258,258,32] parameter(0)
  %p1 = bf16[3,3,32,32] parameter(1)
  ROOT %convolution = bf16[1,256,256,32] convolution(%p0, %p1), window={size=3x3},
  dim_labels=b01f_01io->b01f
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto computation = module->entry_computation();
  SpaceToBatchConverter converter(
      SpaceToBatchController{true, true, true, true, 8});
  ASSERT_TRUE(converter.Run(module.get()).value());
  HloInstruction* root = computation->root_instruction();
  EXPECT_THAT(root, op::Transpose());
  EXPECT_THAT(root->operand(0), op::Slice());
  auto reshape = root->operand(0)->operand(0);
  EXPECT_THAT(reshape, op::Reshape());
  auto previous_reshape = reshape->operand(0);
  EXPECT_THAT(previous_reshape, op::Reshape());
  EXPECT_THAT(previous_reshape->operand(0)->operand(1), op::Convolution());
  const int64_t batch_dim = previous_reshape->operand(0)
                                ->operand(1)
                                ->convolution_dimension_numbers()
                                .output_batch_dimension();
  // The space dimension must have been folded into batch.
  EXPECT_GT(previous_reshape->operand(0)->shape().dimensions(batch_dim), 1);
}

// Same as SimpleBatch1 but with a transpose user; the select that masks the
// valid portion must be visible above the convolution.
TEST_F(SpaceToBatchConverterTest, SimpleBatch1ConvXpose) {
  std::string hlo_string = R"(
  HloModule module
ENTRY computation {
  %p0 = bf16[1,258,258,32] parameter(0)
  %p1 = bf16[3,3,32,32] parameter(1)
  %convolution = bf16[1,256,256,32] convolution(%p0, %p1), window={size=3x3},
  dim_labels=b01f_01io->b01f
  ROOT tr = bf16[1,256,256,32] transpose(%convolution), dimensions={0,2,1,3}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto computation = module->entry_computation();
  SpaceToBatchConverter converter(
      SpaceToBatchController{true, true, true, true, 8});
  ASSERT_TRUE(converter.Run(module.get()).value());
  HloInstruction* root = computation->root_instruction();
  EXPECT_THAT(root, op::Transpose());
  EXPECT_THAT(root->operand(0), op::Slice());
  auto reshape = root->operand(0)->operand(0);
  EXPECT_THAT(reshape, op::Reshape());
  auto previous_reshape = reshape->operand(0);
  EXPECT_THAT(previous_reshape, op::Reshape());
  EXPECT_THAT(previous_reshape->operand(0), op::Select());
  EXPECT_THAT(previous_reshape->operand(0)->operand(1), op::Convolution());
}

// The pass must still succeed when an unrelated reduce-window consumes a
// non-convolution tuple element.
TEST_F(SpaceToBatchConverterTest, SimpleBatch1WithReduceWindow) {
  std::string hlo_string = R"(
  HloModule module
  adder (lhs: bf16[], rhs: bf16[]) -> bf16[] {
    lhs = bf16[] parameter(0)
    rhs = bf16[] parameter(1)
    ROOT add = bf16[] add(lhs, rhs)
  }
  ENTRY computation {
    %p0 = bf16[1,258,258,32] parameter(0)
    %p1 = bf16[3,3,32,32] parameter(1)
    %convolution = bf16[1,256,256,32] convolution(%p0, %p1), window={size=3x3},
    dim_labels=b01f_01io->b01f
    %constant = bf16[3] constant({1.0, 2.0, 3.0})
    %tuple = (bf16[1,256,256,32], bf16[3])tuple(%convolution, %constant)
    ROOT %gte = bf16[1,256,256,32] get-tuple-element(%tuple), index=0
    %gte2 = bf16[3]get-tuple-element(%tuple), index=1
    %init = bf16[] constant(1.0)
    %reduce-window = bf16[3] reduce-window(bf16[3] %gte2, bf16[] %init),
    window={size=1}, to_apply=%adder
  }
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  SpaceToBatchConverter converter(
      SpaceToBatchController{true, true, true, true, 8});
  ASSERT_TRUE(converter.Run(module.get()).value());
}

// With a limit of 1 the batch-2 convolution is not eligible, so the pass
// reports no change.
TEST_F(SpaceToBatchConverterTest, SimpleBatch2) {
  std::string hlo_string = R"(
  HloModule module
  ENTRY computation {
    %p0 = bf16[2,258,258,32] parameter(0)
    %p1 = bf16[3,3,32,32] parameter(1)
    ROOT %convolution = bf16[2,256,256,32] convolution(%p0, %p1), window={size=3x3},
    dim_labels=b01f_01io->b01f
  }
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  SpaceToBatchConverter converter(
      SpaceToBatchController{true, true, true, true, 1});
  ASSERT_FALSE(converter.Run(module.get()).value());
}

// An unknown custom-call user blocks propagation, so the pass bails out.
TEST_F(SpaceToBatchConverterTest, UnpropagatableOp) {
  std::string hlo_string = R"(
  HloModule module

  ENTRY comp {
    %reduce-window = bf16[1,76,76,64]{3,2,1,0} parameter(0)
    %convert.13 = bf16[3,3,64,64]{3,2,1,0} parameter(1)
    %convolution.1 = bf16[64,76,76,1]{0,2,1,3} convolution(
      %reduce-window, %convert.13), window={size=3x3 pad=1_1x1_1},
      dim_labels=b01f_01io->f01b
    ROOT custom-call.5079 = bf16[64,152,152,1]{0,2,1,3} custom-call(%convolution.1),
      custom_call_target="ResizeNearest"
  }
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  SpaceToBatchConverter converter(
      SpaceToBatchController{true, true, true, true, 1});
  ASSERT_FALSE(converter.Run(module.get()).value());
}

// Strided, padded convolution is still space-to-batched; the resulting batch
// must exceed the number-of-splits of 4.
TEST_F(SpaceToBatchConverterTest, Batch1WithStrideAndPad) {
  std::string hlo_string = R"(
  HloModule module
  ENTRY computation {
    %p0 = bf16[1,224,224,3]{3,2,1,0} parameter(0)
    %p1 = bf16[7,7,3,64]{3,2,1,0} parameter(1)
    ROOT %convolution.3 = bf16[1,112,112,64]{3,2,1,0} convolution(%p0, %p1),
      window={size=7x7 stride=2x2 pad=3_3x3_3}, dim_labels=b01f_01io->b01f
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto computation = module->entry_computation();
  SpaceToBatchConverter converter(
      SpaceToBatchController{true, true, true, true, 4});
  ASSERT_TRUE(converter.Run(module.get()).value());
  HloInstruction* root = computation->root_instruction();
  EXPECT_THAT(root, op::Transpose());
  EXPECT_THAT(root->operand(0), op::Slice());
  auto reshape = root->operand(0)->operand(0);
  EXPECT_THAT(reshape, op::Reshape());
  auto previous_reshape = reshape->operand(0);
  EXPECT_THAT(previous_reshape, op::Reshape());
  EXPECT_THAT(previous_reshape->operand(0)->operand(1), op::Convolution());
  const int64_t batch_dim = previous_reshape->operand(0)
                                ->operand(1)
                                ->convolution_dimension_numbers()
                                .output_batch_dimension();
  EXPECT_GT(previous_reshape->operand(0)->shape().dimensions(batch_dim), 4);
}

// Base-dilated (lhs_dilate) convolution is also handled.
TEST_F(SpaceToBatchConverterTest, Batch1WithBaseDilation) {
  std::string hlo_string = R"(
  HloModule module
ENTRY computation {
  %p2 = bf16[1,28,28,128]{3,0,2,1} parameter(0)
  %p3 = bf16[1,1,512,128]{3,2,1,0} parameter(1)
  ROOT %c = bf16[1,56,56,512]{3,0,2,1} convolution(%p2, %p3),
    window={size=1x1 pad=0_1x0_1 lhs_dilate=2x2 rhs_reversal=1x1},
    dim_labels=b01f_01oi->b01f
}
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto computation = module->entry_computation();
  SpaceToBatchConverter converter(
      SpaceToBatchController{true, true, true, true, 8});
  ASSERT_TRUE(converter.Run(module.get()).value());
  HloInstruction* root = computation->root_instruction();
  EXPECT_THAT(root, op::Transpose());
  EXPECT_THAT(root->operand(0), op::Slice());
  auto reshape = root->operand(0)->operand(0);
  EXPECT_THAT(reshape, op::Reshape());
  auto previous_reshape = reshape->operand(0);
  EXPECT_THAT(previous_reshape, op::Reshape());
  EXPECT_THAT(previous_reshape->operand(0)->operand(1), op::Convolution());
  const int64_t batch_dim = previous_reshape->operand(0)
                                ->operand(1)
                                ->convolution_dimension_numbers()
                                .output_batch_dimension();
  EXPECT_GT(previous_reshape->operand(0)->shape().dimensions(batch_dim), 4);
}

// Propagation through a dot whose contracting dims avoid the space/batch dims
// must succeed.
TEST_F(SpaceToBatchConverterTest, PropagateThroughDot) {
  std::string hlo_string = R"(
  HloModule module

  ENTRY computation {
    %p0 = bf16[1,258,258,32] parameter(0)
    %p1 = bf16[3,3,32,32] parameter(1)
    %convolution = bf16[1,256,256,32] convolution(%p0, %p1),
      window={size=3x3}, dim_labels=b01f_01io->b01f
    %p2 = bf16[32,32] parameter(2)
    ROOT %dot.5010 = bf16[1,256,256,32] dot(%convolution, %p2),
      lhs_contracting_dims={3},
      rhs_contracting_dims={0}
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  SpaceToBatchConverter converter(
      SpaceToBatchController{true, true, true, true, 8});
  ASSERT_TRUE(converter.Run(module.get()).value());
}

// A feature-only reduce is propagated through; the new reduce's batch dim is
// the original batch (7) times the number of splits (8).
TEST_F(SpaceToBatchConverterTest, PropagateOnTrivialReduce) {
  std::string hlo_string = R"(
  HloModule module

  %region_1.37 (Arg_0.38: f32[], Arg_1.39: f32[]) -> f32[] {
    %Arg_0.38 = f32[] parameter(0)
    %Arg_1.39 = f32[] parameter(1)
    ROOT %add.40 = f32[] add(f32[] %Arg_0.38, f32[] %Arg_1.39)
  }

  ENTRY computation {
    %p0 = bf16[7,320,800,3]{3,2,1,0} parameter(0)
    %p1 = bf16[3,3,3,32]{3,2,1,0} parameter(1)
    %c = f32[7,160,400,32]{3,2,1,0} convolution( %p0, %p1),
      window={size=3x3 stride=2x2 pad=0_1x0_1}, dim_labels=b01f_01io->b01f
    %constant.5 = f32[] constant(0)
    ROOT %reduce.41 = f32[7,160,400]{2,1,0} reduce(%c, %constant.5), dimensions={3}, to_apply=%region_1.37
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto computation = module->entry_computation();
  SpaceToBatchConverter converter(
      SpaceToBatchController{true, true, true, true, 8});
  ASSERT_TRUE(converter.Run(module.get()).value());
  HloInstruction* root = computation->root_instruction();
  EXPECT_THAT(root, op::Transpose());
  EXPECT_THAT(root->operand(0)->operand(0)->operand(0)->operand(0),
              op::Reduce());
  auto new_reduce = root->operand(0)->operand(0)->operand(0)->operand(0);
  // Batch picks up the splits: 7 (original batch) * 8 (number of splits).
  EXPECT_EQ(new_reduce->shape().dimensions(1), 7 * 8);
}

// Variadic (tuple-shaped) reduce is not supported: the root stays a reduce.
TEST_F(SpaceToBatchConverterTest, DoNotPropagateOnTupleReduce) {
  std::string hlo_string = R"(
  HloModule module

  %minmax_func.2717 {
    %lhs_value.2718 = f32[] parameter(0)
    %rhs_value.2720 = f32[] parameter(2)
    %compare.2722 = pred[] compare(f32[] %lhs_value.2718, f32[] %rhs_value.2720), direction=GE
    %select.2723 = f32[] select(pred[] %compare.2722, f32[] %lhs_value.2718, f32[] %rhs_value.2720)
    %compare.2725 = pred[] compare(f32[] %lhs_value.2718, f32[] %rhs_value.2720), direction=EQ
    %lhs_index.2719 = f32[] parameter(1)
    %rhs_index.2721 = f32[] parameter(3)
    %minimum.2726 = f32[] minimum(f32[] %lhs_index.2719, f32[] %rhs_index.2721)
    %select.2724 = f32[] select(pred[] %compare.2722, f32[] %lhs_index.2719, f32[] %rhs_index.2721)
    %select.2727 = f32[] select(pred[] %compare.2725, f32[] %minimum.2726, f32[] %select.2724)
    ROOT %tuple.4 = (f32[], f32[]) tuple(f32[] %select.2723, f32[] %select.2727)
  }

  ENTRY computation {
    %p0 = bf16[7,320,800,3]{3,2,1,0} parameter(0)
    %p1 = bf16[3,3,3,32]{3,2,1,0} parameter(1)
    %c = f32[7,160,400,32]{3,2,1,0} convolution( %p0, %p1),
      window={size=3x3 stride=2x2 pad=0_1x0_1}, dim_labels=b01f_01io->b01f
    %constant.5 = f32[] constant(0)
    %constant.6 = f32[] constant(1)
    ROOT %reduce.36 = (f32[7,160,400]{2,1,0}, f32[7,160,400]{2,1,0}) reduce(%c, %c, %constant.5, %constant.6), dimensions={3}, to_apply=%minmax_func.2717
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto computation = module->entry_computation();
  SpaceToBatchConverter converter(
      SpaceToBatchController{true, true, true, true, 8});
  ASSERT_TRUE(converter.Run(module.get()).value());
  HloInstruction* root = computation->root_instruction();
  EXPECT_THAT(root, op::Reduce());
}

// Reduction over a degenerate (size-1) spatial dim plus feature dim still
// lets the pass rewrite the graph.
TEST_F(SpaceToBatchConverterTest, ReduceDegenerateDim) {
  std::string hlo_string = R"(
  HloModule module

  %region_42.4982 {
    %Arg_0.38 = f32[] parameter(0)
    %Arg_1.39 = f32[] parameter(1)
    ROOT %add.40 = f32[] add(f32[] %Arg_0.38, f32[] %Arg_1.39)
  }

  ENTRY computation {
    %p0 = f32[2,1,84,84,3]{4,3,2,1,0} parameter(0)
    %p1 = f32[3,3,3,3,32]{4,3,2,1,0} parameter(1)
    %constant.10559 = f32[] constant(0)
    %convolution.98 = f32[2,1,84,84,32]{4,3,2,1,0} convolution(%p0, %p1),
      window={size=3x3x3 pad=1_1x1_1x1_1}, dim_labels=b012f_012io->b012f
    ROOT %reduce.2606 = f32[2,84,84]{2,1,0} reduce(f32[2,1,84,84,32]{4,3,2,1,0} %convolution.98, f32[] %constant.10559), dimensions={1,4}, to_apply=%region_42.4982
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto computation = module->entry_computation();
  SpaceToBatchConverter converter(
      SpaceToBatchController{true, true, true, true, 8});
  ASSERT_TRUE(converter.Run(module.get()).value());
  HloInstruction* root = computation->root_instruction();
  EXPECT_THAT(root, op::Transpose());
  EXPECT_THAT(root->operand(0), op::Slice());
}

// A full reduction over batch and both spatial dims keeps the reduce at the
// root (it consumes the space-to-batched dims directly).
TEST_F(SpaceToBatchConverterTest, PropagateOnReduce) {
  std::string hlo_string = R"(
HloModule xla_computation_unknown.14

region_0.134 {
  Arg_0.135 = f32[] parameter(0)
  Arg_1.136 = f32[] parameter(1)
  ROOT add.137 = f32[] add(Arg_0.135, Arg_1.136)
}

ENTRY main.140 {
  p0 = bf16[1,512,32,128]{3,2,1,0} parameter(0)
  p1 = f32[3,3,128,128]{3,2,1,0} parameter(1)
  %convolution.755 = f32[1,512,32,128]{3,2,1,0}
    convolution(p0, p1), window={size=3x3 pad=1_1x1_1 rhs_reversal=1x1},
    dim_labels=b01f_01oi->b01f
  %constant.19458 = f32[] constant(0)
  ROOT %reduce.1354 = f32[128]{0} reduce(%convolution.755, %constant.19458),
    dimensions={0,1,2}, to_apply=%region_0.134
}
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto computation = module->entry_computation();
  SpaceToBatchConverter converter(
      SpaceToBatchController{true, true, true, true, 8});
  ASSERT_TRUE(converter.Run(module.get()).value());
  HloInstruction* root = computation->root_instruction();
  EXPECT_THAT(root, op::Reduce());
}

}
}
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/space_to_batch_converter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/space_to_batch_converter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
b5a9efe7-04e9-4757-ada9-15a588372915
cpp
tensorflow/tensorflow
host_offload_utils
tensorflow/core/profiler/utils/host_offload_utils.cc
third_party/xla/xla/service/host_offload_utils_test.cc
// Implementation of HostOffloadEventProcessor: detects host-offload
// copy/slice ops in an XPlane trace and pairs each "-start" event with its
// "-done" event, emitting a single merged async event on a dedicated
// host-offload line of the plane.
#include "tensorflow/core/profiler/utils/host_offload_utils.h"

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <vector>

#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/timespan.h"
#include "tensorflow/core/profiler/utils/trace_utils.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"

namespace tensorflow {
namespace profiler {

// Returns true when the event looks like a host-offload transfer op: its
// display name contains one of the known copy/slice start/done keywords AND
// its raw name mentions the host memory label.
bool HostOffloadEventProcessor::IsHostOffloadOpName(
    const XEventVisitor& event) const {
  static constexpr absl::string_view keywords[] = {"copy-start",
                                                   "copy-done",
                                                   "dynamic-slice-start",
                                                   "dynamic-slice-done",
                                                   "dynamic-update-slice-start",
                                                   "dynamic-update-slice-done"};
  for (const auto& keyword : keywords) {
    if (absl::StrContains(event.DisplayName(), keyword) &&
        absl::StrContains(event.Name(), host_memory_label_)) {
      return true;
    }
  }
  return false;
}

// Extracts the trailing numeric suffix after the last '.' of an op name
// (e.g. "copy-start.42" -> "42"); returns "0" when there is no suffix.
std::string HostOffloadEventProcessor::GetOffloadInstructionID(
    absl::string_view op_name) const {
  std::vector<std::string> op_name_vec = absl::StrSplit(op_name, '.');
  if (op_name_vec.size() < 2) {
    return "0";
  }
  return op_name_vec.back();
}

// Builds a canonical "offload-<base>.<id>" display name shared by a
// start/done pair, so both halves of the async op map to the same key.
std::string HostOffloadEventProcessor::GetOffloadInstructionName(
    absl::string_view op_name) const {
  std::string display_id = GetOffloadInstructionID(op_name);
  size_t startPos = op_name.find("-start");
  size_t donePos = op_name.find("-done");
  absl::string_view display_opname;
  if (startPos != absl::string_view::npos) {
    display_opname = op_name.substr(0, startPos);
  } else if (donePos != absl::string_view::npos) {
    display_opname = op_name.substr(0, donePos);
  } else {
    // Neither suffix present: keep the whole name but warn, since the caller
    // expected a start/done op.
    LOG(WARNING) << "Invalid op name: " << op_name;
    display_opname = op_name;
  }
  return absl::StrCat("offload-", display_opname, ".", display_id);
}

// Handles one host-offload event. "-start" events are queued per canonical
// name; a "-done" event pops its matching start, merges the two timespans,
// and emits a single async event on the least-loaded host-offload line.
void HostOffloadEventProcessor::ProcessHostOffloadOpEvent(
    const XEventVisitor& event, std::optional<int64_t> group_id) {
  std::string display_opname = GetOffloadInstructionName(event.DisplayName());
  auto [iter, inserted] = seen_events_.try_emplace(display_opname);
  std::queue<const XEventVisitor*>& events = iter->second;
  if (absl::StrContains(event.DisplayName(), "-start")) {
    // Starts are FIFO-queued; the matching done pops in order.
    events.push(&event);
    return;
  } else if (absl::StrContains(event.DisplayName(), "-done")) {
    if (events.empty()) {
      LOG(INFO) << "No corresponding start event found for "
                << event.DisplayName();
      return;
    }
    const XEventVisitor* start_event = events.front();
    events.pop();
    // Merged span: from the start event's begin to the done event's end.
    tsl::profiler::Timespan event_span = tsl::profiler::Timespan::FromEndPoints(
        start_event->GetTimespan().begin_ps(), event.GetTimespan().end_ps());

    // Pick an existing line whose last event ends at or before this span's
    // begin, preferring the earliest frontier (keeps lines non-overlapping).
    // NOTE(review): the frontier field is named *_ns but is compared against
    // picosecond values from Timespan — presumably it actually stores ps;
    // confirm against the header before relying on the unit.
    int line_builder_index = -1;
    uint64_t minimum_end_time_frontier = event_span.begin_ps();
    for (int i = 0; i < host_offload_op_line_builders_.size(); ++i) {
      if (host_offload_op_line_builders_[i].event_end_time_frontier_ns <=
          minimum_end_time_frontier) {
        line_builder_index = i;
        minimum_end_time_frontier =
            host_offload_op_line_builders_[i].event_end_time_frontier_ns;
      }
    }

    constexpr int kMaxHostOffloadOpLinesSize =
        kThreadIdHostOffloadOpEnd - kThreadIdHostOffloadOpStart + 1;

    // No free line: create a new one if under the cap, otherwise reuse the
    // last line (events may then visually overlap).
    if (line_builder_index == -1) {
      if (host_offload_op_line_builders_.size() < kMaxHostOffloadOpLinesSize) {
        XLineBuilder lb = plane_builder_->GetOrCreateLine(
            kThreadIdHostOffloadOpStart +
            host_offload_op_line_builders_.size());
        lb.SetName(absl::StrFormat("%s row %d", kHostOffloadOpLineName,
                                   host_offload_op_line_builders_.size()));
        lb.SetTimestampNs(start_timestamp_ns_);
        host_offload_op_line_builders_.push_back(
            {std::move(lb), event_span.end_ps()});
      }
      line_builder_index = host_offload_op_line_builders_.size() - 1;
    }

    // Advance the chosen line's frontier to cover this event.
    host_offload_op_line_builders_[line_builder_index]
        .event_end_time_frontier_ns =
        std::max(host_offload_op_line_builders_[line_builder_index]
                     .event_end_time_frontier_ns,
                 event_span.end_ps());

    // Emit the merged event, marked async so the UI renders it as such.
    XEventMetadata* host_offload_copy_metadata =
        plane_builder_->CreateEventMetadata();
    host_offload_copy_metadata->set_display_name(display_opname);
    XEventBuilder event_builder =
        host_offload_op_line_builders_[line_builder_index]
            .line_builder.AddEvent(*host_offload_copy_metadata);
    event_builder.SetTimespan(event_span);
    const XStatMetadata& async_stat = *plane_builder_->GetOrCreateStatMetadata(
        GetStatTypeStr(StatType::kIsAsync));
    event_builder.AddStatValue(async_stat, 1);
  }
}

}
}
// Unit tests for host_offload_utils' graph-walking helpers GetSuccessors /
// GetPredecessors over (instruction, shape-index) pairs.
#include "xla/service/host_offload_utils.h"

#include <string>
#include <vector>

#include <gtest/gtest.h>
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace host_offload_utils {
namespace {

class HostOffloadUtilsTest : public HloTestBase {};

// Straight-line graph: successor of a parameter is its (only) user, and the
// predecessor of that user is the parameter.
TEST_F(HostOffloadUtilsTest, SimpleGetSuccessorsGetPredecessorsTest) {
  const std::string& hlo_string = R"(
HloModule my_module
ENTRY main {
  data_param = f32[1,2048,2048] parameter(0)
  index_param = s32[] parameter(1)
  constant_f32_0 = f32[] constant(0)
  constant_s32_0 = s32[] constant(0)
  broadcast = f32[2,2048,2048] broadcast(constant_f32_0), dimensions={}
  offload_custom_call = f32[1,2048,2048] custom-call(data_param), custom_call_target="MoveToHost"
  dynamic_update_slice = f32[2,2048,2048] dynamic-update-slice(broadcast, offload_custom_call, index_param, constant_s32_0, constant_s32_0)
  dynamic_slice = f32[1,2048,2048] dynamic-slice(dynamic_update_slice, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
  ROOT load_custom_call = f32[1,2048,2048] custom-call(dynamic_slice), custom_call_target="MoveToDevice"
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));

  HloInstruction* data_param = FindInstruction(module.get(), "data_param");
  ASSERT_NE(data_param, nullptr);
  HloInstruction* offload_custom_call =
      FindInstruction(module.get(), "offload_custom_call");
  ASSERT_NE(offload_custom_call, nullptr);

  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<InstructionAndShapeIndex> succ,
      GetSuccessors(InstructionAndShapeIndex(data_param, {})));
  std::vector<InstructionAndShapeIndex> expected_succ = {
      InstructionAndShapeIndex(offload_custom_call, {})};
  EXPECT_EQ(succ, expected_succ);

  std::vector<InstructionAndShapeIndex> pred =
      GetPredecessors(InstructionAndShapeIndex(offload_custom_call, {}));
  std::vector<InstructionAndShapeIndex> expected_pred = {
      InstructionAndShapeIndex(data_param, {})};
  EXPECT_EQ(pred, expected_pred);
}

// Across a called computation: a call result at shape index {0} flows to the
// matching get-tuple-element, and its predecessor is the callee's root tuple
// at the same index.
TEST_F(HostOffloadUtilsTest, ComputationGetSuccessorsGetPredecessorsTest) {
  const std::string& hlo_string = R"(
HloModule my_module
other_computation {
  param_0 = f32[2048] parameter(0)
  param_1 = f32[2048] parameter(1)
  ROOT tuple = (f32[2048], f32[2048]) tuple(param_0, param_1)
}
ENTRY main {
  data_param = f32[2048] parameter(0)
  other_param = f32[2048] parameter(1)
  offload_custom_call = f32[2048] custom-call(data_param), custom_call_target="MoveToHost"
  call = (f32[2048], f32[2048]) call(offload_custom_call, other_param), to_apply=other_computation
  gte_0 = f32[2048] get-tuple-element(call), index=0
  gte_1 = f32[2048] get-tuple-element(call), index=1
  ROOT load_custom_call = f32[2048] custom-call(gte_0), custom_call_target="MoveToDevice"
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));

  HloInstruction* call = FindInstruction(module.get(), "call");
  ASSERT_NE(call, nullptr);
  HloInstruction* gte_0 = FindInstruction(module.get(), "gte_0");
  ASSERT_NE(gte_0, nullptr);
  HloInstruction* tuple = FindInstruction(module.get(), "tuple");
  ASSERT_NE(tuple, nullptr);

  TF_ASSERT_OK_AND_ASSIGN(std::vector<InstructionAndShapeIndex> succ,
                          GetSuccessors(InstructionAndShapeIndex(call, {0})));
  std::vector<InstructionAndShapeIndex> expected_succ = {
      InstructionAndShapeIndex(gte_0, {})};
  EXPECT_EQ(succ, expected_succ);

  std::vector<InstructionAndShapeIndex> pred =
      GetPredecessors(InstructionAndShapeIndex(call, {0}));
  std::vector<InstructionAndShapeIndex> expected_pred = {
      InstructionAndShapeIndex(tuple, {0})};
  EXPECT_EQ(pred, expected_pred);
}

}
}
}
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/utils/host_offload_utils.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_offload_utils_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
824e9a92-d73e-40d9-bb47-186cbd4b9da5
cpp
tensorflow/tensorflow
host_memory_transfer_asyncifier
third_party/xla/xla/service/host_memory_transfer_asyncifier.cc
third_party/xla/xla/service/host_memory_transfer_asyncifier_test.cc
// HostMemoryTransferAsyncifier: rewrites synchronous host<->device memory
// transfers (dynamic-slice, dynamic-update-slice, copy) into async
// start/done instruction pairs so they can overlap with compute.
//
// Fix in this revision: Run() previously called
// module->MakeNonfusionComputations() WITHOUT the execution_threads filter it
// receives, visiting computations on all threads regardless of the caller's
// request. It now forwards execution_threads (an empty set still means "no
// filter", so existing callers are unaffected), matching the convention used
// by other passes such as SpaceToBatchConverter::Run.
#include "xla/service/host_memory_transfer_asyncifier.h"

#include <cstdint>

#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"

namespace xla {

namespace {

// Visitor that converts each qualifying transfer instruction in place via
// HloComputation::CreateAsyncInstructions and records whether anything
// changed.
class HostMemoryTransferAsyncifierVisitor : public DfsHloVisitorWithDefault {
 public:
  // `host_memory_space_color` is the layout memory-space value that marks a
  // buffer as living in host memory.
  explicit HostMemoryTransferAsyncifierVisitor(int64_t host_memory_space_color)
      : kHostMemorySpaceColor(host_memory_space_color) {}

  // True iff at least one instruction was asyncified.
  bool Changed() const { return changed_; }

  // Instructions other than the handled opcodes are left untouched.
  absl::Status DefaultAction(HloInstruction* hlo_instruction) override {
    return absl::OkStatus();
  }

  // dynamic-slice reading FROM host memory INTO default (device) memory is
  // converted to an async pair. Any other memory-space combination is left
  // alone. Returns an internal error if a layout is missing.
  absl::Status HandleDynamicSlice(HloInstruction* dynamic_slice) override {
    HloInstruction* dynamic_slice_operand = dynamic_slice->mutable_operand(0);
    if (!dynamic_slice->shape().has_layout()) {
      return InternalStrCat(dynamic_slice->name(), " does not have a layout.");
    }
    if (!dynamic_slice_operand->shape().has_layout()) {
      return InternalStrCat(dynamic_slice->name(), "'s operand, ",
                            dynamic_slice_operand->name(),
                            ", does not have a layout.");
    }

    VLOG(3) << absl::StreamFormat(
        "\"%s\" from S(%d) to S(%d)", dynamic_slice->name(),
        dynamic_slice_operand->shape().layout().memory_space(),
        dynamic_slice->shape().layout().memory_space());
    // Only host -> device slices qualify.
    if (dynamic_slice_operand->shape().layout().memory_space() !=
        kHostMemorySpaceColor) {
      return absl::OkStatus();
    }
    if (dynamic_slice->shape().layout().memory_space() !=
        xla::Layout::kDefaultMemorySpace) {
      return absl::OkStatus();
    }

    // Context/transfer-size scalars become part of the async shape.
    const Shape context_shape = ShapeUtil::MakeScalarShape(U32);
    const Shape transfer_bytes_shape = ShapeUtil::MakeScalarShape(S32);
    TF_ASSIGN_OR_RETURN(
        HloInstruction * async_done,
        dynamic_slice->parent()->CreateAsyncInstructions(
            dynamic_slice, {context_shape, transfer_bytes_shape}));
    VLOG(1) << "DynamicSlice \"" << dynamic_slice->ToString()
            << "\" is slicing from host memory. Converting to async "
            << async_done->ToString();
    MarkAsChanged();
    return absl::OkStatus();
  }

  // dynamic-update-slice writing a device-memory update INTO host memory is
  // converted to an async pair. The destination operand must share the
  // host memory space with the result; otherwise an internal error is
  // returned.
  absl::Status HandleDynamicUpdateSlice(
      HloInstruction* dynamic_update_slice) override {
    HloInstruction* dynamic_update_slice_operand =
        dynamic_update_slice->mutable_operand(0);
    HloInstruction* dynamic_update_slice_update =
        dynamic_update_slice->mutable_operand(1);
    if (!dynamic_update_slice->shape().has_layout()) {
      return InternalStrCat(dynamic_update_slice->name(),
                            " does not have a layout.");
    }
    if (!dynamic_update_slice_operand->shape().has_layout()) {
      return InternalStrCat(dynamic_update_slice->name(), "'s operand, ",
                            dynamic_update_slice_operand->name(),
                            ", does not have a layout.");
    }
    if (!dynamic_update_slice_update->shape().has_layout()) {
      return InternalStrCat(dynamic_update_slice->name(), "'s update, ",
                            dynamic_update_slice_update->name(),
                            ", does not have a layout.");
    }

    // Only device-memory updates into host-memory destinations qualify.
    if (dynamic_update_slice_update->shape().layout().memory_space() !=
        xla::Layout::kDefaultMemorySpace) {
      return absl::OkStatus();
    }
    if (dynamic_update_slice->shape().layout().memory_space() !=
        kHostMemorySpaceColor) {
      return absl::OkStatus();
    }
    if (dynamic_update_slice_operand->shape().layout().memory_space() !=
        dynamic_update_slice->shape().layout().memory_space()) {
      return InternalStrCat(
          "Unexpected that ", dynamic_update_slice_operand->name(),
          "'s memory space is not the same as the dynamic-update-slice.");
    }

    const Shape context_shape = ShapeUtil::MakeScalarShape(U32);
    TF_ASSIGN_OR_RETURN(HloInstruction * async_done,
                        dynamic_update_slice->parent()->CreateAsyncInstructions(
                            dynamic_update_slice, {context_shape}));
    VLOG(1) << "DynamicUpdateSlice \"" << dynamic_update_slice->ToString()
            << "\" is slicing into host memory space. Converting to async "
            << async_done->ToString();
    MarkAsChanged();
    return absl::OkStatus();
  }

  // A copy between device memory and host memory (either direction) is
  // converted to an async pair; same-space copies are skipped.
  absl::Status HandleCopy(HloInstruction* copy) override {
    HloInstruction* operand = copy->mutable_operand(0);
    if (!operand->shape().has_layout()) {
      return InternalStrCat(operand->name(), " does not have a layout.");
    }
    if (!copy->shape().has_layout()) {
      return InternalStrCat(copy->name(), " does not have a layout.");
    }

    const auto copy_src_memory_space = operand->shape().layout().memory_space();
    const auto copy_dst_memory_space = copy->shape().layout().memory_space();
    if (!((copy_src_memory_space == kHostMemorySpaceColor &&
           copy_dst_memory_space == xla::Layout::kDefaultMemorySpace) ||
          (copy_src_memory_space == xla::Layout::kDefaultMemorySpace &&
           copy_dst_memory_space == kHostMemorySpaceColor))) {
      VLOG(2)
          << "Skipping copy because it is not a copy between device memory and "
             "host memory: "
          << copy->ToString();
      return absl::OkStatus();
    }

    const Shape context_shape = ShapeUtil::MakeScalarShape(U32);
    TF_ASSIGN_OR_RETURN(
        HloInstruction * async_done,
        copy->parent()->CreateAsyncInstructions(copy, {context_shape}));
    VLOG(1) << "Copy \"" << copy->name()
            << "\" is between device and host memory space. Converting to async "
            << async_done->ToString();
    MarkAsChanged();
    return absl::OkStatus();
  }

 private:
  const int64_t kHostMemorySpaceColor;
  bool changed_ = false;

  void MarkAsChanged() { changed_ = true; }
};

}  // namespace

// Pass entry point. Visits every non-fusion computation on the requested
// execution threads and returns true iff any transfer was asyncified.
absl::StatusOr<bool> HostMemoryTransferAsyncifier::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  HostMemoryTransferAsyncifierVisitor visitor(kHostMemorySpaceColor);
  // Fix: forward execution_threads so the pass respects the caller's thread
  // filter (an empty set keeps the previous visit-everything behavior).
  for (HloComputation* computation :
       module->MakeNonfusionComputations(execution_threads)) {
    TF_RETURN_IF_ERROR(computation->Accept(&visitor));
  }
  return visitor.Changed();
}

}  // namespace xla
// Unit tests for HostMemoryTransferAsyncifier.
//
// The pass rewrites memory transfers between host (memory space S(5) below)
// and device (default memory space) into async-start/async-done pairs, and
// must leave same-memory-space ops untouched. Each test parses a one-op HLO
// module, runs the pass, and checks both the "changed" bit and the resulting
// instruction graph.
#include "xla/service/host_memory_transfer_asyncifier.h"

#include <cstdint>
#include <string>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace {

namespace m = ::xla::match;

class HostMemoryTransferAsyncifierTest : public HloTestBase {
 protected:
  // Parses `hlo_string` into a verified module and runs the asyncifier on it.
  // Returns whether the pass reported a change.
  absl::StatusOr<bool> RunAsyncifier(absl::string_view hlo_string) {
    TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_string));
    TF_ASSIGN_OR_RETURN(bool changed, RunAsyncifier(module.get()));
    return changed;
  }

  // Runs the asyncifier on an existing module. The module must verify and
  // must not be scheduled yet (the pass is expected to run pre-scheduling).
  absl::StatusOr<bool> RunAsyncifier(HloModule* module) {
    TF_EXPECT_OK(verifier().Run(module).status());
    if (module->has_schedule()) {
      return absl::InternalError("Expected a non-scheduled module");
    }
    HostMemoryTransferAsyncifier asyncifier(kHostMemorySpaceColor);
    return asyncifier.Run(module);
  }

 private:
  // Memory-space color treated as "host"; matches the S(5) annotations in the
  // HLO strings below.
  static constexpr int64_t kHostMemorySpaceColor{5};
};

// Host->host dynamic-update-slice is not a transfer; the pass must not touch
// it.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromHostToHost) {
  const std::string& hlo_string = R"( HloModule MyModule ENTRY main { host_operand = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0) host_update = f32[1,1,1]{2,1,0:T(2,128)S(5)} parameter(1) constant_0 = s32[] constant(0) ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)S(5)} dynamic-update-slice(host_operand, host_update, constant_0, constant_0, constant_0) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::DynamicUpdateSlice()));
}

// Device->device dynamic-update-slice stays synchronous.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromDeviceToDevice) {
  const std::string& hlo_string = R"( HloModule MyModule ENTRY main { operand = f32[32,1,1]{2,1,0:T(2,128)} parameter(0) update = f32[1,1,1]{2,1,0:T(2,128)} parameter(1) constant_0 = s32[] constant(0) ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)} dynamic-update-slice(operand, update, constant_0, constant_0, constant_0) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::DynamicUpdateSlice()));
}

// A host-resident update written into a device-resident result is not the
// pattern the pass converts (it requires the result to be in host memory), so
// no change is expected.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromHostToDevice) {
  const std::string& hlo_string = R"( HloModule MyModule ENTRY main { operand = f32[32,1,1]{2,1,0:T(2,128)} parameter(0) host_update = f32[1,1,1]{2,1,0:T(2,128)S(5)} parameter(1) constant_0 = s32[] constant(0) ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)} dynamic-update-slice(operand, host_update, constant_0, constant_0, constant_0) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::DynamicUpdateSlice()));
}

// Device update written into host memory: this is the conversion target. The
// root must become async-done(async-start), with the original
// dynamic-update-slice as the root of the wrapped async computation.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromDeviceToHost) {
  const std::string& hlo_string = R"( HloModule MyModule ENTRY main { host_operand = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0) update = f32[1,1,1]{2,1,0:T(2,128)} parameter(1) constant_0 = s32[] constant(0) ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)S(5)} dynamic-update-slice(host_operand, update, constant_0, constant_0, constant_0) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_TRUE(changed);
  HloInstruction* dynamic_update_slice_start;
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(m::Op()
                     .WithOpcode(HloOpcode::kAsyncDone)
                     .WithOperand(0, m::Op(&dynamic_update_slice_start)
                                         .WithOpcode(HloOpcode::kAsyncStart))));
  ASSERT_EQ(dynamic_update_slice_start->called_computations().size(), 1);
  HloComputation* async_dynamic_slice_computation =
      dynamic_update_slice_start->called_computations().at(0);
  EXPECT_THAT(async_dynamic_slice_computation->root_instruction(),
              GmockMatch(m::DynamicUpdateSlice()));
}

// Host->host dynamic-slice is not a transfer; no change.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromHostToHost) {
  const std::string& hlo_string = R"( HloModule MyModule ENTRY main { host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0) constant_0 = s32[] constant(0) ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)S(5)} dynamic-slice(host_memory, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1} } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::DynamicSlice()));
}

// Device->device dynamic-slice stays synchronous.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromDeviceToDevice) {
  const std::string& hlo_string = R"( HloModule MyModule ENTRY main { device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0) constant_0 = s32[] constant(0) ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)} dynamic-slice(device, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1} } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::DynamicSlice()));
}

// Slicing a device buffer into host memory is NOT converted — the pass only
// asyncifies dynamic-slice when it reads *from* host memory.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromDeviceToHost) {
  const std::string& hlo_string = R"( HloModule MyModule ENTRY main { device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0) constant_0 = s32[] constant(0) ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)S(5)} dynamic-slice(device, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1} } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::DynamicSlice()));
}

// Slicing from host memory to device: conversion target. Expect the async
// wrapper with the original dynamic-slice inside.
TEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromHostToDevice) {
  const std::string& hlo_string = R"( HloModule MyModule ENTRY main { host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0) constant_0 = s32[] constant(0) ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)} dynamic-slice(host_memory, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1} } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_TRUE(changed);
  HloInstruction* dynamic_slice_start;
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(m::Op()
                     .WithOpcode(HloOpcode::kAsyncDone)
                     .WithOperand(0, m::Op(&dynamic_slice_start)
                                         .WithOpcode(HloOpcode::kAsyncStart))));
  ASSERT_EQ(dynamic_slice_start->called_computations().size(), 1);
  HloComputation* async_dynamic_slice_computation =
      dynamic_slice_start->called_computations().at(0);
  EXPECT_THAT(async_dynamic_slice_computation->root_instruction(),
              GmockMatch(m::DynamicSlice()));
}

// Host->host copy is not a device/host transfer; no change.
TEST_F(HostMemoryTransferAsyncifierTest, CopyFromHostToHost) {
  const std::string& hlo_string = R"( HloModule MyModule ENTRY main { host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0) ROOT copy = f32[32,1,1]{2,1,0:T(2,128)S(5)} copy(host_memory) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Copy()));
}

// Device->device copy is not a device/host transfer; no change.
TEST_F(HostMemoryTransferAsyncifierTest, CopyFromDeviceToDevice) {
  const std::string& hlo_string = R"( HloModule MyModule ENTRY main { device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0) ROOT copy = f32[32,1,1]{2,1,0:T(2,128)} copy(device) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Copy()));
}

// DISABLED: expects device->host copy to become a generic
// async-start/async-done pair; the currently-enabled "Old" variant below
// checks for copy-start/copy-done instead.
TEST_F(HostMemoryTransferAsyncifierTest, DISABLED_CopyFromDeviceToHost) {
  const std::string& hlo_string = R"( HloModule MyModule ENTRY main { device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0) ROOT copy = f32[32,1,1]{2,1,0:T(2,128)S(5)} copy(device) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_TRUE(changed);
  HloInstruction* copy_start;
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(
          m::Op()
              .WithOpcode(HloOpcode::kAsyncDone)
              .WithOperand(
                  0, m::Op(&copy_start).WithOpcode(HloOpcode::kAsyncStart))));
  ASSERT_EQ(copy_start->called_computations().size(), 1);
  HloComputation* async_copy_computation =
      copy_start->called_computations().at(0);
  EXPECT_THAT(async_copy_computation->root_instruction(),
              GmockMatch(m::Copy()));
}

// Device->host copy: current behavior produces copy-done(copy-start).
TEST_F(HostMemoryTransferAsyncifierTest, OldCopyFromDeviceToHost) {
  const std::string& hlo_string = R"( HloModule MyModule ENTRY main { device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0) ROOT copy = f32[32,1,1]{2,1,0:T(2,128)S(5)} copy(device) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_TRUE(changed);
  HloInstruction* copy_start;
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(
          m::Op()
              .WithOpcode(HloOpcode::kCopyDone)
              .WithOperand(
                  0, m::Op(&copy_start).WithOpcode(HloOpcode::kCopyStart))));
}

// DISABLED: async-wrapper expectation for host->device copy (see the "Old"
// variant below for the currently-checked form).
TEST_F(HostMemoryTransferAsyncifierTest, DISABLED_CopyFromHostToDevice) {
  const std::string& hlo_string = R"( HloModule MyModule ENTRY main { host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0) ROOT copy = f32[32,1,1]{2,1,0:T(2,128)} copy(host_memory) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_TRUE(changed);
  HloInstruction* copy_start;
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(
          m::Op()
              .WithOpcode(HloOpcode::kAsyncDone)
              .WithOperand(
                  0, m::Op(&copy_start).WithOpcode(HloOpcode::kAsyncStart))));
  ASSERT_EQ(copy_start->called_computations().size(), 1);
  HloComputation* async_copy_computation =
      copy_start->called_computations().at(0);
  EXPECT_THAT(async_copy_computation->root_instruction(),
              GmockMatch(m::Copy()));
}

// Host->device copy: current behavior produces copy-done(copy-start).
TEST_F(HostMemoryTransferAsyncifierTest, OldCopyFromHostToDevice) {
  const std::string& hlo_string = R"( HloModule MyModule ENTRY main { host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0) ROOT copy = f32[32,1,1]{2,1,0:T(2,128)} copy(host_memory) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
  EXPECT_TRUE(changed);
  HloInstruction* copy_start;
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      GmockMatch(
          m::Op()
              .WithOpcode(HloOpcode::kCopyDone)
              .WithOperand(
                  0, m::Op(&copy_start).WithOpcode(HloOpcode::kCopyStart))));
}

}  // namespace
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_memory_transfer_asyncifier.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_memory_transfer_asyncifier_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
8d39f7fb-274f-498c-acfb-4154cac83495
cpp
tensorflow/tensorflow
stream_pool
third_party/xla/xla/service/stream_pool.cc
third_party/xla/xla/service/stream_pool_test.cc
#include "xla/service/stream_pool.h" #include <memory> #include <utility> #include "absl/strings/str_format.h" namespace xla { StreamPool::Ptr StreamPool::BorrowStream(se::StreamPriority priority) { std::unique_ptr<se::Stream> stream; { absl::MutexLock lock(&mu_); if (streams_with_pri_.find(priority) == streams_with_pri_.end()) { stream = nullptr; } else { while (!streams_with_pri_[priority].empty() && !stream) { stream = std::move(streams_with_pri_[priority].back()); streams_with_pri_[priority].pop_back(); if (stream->ok()) { VLOG(1) << absl::StrFormat( "StreamPool reusing existing stream (%p) with priority: %s", stream.get(), se::StreamPriorityToString(priority)); } else { VLOG(1) << absl::StrFormat( "Stream (%p) was not ok, deleting with : %s", stream.get(), se::StreamPriorityToString(priority)); stream = nullptr; } } } } if (!stream) { stream = executor_->CreateStream(priority).value(); stream->set_name(absl::StrFormat("%s pool stream", se::StreamPriorityToString(priority))); VLOG(1) << absl::StrFormat("Created new stream (%p) with priority = %s", stream.get(), se::StreamPriorityToString(priority)); } PtrDeleter deleter = {this}; return Ptr(stream.release(), deleter); } void StreamPool::ReturnStream(se::Stream* stream) { if (stream->ok()) { VLOG(1) << absl::StrFormat("StreamPool returning ok stream (%p)", stream); absl::MutexLock lock(&mu_); auto priority = std::get<se::StreamPriority>(stream->priority()); streams_with_pri_[priority].emplace_back(stream); } else { VLOG(1) << absl::StrFormat("StreamPool deleting !ok stream (%p)", stream); delete stream; } } }
#include "xla/service/stream_pool.h" #include <memory> #include "xla/stream_executor/platform_manager.h" #include "xla/stream_executor/stream_executor.h" #include "xla/test_helpers.h" namespace xla { namespace { class StreamPoolTest : public ::testing::Test { protected: se::StreamExecutor* NewStreamExecutor() { se::Platform* platform = se::PlatformManager::PlatformWithName("Host").value(); return platform->ExecutorForDevice(0).value(); } }; TEST_F(StreamPoolTest, EmptyPool) { se::StreamExecutor* executor = NewStreamExecutor(); StreamPool pool(executor); } TEST_F(StreamPoolTest, OneStreamPool) { se::StreamExecutor* executor = NewStreamExecutor(); StreamPool pool(executor); StreamPool::Ptr stream1 = pool.BorrowStream(); se::Stream* stream1_ptr = stream1.get(); EXPECT_TRUE(stream1->ok()); stream1 = nullptr; StreamPool::Ptr stream2 = pool.BorrowStream(); se::Stream* stream2_ptr = stream2.get(); EXPECT_TRUE(stream2->ok()); stream2 = nullptr; EXPECT_EQ(stream1_ptr, stream2_ptr); } TEST_F(StreamPoolTest, TwoStreamPool) { se::StreamExecutor* executor = NewStreamExecutor(); StreamPool pool(executor); StreamPool::Ptr stream1 = pool.BorrowStream(); se::Stream* stream1_ptr = stream1.get(); EXPECT_TRUE(stream1->ok()); StreamPool::Ptr stream2 = pool.BorrowStream(); se::Stream* stream2_ptr = stream2.get(); EXPECT_TRUE(stream2->ok()); EXPECT_NE(stream1_ptr, stream2_ptr); stream1 = nullptr; StreamPool::Ptr stream3 = pool.BorrowStream(); se::Stream* stream3_ptr = stream3.get(); EXPECT_TRUE(stream3->ok()); EXPECT_EQ(stream1_ptr, stream3_ptr); EXPECT_NE(stream2_ptr, stream3_ptr); stream2 = nullptr; StreamPool::Ptr stream4 = pool.BorrowStream(); se::Stream* stream4_ptr = stream4.get(); EXPECT_TRUE(stream4->ok()); EXPECT_EQ(stream2_ptr, stream4_ptr); EXPECT_NE(stream3_ptr, stream4_ptr); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/stream_pool.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/stream_pool_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
3896e059-52e8-44fa-aa37-6d162956acdf
cpp
tensorflow/tensorflow
hlo_rematerialization
third_party/xla/xla/service/hlo_rematerialization.cc
third_party/xla/xla/service/hlo_rematerialization_test.cc
#include "xla/service/hlo_rematerialization.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <iterator> #include <limits> #include <memory> #include <optional> #include <set> #include <string> #include <string_view> #include <tuple> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/functional/function_ref.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_clone_context.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/ir/hlo_schedule.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/layout_util.h" #include "xla/map_util.h" #include "xla/service/call_graph.h" #include "xla/service/hlo_cost_analysis.h" #include "xla/service/hlo_dataflow_analysis.h" #include "xla/service/hlo_dce.h" #include "xla/service/logical_buffer.h" #include "xla/service/tuple_points_to_analysis.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/numbers.h" #include "tsl/platform/statusor.h" namespace xla { namespace { using ::tsl::strings::HumanReadableNumBytes; bool IsRematerializable(const HloInstruction* instruction) { if (instruction->opcode() == HloOpcode::kCopy) { if (LayoutUtil::Equal(instruction->shape().layout(), instruction->operand(0)->shape().layout())) { return false; } } if (auto collective = DynCast<HloCollectiveInstruction>(instruction)) { return 
!collective->constrain_layout(); } switch (instruction->opcode()) { case HloOpcode::kCall: case HloOpcode::kConstant: case HloOpcode::kConditional: case HloOpcode::kCustomCall: case HloOpcode::kParameter: case HloOpcode::kWhile: return false; default: return !instruction->HasSideEffect(); } } bool CanBeRematerialized( const HloInstruction* instruction, absl::flat_hash_map<const HloInstruction*, bool>* rematerializable_map) { auto it = rematerializable_map->find(instruction); if (it != rematerializable_map->end()) { return it->second; } bool rematerializable = IsRematerializable(instruction); (*rematerializable_map)[instruction] = rematerializable; return rematerializable; } bool IsSupportedIndirectUser(const HloInstruction* instruction) { return instruction->opcode() == HloOpcode::kBitcast || instruction->opcode() == HloOpcode::kGetTupleElement; } using BufferId = int64_t; using BufferIdList = absl::InlinedVector<BufferId, 3>; struct RematStrategy { enum { kRecompute, kCompress, kHostOffload, } kind; Shape compact_shape; }; struct Item { HloInstruction* instruction; bool placed = false; bool denylisted = false; BufferIdList buffers_defined; BufferIdList buffers_output; BufferIdList buffers_used; bool is_skip_node = false; private: friend class InstructionList; Item* next = nullptr; Item* prev = nullptr; Item* prev_skip_node = nullptr; Item* next_skip_node = nullptr; int64_t position; }; struct ItemUse { Item* user; int64_t operand_number; std::optional<int64_t> index; ItemUse(Item* user, int64_t op_num, std::optional<int64_t> index) : user(user), operand_number(op_num), index(index) {} bool operator==(const ItemUse& other) const { return user == other.user && operand_number == other.operand_number && index == other.index; } }; using ItemList = absl::InlinedVector<Item*, 3>; using UsesList = absl::InlinedVector<ItemUse, 3>; class InstructionList { public: explicit InstructionList(const HloInstructionSequence& order) { int64_t position = 0; Item* last = nullptr; 
last_skip_node_ = nullptr; first_skip_node_ = nullptr; for (HloInstruction* inst : order.instructions()) { Item* item = new Item; item->next = nullptr; item->prev = last; if (last == nullptr) { first_ = item; } else { last->next = item; } last = item; item->instruction = inst; item->position = position; position++; item_map_[inst] = item; } } ~InstructionList() { for (Item* item = first_; item != nullptr;) { Item* next = item->next; delete item; item = next; } } size_t size() const { return item_map_.size(); } Item* first() const { return first_; } Item* next(Item* item) const { return item->next; } const Item* next(const Item* item) const { return item->next; } Item* prev(Item* item) const { return item->prev; } const Item* prev(const Item* item) const { return item->prev; } Item* first_skip_node() const { return first_skip_node_; } Item* next_skip_node(Item* item) const { return item->next_skip_node; } Item* CreateItem(HloInstruction* inst) { Item* item = new Item; item->instruction = inst; CHECK(item_map_.insert({inst, item}).second) << "inserting inst twice " << inst->name(); return item; } Item* GetItem(const HloInstruction* inst) const { auto iter = item_map_.find(inst); CHECK(iter != item_map_.end()) << "Did not find " << inst->name(); return iter->second; } void InsertBeforeInstructions(Item* to_insert, absl::Span<Item* const> before_instructions) { VLOG(3) << "InsertBeforeInstructions: " << to_insert->instruction->name() << " before {" << absl::StrJoin(before_instructions, ", ", [](std::string* out, Item* item) { absl::StrAppend(out, item->instruction->name()); }) << "}"; CHECK(!before_instructions.empty()); Item* min_position_item = nullptr; for (Item* item : before_instructions) { if (min_position_item == nullptr || item->position < min_position_item->position) { min_position_item = item; } } while (min_position_item->prev != nullptr && min_position_item->position == min_position_item->prev->position) { min_position_item = min_position_item->prev; } 
while (!absl::c_linear_search(before_instructions, min_position_item)) { min_position_item = min_position_item->next; } return InsertBefore(to_insert, min_position_item); } void PromoteNodesToSkip(absl::FunctionRef<bool(Item*)> should_promote) { int64_t count = 0; for (auto* item = first(); item != nullptr; item = next(item)) { if (should_promote(item)) { count += 1; if (first_skip_node_ == nullptr) { first_skip_node_ = item; } item->is_skip_node = true; item->prev_skip_node = last_skip_node_; if (last_skip_node_ != nullptr) { last_skip_node_->next_skip_node = item; } last_skip_node_ = item; } } VLOG(1) << " Rematerialization has " << count << " items in express lane"; } void InsertAfterInstructions(Item* to_insert, absl::Span<Item* const> after_instructions) { VLOG(3) << "InsertAfterInstructions: " << to_insert->instruction->name() << " after {" << absl::StrJoin(after_instructions, ", ", [](std::string* out, Item* item) { absl::StrAppend(out, item->instruction->name()); }) << "}"; CHECK(!after_instructions.empty()); Item* max_position_item = nullptr; for (Item* item : after_instructions) { if (max_position_item == nullptr || item->position > max_position_item->position) { max_position_item = item; } } CHECK(max_position_item->next != nullptr); InsertBeforeInstructions(to_insert, {max_position_item->next}); } void Denylist(const HloInstruction* inst) { GetItem(inst)->denylisted = true; } private: void InsertBefore(Item* item, Item* before) { VLOG(3) << "InsertBefore: " << item->instruction->name() << " before " << before->instruction->name(); item->is_skip_node = true; Item* cursor = before; while (cursor != nullptr && !cursor->is_skip_node) { cursor = cursor->next; } CHECK(cursor == nullptr || cursor->is_skip_node); if (cursor == nullptr) { item->prev_skip_node = last_skip_node_; item->next_skip_node = nullptr; last_skip_node_ = item; } else { CHECK(cursor->is_skip_node); item->prev_skip_node = cursor->prev_skip_node; if (item->prev_skip_node != nullptr) { 
item->prev_skip_node->next_skip_node = item; } item->next_skip_node = cursor; cursor->prev_skip_node = item; } if (first_skip_node_ == cursor) { first_skip_node_ = item; } item->prev = before->prev; item->next = before; before->prev = item; if (item->prev != nullptr) { item->prev->next = item; } else { first_ = item; } item->position = before->position; } Item* first_; Item* first_skip_node_; Item* last_skip_node_; absl::flat_hash_map<const HloInstruction*, Item*> item_map_; }; UsesList GetUsers(const InstructionList& instruction_list, const LogicalBuffer* logical_buffer, const TuplePointsToAnalysis& points_to_analysis, bool* has_indirect_users) { UsesList users; *has_indirect_users = false; for (const BufferAlias& buffer_alias : points_to_analysis.GetBufferAliases(*logical_buffer)) { for (const HloInstruction* user : buffer_alias.instruction()->users()) { if (points_to_analysis.DoesNotUseOperandBuffer( buffer_alias.instruction(), buffer_alias.index(), user)) { continue; } if (buffer_alias.instruction() != logical_buffer->instruction() && !IsSupportedIndirectUser(buffer_alias.instruction())) { *has_indirect_users = true; } Item* user_item = instruction_list.GetItem(user); std::optional<int64_t> user_index = logical_buffer->index().size() != 1 ? 
std::nullopt : std::make_optional(logical_buffer->index().back()); for (int64_t op_idx : user->OperandIndices(buffer_alias.instruction())) { if (!absl::c_linear_search( users, ItemUse{user_item, static_cast<int>(op_idx), user_index})) { users.push_back( ItemUse{user_item, static_cast<int>(op_idx), user_index}); } } } } return users; } class MemoryUsageTracker { public: MemoryUsageTracker(const HloRematerialization::Options& options, const HloComputation* computation, const TuplePointsToAnalysis& points_to_analysis, const InstructionList& instruction_list); absl::Status BeginInstruction(Item* item); int64_t RematerializationCost(const std::vector<Item*>& items, int64_t memory_reduced, int64_t memory_limit_bytes) const { bool zero_cost_move = true; for (auto* item : items) { auto* instruction = item->instruction; if (absl::c_any_of( instruction->users(), [this](const HloInstruction* inst) { return IsPlaced(inst); })) { zero_cost_move = false; break; } } if (zero_cost_move) { return 0; } CHECK_GT(memory_reduced, 0); return memory_limit_bytes / memory_reduced; } absl::Status EndInstruction(); int64_t MemoryReducedIfCompressed(const Item* item, const Shape& compact_shape) const; int64_t MemoryReducedIfRematerialized( absl::Span<const Item* const> items) const; absl::Status AddCompressInstructions(Item* original_item, Item* compressed_item, Item* uncompressed_item); absl::Status AddRematerializedInstruction(Item* original_item, Item* remat_item, absl::Span<Item*> indirect_users); std::tuple<UsesList, UsesList> GetPlacedAndUnplacedUsers( const UsesList& uses) const; public: absl::Status AddHostOffloadCopyInstructions(Item* original_item, Item* copy_start_to_host_item, Item* copy_done_to_host_item, Item* copy_start_to_device_item, Item* copy_done_to_device_item); int64_t BytesUsedByBuffers(const Item* item, bool only_count_unplaced_users) const; std::optional<int64_t> GetCostOfCompression(const Item* candidate_item, int64_t memory_limit_bytes, int64_t peak_memory_bytes); 
std::optional<int64_t> GetCostOfHostOffload(const Item* candidate_item, int64_t memory_limit_bytes) const; std::optional<int64_t> GetCostOfRecompute( const std::vector<Item*>& candidate_items, int64_t memory_limit_bytes) const; std::tuple<std::vector<Item*>, RematStrategy, int> PickRematerializationCandidates( const InstructionList& instruction_list, int64_t memory_limit_bytes, absl::flat_hash_map<const HloInstruction*, bool>* rematerializable_map, int min_block_size, int max_block_size, int64_t peak_memory_bytes); bool IsPlaced(const HloInstruction* instruction) const { return instruction_list_.GetItem(instruction)->placed; } bool HasUnplacedUsers(Item* item) const; UsesList GetItemUses(Item* item) const; bool IsInProgressItem(Item* item) const { return item == in_progress_item_; } int64_t memory_usage() const { return memory_usage_; } int64_t AllocatedSize(Item* item) const { int64_t size = 0; for (auto buffer_id : item->buffers_defined) { size += AllocatedSize(buffer_id); } return size; } const HloComputation* computation() const { return computation_; } const HloRematerialization::Options& options() const { return options_; } bool Check() const; std::string ToString() const; private: struct Buffer { const BufferId id; Item* defining_instruction; const int64_t size; Shape shape; bool live_out; bool has_indirect_uses; ShapeIndex index; UsesList users; int64_t unfinished_user_count; std::string ToString() const { return absl::StrCat("Buffer ", id, " (defined by ", defining_instruction->instruction->name(), ", size ", size, " bytes)"); } }; void CountAllocatedMemory(Item* item); absl::Status CountFreedMemory(Item* item); void ReplaceUsesInUsersOfBuffer(Buffer& buffer, BufferId old_id) const; absl::StatusOr<const Shape*> GetCompactShape(const HloInstruction* hlo); Buffer& CreateBufferFromLogicalBuffer( const LogicalBuffer* logical_buffer, const TuplePointsToAnalysis& points_to_analysis, bool live_out) { bool has_indirect_uses = false; UsesList users = 
GetUsers(instruction_list_, logical_buffer, points_to_analysis, &has_indirect_uses); return NewBuffer(instruction_list_.GetItem(logical_buffer->instruction()), logical_buffer->shape(), logical_buffer->index(), std::move(users), live_out, has_indirect_uses); } Buffer& RematerializeBuffer(const Buffer& original_buffer, Item* remat_item, UsesList&& rematerialized_uses) { CHECK(original_buffer.defining_instruction->placed) << original_buffer.defining_instruction->instruction->name(); CHECK(!original_buffer.has_indirect_uses) << original_buffer.ToString(); CHECK(!original_buffer.live_out) << original_buffer.ToString(); for (ItemUse& use : rematerialized_uses) { CHECK(!use.user->placed) << use.user->instruction->name(); } return NewBuffer(remat_item, original_buffer.shape, original_buffer.index, std::move(rematerialized_uses), false, false); } int64_t AllocatedSize(BufferId buffer_id) const { const Buffer& buffer = buffers_.at(buffer_id); HloInstruction* inst = buffer.defining_instruction->instruction; HloOpcode def_opcode = inst->opcode(); if (buffer.live_out || def_opcode == HloOpcode::kParameter) { return 0; } else { if (options_.host_memory_offload_config && buffer.shape.has_layout() && buffer.shape.layout().memory_space() == options_.host_memory_offload_config->host_memory_space) { return 0; } return buffer.size; } } bool IsFinished(Item* item) const { return item->placed && item != in_progress_item_; } bool IsInUse(BufferId buffer_id) const { if (in_progress_item_ == nullptr) { return false; } const BufferIdList& in_progress_uses = in_progress_item_->buffers_used; return absl::c_linear_search(in_progress_uses, buffer_id); } bool IsCurrentlyLive(BufferId buffer_id) const { const Buffer& buffer = buffers_[buffer_id]; return (buffer.defining_instruction->placed && buffer.unfinished_user_count > 0); } bool IsInstructionCurrentlyLive(const Item* instruction) const { if (!IsPlaced(instruction->instruction)) { return false; } for (const HloInstruction* user : 
         instruction->instruction->users()) {
      if (!IsPlaced(user)) {
        return true;
      }
    }
    return false;
  }

  // Creates and registers a new Buffer defined by `defining_instruction` at
  // shape index `index`. The unfinished-user count is initialized to the
  // number of *distinct* using Items (a user may appear in `uses` more than
  // once, e.g. as multiple operands).
  Buffer& NewBuffer(Item* defining_instruction, const Shape& shape,
                    const ShapeIndex& index, UsesList&& uses, bool live_out,
                    bool has_indirect_uses) {
    int buffer_id = buffers_.size();
    auto get_num_of_unique_users = [](const UsesList& uses) -> int64_t {
      absl::flat_hash_set<Item*> users_set;
      for (const ItemUse& use : uses) {
        users_set.insert(use.user);
      }
      return users_set.size();
    };
    buffers_.push_back(Buffer{buffer_id, defining_instruction,
                              options_.hlo_cost_analysis.GetShapeSize(shape),
                              shape, live_out, has_indirect_uses, index, uses,
                              get_num_of_unique_users(uses)});
    return buffers_.back();
  }

  // Rematerialization options shared across the pass.
  const HloRematerialization::Options& options_;
  // Computation being tracked.
  const HloComputation* computation_;
  // Schedule order over the computation's instructions.
  const InstructionList& instruction_list_;
  // Memoized compact shapes, keyed by instruction (see GetCompactShape).
  absl::flat_hash_map<const HloInstruction*, Shape> compact_shape_;
  // Current tracked memory usage in bytes.
  int64_t memory_usage_ = 0;
  // Instruction between BeginInstruction/EndInstruction, if any.
  Item* in_progress_item_ = nullptr;
  // All buffers, indexed by BufferId.
  std::vector<Buffer> buffers_;
};

// Builds the tracker's Buffer table from the points-to analysis: one Buffer
// per LogicalBuffer, except that buffers defined by kWhile alias their
// operand's buffer (whiles reuse their operand memory).
MemoryUsageTracker::MemoryUsageTracker(
    const HloRematerialization::Options& options,
    const HloComputation* computation,
    const TuplePointsToAnalysis& points_to_analysis,
    const InstructionList& instruction_list)
    : options_(options),
      computation_(computation),
      instruction_list_(instruction_list) {
  PointsToSet::BufferSet live_out_set =
      points_to_analysis.GetPointsToSet(computation_->root_instruction())
          .CreateFlattenedSet();
  absl::flat_hash_map<const LogicalBuffer*, BufferId>
      logical_buffer_to_buffer_id;
  for (auto* item = instruction_list_.first(); item != nullptr;
       item = instruction_list_.next(item)) {
    const HloInstruction* const instruction = item->instruction;
    for (const LogicalBuffer* logical_buffer :
         points_to_analysis.GetBuffersDefinedByInstruction(instruction)) {
      Buffer* buffer;
      if (instruction->opcode() == HloOpcode::kWhile) {
        // A while aliases its operand's buffer rather than defining a new
        // one; fold this logical buffer into the operand's Buffer entry.
        const PointsToSet& operand_points_to =
            points_to_analysis.GetPointsToSet(instruction->operand(0));
        CHECK_EQ(operand_points_to.element(logical_buffer->index()).size(), 1);
        const LogicalBuffer* source_logical_buffer =
            operand_points_to.element(logical_buffer->index())[0];
        buffer = &buffers_.at(
            logical_buffer_to_buffer_id.at(source_logical_buffer));
        // The aliasing makes uses of this buffer "indirect".
        buffer->has_indirect_uses = true;
        buffer->live_out =
            buffer->live_out || ContainsKey(live_out_set, logical_buffer);
        bool unused;
        // Merge in any users of the while's logical buffer not already
        // recorded on the aliased Buffer.
        for (ItemUse& user_item : GetUsers(instruction_list_, logical_buffer,
                                           points_to_analysis, &unused)) {
          auto existing_user_it = absl::c_find_if(
              buffer->users,
              [&](const ItemUse& use) { return user_item.user == use.user; });
          if (existing_user_it == buffer->users.end()) {
            buffer->unfinished_user_count++;
            user_item.user->buffers_used.push_back(buffer->id);
            buffer->users.push_back(user_item);
          }
        }
      } else {
        buffer = &CreateBufferFromLogicalBuffer(
            logical_buffer, points_to_analysis,
            ContainsKey(live_out_set, logical_buffer));
        item->buffers_defined.push_back(buffer->id);
        for (ItemUse& user : buffer->users) {
          if (!absl::c_linear_search(user.user->buffers_used, buffer->id)) {
            user.user->buffers_used.push_back(buffer->id);
          }
        }
      }
      logical_buffer_to_buffer_id[logical_buffer] = buffer->id;
    }
    // An instruction's outputs are every buffer its points-to set reaches,
    // not just the buffers it defines.
    for (const LogicalBuffer* logical_buffer :
         points_to_analysis.GetPointsToSet(instruction).CreateFlattenedSet()) {
      item->buffers_output.push_back(
          logical_buffer_to_buffer_id[logical_buffer]);
    }
  }
  XLA_VLOG_LINES(10, ToString());
  DCHECK(Check());
}

// Charges the buffers defined by `item` to the tracked memory usage.
void MemoryUsageTracker::CountAllocatedMemory(Item* item) {
  for (BufferId buffer_id : item->buffers_defined) {
    VLOG(3) << " Buffer " << buffers_.at(buffer_id).ToString()
            << " is now live.";
    memory_usage_ += AllocatedSize(buffer_id);
  }
}

// Releases memory for buffers whose last use is `item`, and for buffers
// defined by `item` that have no users at all.
absl::Status MemoryUsageTracker::CountFreedMemory(Item* item) {
  for (BufferId buffer_id : item->buffers_used) {
    Buffer& buffer = buffers_.at(buffer_id);
    buffer.unfinished_user_count--;
    TF_RET_CHECK(buffer.unfinished_user_count >= 0)
        << buffer.ToString() << " has negative unfinished user count.";
    if (buffer.unfinished_user_count == 0) {
      VLOG(3) << " " << buffer.ToString() << " is now dead.";
      memory_usage_ -=
          AllocatedSize(buffer_id);
    }
  }
  // Buffers defined by `item` with zero users die immediately.
  for (BufferId buffer_id : item->buffers_defined) {
    const Buffer& buffer = buffers_.at(buffer_id);
    if (buffer.unfinished_user_count == 0) {
      VLOG(3) << " " << buffer.ToString() << " is immediately dead.";
      memory_usage_ -= AllocatedSize(buffer_id);
    }
  }
  return absl::OkStatus();
}

// Marks `item` as the in-progress instruction and charges the memory of the
// buffers it defines. Must be paired with EndInstruction().
absl::Status MemoryUsageTracker::BeginInstruction(Item* item) {
  const HloInstruction* instruction = item->instruction;
  VLOG(3) << "BeginInstruction " << instruction->name();
  TF_RET_CHECK(in_progress_item_ == nullptr);
  in_progress_item_ = item;
  item->placed = true;
  CountAllocatedMemory(item);
  VLOG(3) << " memory usage = " << memory_usage_;
  VLOG(10) << ToString();
  if (VLOG_IS_ON(1)) {
    DCHECK(Check());
  }
  return absl::OkStatus();
}

// Finishes the in-progress instruction: frees buffers whose last use it was
// and clears the in-progress marker.
absl::Status MemoryUsageTracker::EndInstruction() {
  TF_RET_CHECK(in_progress_item_ != nullptr);
  VLOG(3) << "EndInstruction " << in_progress_item_->instruction->name();
  TF_RETURN_IF_ERROR(CountFreedMemory(in_progress_item_));
  in_progress_item_ = nullptr;
  VLOG(3) << " memory usage = " << memory_usage_;
  VLOG(10) << ToString();
  if (VLOG_IS_ON(1)) {
    DCHECK(Check());
  }
  return absl::OkStatus();
}

// Bytes saved (may be <= 0) if `item`'s single output buffer were replaced
// by one of `compact_shape`. Only counts when the buffer is live, not used
// by the in-progress instruction, and the instruction itself is still live.
int64_t MemoryUsageTracker::MemoryReducedIfCompressed(
    const Item* item, const Shape& compact_shape) const {
  CHECK_NE(in_progress_item_, nullptr);
  if (!item->placed || item == in_progress_item_) {
    return 0;
  }
  int64_t memory_reduced = 0;
  CHECK_EQ(item->buffers_output.size(), 1);
  BufferId buffer_id = item->buffers_output[0];
  if (IsCurrentlyLive(buffer_id) && !IsInUse(buffer_id) &&
      IsInstructionCurrentlyLive(item)) {
    const Buffer& buffer = buffers_.at(buffer_id);
    memory_reduced += buffer.size;
    int64_t compact_shape_size =
        options_.hlo_cost_analysis.GetShapeSize(compact_shape);
    memory_reduced -= compact_shape_size;
  }
  return memory_reduced;
}

// Net bytes saved if the whole block `items` were rematerialized: savings
// from their defined buffers that are currently live, minus the cost of
// operand buffers that would have to be extended to the remat point.
int64_t MemoryUsageTracker::MemoryReducedIfRematerialized(
    absl::Span<const Item* const> items) const {
  CHECK_NE(in_progress_item_, nullptr);
  int64_t memory_reduced = 0;
  absl::flat_hash_set<const Item*> remat_candidates;
  for (const Item* item : items) {
    if (!item->placed || item == in_progress_item_) {
      LOG(WARNING) << "Unplaced item or in progress item being checked for "
                      "rematerialization.";
      return 0;
    }
    for (BufferId buffer_id : item->buffers_defined) {
      const Buffer& buffer = buffers_.at(buffer_id);
      // Indirect uses, live-out buffers, and deeply nested tuple elements
      // disqualify the whole block.
      if (buffer.has_indirect_uses || buffer.live_out ||
          buffer.index.size() > 1) {
        return 0;
      }
      if (IsInUse(buffer_id)) {
        return 0;
      }
      if (IsCurrentlyLive(buffer_id)) {
        memory_reduced += AllocatedSize(buffer_id);
      }
    }
    // Operands that are currently dead would have to be kept (or recomputed)
    // until the remat point; charge them once per defining instruction,
    // unless that definition is itself part of the candidate block.
    for (BufferId buffer_id : item->buffers_used) {
      if (!IsCurrentlyLive(buffer_id)) {
        Item* defining_instruction =
            buffers_.at(buffer_id).defining_instruction;
        if (!remat_candidates.contains(defining_instruction)) {
          memory_reduced -= AllocatedSize(buffer_id);
        }
      }
    }
    remat_candidates.insert(item);
  }
  return memory_reduced;
}

// Splits `uses` into (placed, unplaced) user lists, preserving order.
std::tuple<UsesList, UsesList> MemoryUsageTracker::GetPlacedAndUnplacedUsers(
    const UsesList& uses) const {
  UsesList placed_users, unplaced_users;
  for (const ItemUse& use : uses) {
    if (use.user->placed) {
      DCHECK(IsFinished(use.user)) << use.user->instruction->name();
      placed_users.push_back(use);
    } else {
      unplaced_users.push_back(use);
    }
  }
  return {placed_users, unplaced_users};
}

// For every user of `buffer`, rewrites references to `old_id` in the user's
// buffers_used list to point at `buffer.id`.
void MemoryUsageTracker::ReplaceUsesInUsersOfBuffer(Buffer& buffer,
                                                    BufferId old_id) const {
  for (ItemUse& use : buffer.users) {
    BufferIdList& buffers_used = use.user->buffers_used;
    absl::c_replace(buffers_used, old_id, buffer.id);
  }
}

// Rewires tracker state after inserting a compress/uncompress pair around
// `original_item`'s single output: placed users keep the original buffer,
// unplaced users are moved to the uncompressed result.
absl::Status MemoryUsageTracker::AddCompressInstructions(
    Item* original_item, Item* compressed_item, Item* uncompressed_item) {
  CHECK(original_item->placed)
      << "Compressing instruction, but the original is not yet placed.";
  CHECK_EQ(original_item->buffers_output.size(), 1)
      << "Only compressing items which have a single output buffer";
  // Usage is adjusted by the shape-size delta between the original output
  // and its compressed form.
  memory_usage_ -= options_.hlo_cost_analysis.GetShapeSize(
      original_item->instruction->shape());
  memory_usage_ += options_.hlo_cost_analysis.GetShapeSize(
      compressed_item->instruction->shape());
  BufferId original_buffer_id =
      original_item->buffers_output[0];
  Buffer& original_buffer = buffers_.at(original_buffer_id);
  auto [placed_users, unplaced_users] =
      GetPlacedAndUnplacedUsers(original_buffer.users);
  // The original buffer now only feeds the compress op (its only remaining
  // unfinished user, hence count 0 here after the split).
  original_buffer.users = std::move(placed_users);
  original_buffer.unfinished_user_count = 0;
  original_buffer.users.push_back(ItemUse{compressed_item, 0, std::nullopt});
  ShapeIndex copied_index = original_buffer.index;
  Buffer& compressed_buffer =
      NewBuffer(compressed_item, compressed_item->instruction->shape(),
                copied_index, {ItemUse{uncompressed_item, 0, std::nullopt}},
                /*live_out=*/false, /*has_indirect_uses=*/false);
  compressed_item->buffers_used = original_item->buffers_output;
  compressed_item->buffers_output = {compressed_buffer.id};
  compressed_item->buffers_defined.push_back(compressed_buffer.id);
  // The unplaced users of the original now read the uncompressed result.
  Buffer& uncompressed_buffer =
      NewBuffer(uncompressed_item, uncompressed_item->instruction->shape(),
                copied_index, std::move(unplaced_users), /*live_out=*/false,
                /*has_indirect_uses=*/false);
  uncompressed_item->buffers_used = {compressed_item->buffers_output[0]};
  uncompressed_item->buffers_output = {uncompressed_buffer.id};
  uncompressed_item->buffers_defined = {uncompressed_buffer.id};
  ReplaceUsesInUsersOfBuffer(uncompressed_buffer, original_buffer_id);
  return absl::OkStatus();
}

// Rewires tracker state after `original_item` has been cloned as
// `remat_item` (plus optional pass-through `indirect_users` such as
// bitcast/get-tuple-element created during cloning).
absl::Status MemoryUsageTracker::AddRematerializedInstruction(
    Item* original_item, Item* remat_item, absl::Span<Item*> indirect_users) {
  VLOG(3) << "AddRematerializedInstruction: original_instruction = "
          << original_item->instruction->name()
          << ", remat_instruction = " << remat_item->instruction->name();
  TF_RET_CHECK(in_progress_item_ != nullptr);
  TF_RET_CHECK(original_item->placed) << original_item->instruction->name();
  TF_RET_CHECK(!remat_item->placed) << remat_item->instruction->name();
  // The remat reads the same operands as the original; operands that were
  // already dead become live again and are re-charged.
  remat_item->buffers_used = original_item->buffers_used;
  for (BufferId buffer_id : original_item->buffers_used) {
    Buffer& buffer = buffers_.at(buffer_id);
    if (buffer.unfinished_user_count == 0) {
      memory_usage_ += AllocatedSize(buffer.id);
    }
    buffer.unfinished_user_count++;
    // Mirror each of the original's uses of this operand onto the remat.
    absl::InlinedVector<ItemUse, 2> filtered_users;
    std::copy_if(buffer.users.begin(), buffer.users.end(),
                 std::back_inserter(filtered_users),
                 [&](const ItemUse& iu) { return iu.user == original_item; });
    for (ItemUse& u : filtered_users) {
      buffer.users.push_back(ItemUse{remat_item, u.operand_number, u.index});
    }
  }
  const absl::flat_hash_set<Item*> indirect_users_set(indirect_users.begin(),
                                                      indirect_users.end());
  // Each buffer the original defined is split: placed users keep the old
  // buffer (now finished); unplaced users move to a rematerialized buffer.
  for (BufferId old_buffer_id : original_item->buffers_defined) {
    Buffer& old_buffer = buffers_.at(old_buffer_id);
    UsesList placed_users;
    UsesList unplaced_users;
    for (ItemUse& user : old_buffer.users) {
      if (user.user->placed) {
        placed_users.push_back(user);
      } else {
        if (!IsSupportedIndirectUser(user.user->instruction) ||
            indirect_users_set.contains(user.user)) {
          unplaced_users.push_back(user);
        } else {
          // A supported pass-through user not re-created for this remat:
          // detach it from the buffer graph entirely.
          CHECK(user.user->buffers_defined.empty())
              << "Buffers defined expected to be empty for use passthrough "
                 "instructions";
          user.user->buffers_output.clear();
          user.user->buffers_used.clear();
        }
      }
    }
    old_buffer.users = std::move(placed_users);
    old_buffer.unfinished_user_count = 0;
    memory_usage_ -= AllocatedSize(old_buffer.id);
    Buffer& new_buffer =
        RematerializeBuffer(old_buffer, remat_item, std::move(unplaced_users));
    remat_item->buffers_defined.push_back(new_buffer.id);
    remat_item->buffers_output.push_back(new_buffer.id);
    // Point the moved users' bookkeeping at the new buffer id.
    auto update_buffers = [old_buffer_id, new_buffer_id = new_buffer.id](
                              BufferIdList& to_update) {
      std::replace(to_update.begin(), to_update.end(), old_buffer_id,
                   new_buffer_id);
    };
    for (ItemUse& user : new_buffer.users) {
      update_buffers(user.user->buffers_used);
      update_buffers(user.user->buffers_output);
    }
  }
  // Wire up the freshly created pass-through instructions (bitcast / GTE).
  for (Item* indirect_user : indirect_users) {
    const Item* source_item =
        instruction_list_.GetItem(indirect_user->instruction->operand(0));
    switch (indirect_user->instruction->opcode()) {
      case HloOpcode::kBitcast: {
        // A bitcast of another pass-through forwards that op's outputs;
        // otherwise it forwards the source's defined buffers.
        if (IsSupportedIndirectUser(source_item->instruction)) {
          indirect_user->buffers_used = source_item->buffers_output;
          indirect_user->buffers_output = source_item->buffers_output;
        } else {
          indirect_user->buffers_used = source_item->buffers_defined;
          indirect_user->buffers_output = source_item->buffers_defined;
        }
        break;
      }
      case HloOpcode::kGetTupleElement: {
        // A GTE outputs the element buffer at its tuple index and uses the
        // top-level (empty-index) tuple buffer.
        const HloGetTupleElementInstruction* gte =
            Cast<HloGetTupleElementInstruction>(indirect_user->instruction);
        for (BufferId buffer_id : source_item->buffers_defined) {
          const Buffer& def_buffer = buffers_.at(buffer_id);
          if (def_buffer.index == ShapeIndex{gte->tuple_index()}) {
            indirect_user->buffers_output.push_back(buffer_id);
          }
          if (def_buffer.index.empty()) {
            indirect_user->buffers_used.push_back(buffer_id);
          }
        }
        break;
      }
      default: {
        LOG(FATAL) << "Unsupported indirect instruction with opcode "
                   << indirect_user->instruction->opcode();
        break;
      }
    }
    // Register the pass-through as a user of everything it reads.
    for (BufferId buffer_id : indirect_user->buffers_used) {
      Buffer& buffer = buffers_.at(buffer_id);
      buffer.unfinished_user_count++;
      buffer.users.push_back(ItemUse{indirect_user, 0, std::nullopt});
    }
  }
  VLOG(3) << " memory usage = " << memory_usage_;
  XLA_VLOG_LINES(10, ToString());
  DCHECK(Check());
  return absl::OkStatus();
}

// Rewires tracker state after inserting a copy-start/copy-done pair to host
// and a copy-start/copy-done pair back to device around `original_item`'s
// single output buffer.
absl::Status MemoryUsageTracker::AddHostOffloadCopyInstructions(
    Item* original_item, Item* copy_start_to_host_item,
    Item* copy_done_to_host_item, Item* copy_start_to_device_item,
    Item* copy_done_to_device_item) {
  CHECK_EQ(original_item->buffers_defined.size(), 1);
  CHECK_EQ(original_item->buffers_output.size(), 1);
  BufferId original_buffer_id = original_item->buffers_output[0];
  Buffer& original_buffer = buffers_.at(original_buffer_id);
  auto [placed_users, unplaced_users] =
      GetPlacedAndUnplacedUsers(original_buffer.users);
  // The original now only feeds the copy-start to host.
  original_buffer.users = std::move(placed_users);
  original_buffer.users.emplace_back(copy_start_to_host_item, 0, std::nullopt);
  original_buffer.unfinished_user_count = 1;
  // copy-start shapes are (dest, src/context, context)-style 3-tuples.
  CHECK_EQ(copy_start_to_host_item->instruction->shape().tuple_shapes_size(),
           3)
      << "copy_start_to_host_item's shape is "
      << copy_start_to_host_item->instruction->shape().ToString();
  CHECK_EQ(copy_start_to_device_item->instruction->shape().tuple_shapes_size(),
           3)
      << "copy_start_to_device_item's shape is "
      << copy_start_to_device_item->instruction->shape().ToString();
  // Create tracker buffers for the tuple elements of each copy-start and for
  // the final device-resident result. Element 1 of copy-start-to-host is the
  // host destination; element 2 is the context; element 0 of
  // copy-start-to-device is the device destination.
  BufferId copy_start_to_host_device_buffer_id =
      NewBuffer(copy_start_to_host_item,
                copy_start_to_host_item->instruction->shape().tuple_shapes(1),
                ShapeIndex(),
                UsesList{ItemUse{copy_done_to_host_item, 0, std::nullopt}},
                /*live_out=*/false, /*has_indirect_uses=*/false)
          .id;
  BufferId copy_start_to_host_context_buffer_id =
      NewBuffer(copy_start_to_host_item,
                copy_start_to_host_item->instruction->shape().tuple_shapes(2),
                ShapeIndex(),
                UsesList{ItemUse{copy_done_to_host_item, 0, std::nullopt}},
                /*live_out=*/false, /*has_indirect_uses=*/false)
          .id;
  BufferId copy_start_to_device_device_buffer_id =
      NewBuffer(copy_start_to_device_item,
                copy_start_to_device_item->instruction->shape().tuple_shapes(0),
                ShapeIndex(),
                UsesList{ItemUse{copy_done_to_device_item, 0, std::nullopt}},
                /*live_out=*/false, /*has_indirect_uses=*/false)
          .id;
  BufferId copy_start_to_device_context_buffer_id =
      NewBuffer(copy_start_to_device_item,
                copy_start_to_device_item->instruction->shape().tuple_shapes(2),
                ShapeIndex(),
                UsesList{ItemUse{copy_done_to_device_item, 0, std::nullopt}},
                /*live_out=*/false, /*has_indirect_uses=*/false)
          .id;
  // The final copy-done inherits all previously-unplaced users of the
  // original value.
  BufferId copy_done_to_device_buffer_id =
      NewBuffer(copy_done_to_device_item,
                copy_done_to_device_item->instruction->shape(), ShapeIndex(),
                std::move(unplaced_users), /*live_out=*/false,
                /*has_indirect_uses=*/false)
          .id;
  // Chain the four copy items together through their buffer lists.
  copy_start_to_host_item->buffers_used = original_item->buffers_output;
  copy_start_to_host_item->buffers_output = {
      copy_start_to_host_device_buffer_id,
      copy_start_to_host_context_buffer_id};
  copy_start_to_host_item->buffers_defined = {
      copy_start_to_host_device_buffer_id,
      copy_start_to_host_context_buffer_id};
  copy_done_to_host_item->buffers_used =
      copy_start_to_host_item->buffers_output;
  // NOTE(review): the host-side result of copy-done-to-host is given no
  // tracked buffers here — host memory is not charged by this tracker.
  copy_done_to_host_item->buffers_output = {};
  copy_done_to_host_item->buffers_defined = {};
  copy_start_to_device_item->buffers_used =
      copy_done_to_host_item->buffers_output;
  copy_start_to_device_item->buffers_output = {
      copy_start_to_device_device_buffer_id,
      copy_start_to_device_context_buffer_id};
  copy_start_to_device_item->buffers_defined = {
      copy_start_to_device_device_buffer_id,
      copy_start_to_device_context_buffer_id};
  copy_done_to_device_item->buffers_used =
      copy_start_to_device_item->buffers_output;
  copy_done_to_device_item->buffers_output = {copy_done_to_device_buffer_id};
  copy_done_to_device_item->buffers_defined = {copy_done_to_device_buffer_id};
  Buffer& copy_done_to_device_buffer =
      buffers_.at(copy_done_to_device_buffer_id);
  ReplaceUsesInUsersOfBuffer(copy_done_to_device_buffer, original_buffer_id);
  // Account memory for whichever prefix of the copy chain is already placed
  // (each item can only be placed if its predecessor is).
  if (copy_start_to_host_item->placed) {
    CountAllocatedMemory(copy_start_to_host_item);
    TF_RETURN_IF_ERROR(CountFreedMemory(copy_start_to_host_item));
    if (copy_done_to_host_item->placed) {
      CountAllocatedMemory(copy_done_to_host_item);
      TF_RETURN_IF_ERROR(CountFreedMemory(copy_done_to_host_item));
      if (copy_start_to_device_item->placed) {
        CountAllocatedMemory(copy_start_to_device_item);
        TF_RETURN_IF_ERROR(CountFreedMemory(copy_start_to_device_item));
        if (copy_done_to_device_item->placed) {
          CountAllocatedMemory(copy_done_to_device_item);
          TF_RETURN_IF_ERROR(CountFreedMemory(copy_done_to_device_item));
        }
      }
    }
  }
  return absl::OkStatus();
}

// Human-readable dump of the tracker: memory usage plus, per instruction,
// its defined/output/used buffers and liveness.
std::string MemoryUsageTracker::ToString() const {
  std::string output =
      absl::StrCat("MemoryUsageTracker for ", computation_->name(), "\n");
  absl::StrAppend(&output,
                  "Memory usage: ", HumanReadableNumBytes(memory_usage()),
                  " (", memory_usage(), " bytes)");
  for (auto* item = instruction_list_.first(); item != nullptr;
       item = instruction_list_.next(item)) {
    const HloInstruction* instruction = item->instruction;
    absl::string_view inprogress =
        item == in_progress_item_ ? " in-progress" : "";
    absl::string_view placed = item->placed ? " placed" : "";
    absl::StrAppend(&output, " ", instruction->name(), inprogress, placed,
                    "\n Defines:\n");
    for (BufferId buffer_id : item->buffers_defined) {
      const Buffer& buffer = buffers_[buffer_id];
      absl::string_view live = IsCurrentlyLive(buffer_id) ?
" live" : ""; absl::StrAppend(&output, " ", buffer.ToString(), live, ", ", buffer.unfinished_user_count, " unfinished uses\n"); } absl::StrAppend(&output, " Outputs:\n"); for (BufferId buffer_id : item->buffers_output) { absl::StrAppend(&output, " ", buffers_[buffer_id].ToString(), "\n"); } absl::StrAppend(&output, " Uses:\n"); for (BufferId buffer_id : item->buffers_used) { absl::StrAppend(&output, " ", buffers_[buffer_id].ToString(), "\n"); } } return output; } absl::StatusOr<const Shape*> MemoryUsageTracker::GetCompactShape( const HloInstruction* hlo) { auto it = compact_shape_.find(hlo); if (it != compact_shape_.end()) { return &it->second; } const Shape& original_shape = hlo->shape(); TF_ASSIGN_OR_RETURN(Shape min_shape, options_.compact_shape_function(original_shape)); return &compact_shape_.emplace(hlo, min_shape).first->second; } bool MemoryUsageTracker::Check() const { auto elements_are_unique = [](const BufferIdList& vec) { return vec.size() == std::set<BufferId>(vec.begin(), vec.end()).size(); }; for (auto* instruction : computation_->instructions()) { const BufferIdList& defined_buffers = instruction_list_.GetItem(instruction)->buffers_defined; CHECK(elements_are_unique(defined_buffers)) << "Instruction " << instruction->name() << " does not have unique defined buffers: " << absl::StrJoin(defined_buffers, ", ", [this](std::string* out, BufferId buffer_id) { absl::StrAppend(out, buffers_.at(buffer_id).ToString()); }); for (const Buffer& buffer : buffers_) { if (buffer.defining_instruction->instruction == instruction) { CHECK(absl::c_linear_search(defined_buffers, buffer.id)) << "Instruction " << instruction->name() << " defined buffers is missing: " << buffer.ToString(); } } } for (auto* instruction : computation_->instructions()) { const BufferIdList& used_buffers = instruction_list_.GetItem(instruction)->buffers_used; CHECK(elements_are_unique(used_buffers)) << "Instruction " << instruction->name() << " does not have unique used buffers: " << 
absl::StrJoin(used_buffers, ", ", [this](std::string* out, BufferId buffer_id) { absl::StrAppend(out, buffers_.at(buffer_id).ToString()); }); } for (const Buffer& buffer : buffers_) { int64_t unfinished_uses = 0; absl::flat_hash_set<Item*> already_counted_user; for (const ItemUse& user : buffer.users) { const BufferIdList& used_buffers = user.user->buffers_used; CHECK(absl::c_linear_search(used_buffers, buffer.id)) << "Instruction " << user.user->instruction->name() << " used buffers is missing " << buffer.ToString(); if (!IsFinished(user.user) && already_counted_user.insert(user.user).second) { unfinished_uses++; } } CHECK_EQ(buffer.unfinished_user_count, unfinished_uses) << "Incorrect unplaced use count for " << buffer.ToString(); } return true; } std::vector<Item*> GetInitialBlock(const InstructionList& instruction_list, const MemoryUsageTracker& tracker, Item* start_item, int min_block_size) { std::vector<Item*> item_block; Item* curr_item = start_item; for (int i = 0; i < min_block_size; ++i) { if (curr_item == nullptr || !curr_item->placed || tracker.IsInProgressItem(curr_item)) { break; } item_block.push_back(curr_item); curr_item = instruction_list.next(curr_item); } return item_block; } bool AnyDenylistedOrNonRematerializable( const std::vector<Item*>& block, absl::flat_hash_map<const HloInstruction*, bool>* rematerializable_map) { for (auto* item : block) { if (item->denylisted) { return true; } if (!CanBeRematerialized(item->instruction, rematerializable_map)) { return true; } } return false; } int64_t MemoryUsageTracker::BytesUsedByBuffers( const Item* item, bool only_count_unplaced_users) const { int64_t bytes_used_by_buffers = 0; for (const auto& buffer_id : item->buffers_defined) { VLOG(3) << " buffer " << buffer_id << "'s users are " << absl::StrJoin(buffers_.at(buffer_id).users, ", ", [](std::string* str, const auto& use) { str->append(use.user->instruction->name()); }); for (const auto& use : buffers_.at(buffer_id).users) { if 
          (!only_count_unplaced_users || !use.user->placed) {
        // Count each qualifying buffer at most once.
        bytes_used_by_buffers += AllocatedSize(buffer_id);
        break;
      }
    }
  }
  return bytes_used_by_buffers;
}

// Cost (lower is better) of compressing `candidate_item`'s single output,
// or empty if compression is not applicable/beneficial.
std::optional<int64_t> MemoryUsageTracker::GetCostOfCompression(
    const Item* candidate_item, int64_t memory_limit_bytes,
    int64_t peak_memory_bytes) {
  CHECK(candidate_item != nullptr);
  if (candidate_item->buffers_output.size() != 1) {
    HloInstruction* candidate_instruction = candidate_item->instruction;
    // NOTE(review): message mentions host offload but this is the
    // compression path — looks copy-pasted; confirm before changing.
    VLOG(2) << " " << candidate_instruction->name()
            << " has more than one output buffer; cannot offload to host.";
    return {};
  }
  const Buffer& output_buffer = buffers_.at(candidate_item->buffers_output[0]);
  if (!candidate_item->placed || candidate_item == in_progress_item_ ||
      output_buffer.live_out) {
    return {};
  }
  // Only plain arrays can be compacted.
  const Shape& original_shape = candidate_item->instruction->shape();
  if (!original_shape.IsArray()) {
    return {};
  }
  const Shape* compact_shape =
      GetCompactShape(candidate_item->instruction).value();
  const int64_t memory_reduced =
      MemoryReducedIfCompressed(candidate_item, *compact_shape);
  // Both forms are live simultaneously during the copy; require headroom.
  const int64_t size = options_.hlo_cost_analysis.GetShapeSize(
      candidate_item->instruction->shape());
  const int64_t reduced_size =
      options_.hlo_cost_analysis.GetShapeSize(*compact_shape);
  if (memory_reduced > 0 && size + reduced_size < peak_memory_bytes) {
    return memory_limit_bytes / memory_reduced;
  } else {
    return {};
  }
}

// Cost of offloading `candidate_item`'s output to host memory, or empty if
// offload is inapplicable (multiple outputs, bitcast/tuple users, live-out,
// used by the in-progress instruction, zero bytes, or not enough compute
// time between last placed use and first unplaced use to hide the copies).
std::optional<int64_t> MemoryUsageTracker::GetCostOfHostOffload(
    const Item* candidate_item, int64_t memory_limit_bytes) const {
  CHECK(candidate_item != nullptr);
  HloInstruction* candidate_instruction = candidate_item->instruction;
  VLOG(2) << "Considering host offload as an option for remat. "
             "looking at instr "
          << candidate_instruction->name();
  if (candidate_item->buffers_output.size() != 1) {
    VLOG(2) << " " << candidate_instruction->name()
            << " has more than one output buffer; cannot offload to host.";
    return {};
  }
  // Bitcast and tuple users alias the buffer, which offload cannot handle.
  for (auto buffer_id : candidate_item->buffers_defined) {
    for (auto use : buffers_.at(buffer_id).users) {
      if (use.user->instruction->opcode() == HloOpcode::kBitcast) {
        VLOG(3) << " " << candidate_item->instruction->name()
                << " has a user which is a bitcast instruction("
                << use.user->instruction->name()
                << "); cannot offload "
                   "to host.";
        return {};
      } else if (use.user->instruction->opcode() == HloOpcode::kTuple) {
        VLOG(3) << " " << candidate_item->instruction->name()
                << " has a user which is a tuple instruction("
                << use.user->instruction->name()
                << "); cannot offload "
                   "to host.";
        return {};
      }
    }
  }
  const Buffer& output_buffer = buffers_.at(candidate_item->buffers_output[0]);
  if (!candidate_item->placed || candidate_item == in_progress_item_ ||
      output_buffer.live_out) {
    VLOG(2) << " " << candidate_instruction->name()
            << " is not yet placed, is in progress, or is \"live_out\"; cannot "
               "offload to host.";
    return {};
  }
  // The in-progress instruction needs the value on device right now.
  const bool current_instruction_uses_this_item = [&]() {
    if (in_progress_item_ == nullptr) {
      return false;
    }
    const auto& output_buffer_ids = candidate_item->buffers_output;
    for (const auto& output_buffer_id : output_buffer_ids) {
      const Buffer& output_buffer = buffers_.at(output_buffer_id);
      for (const auto& use : output_buffer.users) {
        if (use.user == in_progress_item_) {
          return true;
        }
      }
    }
    return false;
  }();
  if (current_instruction_uses_this_item) {
    VLOG(2) << " " << candidate_instruction->name()
            << " is used by the current instruction in mem tracker ("
            << in_progress_item_->instruction->name()
            << "); cannot offload to host.";
    return {};
  }
  const int64_t bytes_used_by_buffers =
      BytesUsedByBuffers(candidate_item, /*only_count_unplaced_users=*/true);
  if (bytes_used_by_buffers == 0) {
    VLOG(2) << " " << candidate_instruction->name()
            << " consumes no memory; no point in offloading.";
    return {};
  }
  // Find the window between the last placed use and the first unplaced use;
  // the copies must fit inside it.
  const auto [placed_uses, unplaced_uses] =
      GetPlacedAndUnplacedUsers(output_buffer.users);
  const Item* last_placed_user = nullptr;
  const Item* first_unplaced_user = nullptr;
  for (const auto* item = instruction_list_.first(); item != nullptr;
       item = instruction_list_.next(item)) {
    if (absl::c_find_if(placed_uses, [&](const auto& use) {
          return use.user == item;
        }) != placed_uses.end()) {
      last_placed_user = item;
    }
    if (first_unplaced_user == nullptr &&
        absl::c_find_if(unplaced_uses, [&](const auto& use) {
          return use.user == item;
        }) != unplaced_uses.end()) {
      first_unplaced_user = item;
      break;
    }
  }
  if (last_placed_user == nullptr) {
    VLOG(3) << " " << candidate_instruction->name()
            << " has no placed users, starting search at self.";
    last_placed_user = candidate_item;
  }
  CHECK(first_unplaced_user != nullptr)
      << "Didn't find any unplaced user for instruction \""
      << candidate_instruction->name()
      << "\". There must be a "
         "bug in how we calculate how much memory this item uses.";
  float time_spent_before_next_use = 0.0;
  for (auto* item = last_placed_user; item != first_unplaced_user;
       item = instruction_list_.next(item)) {
    time_spent_before_next_use += std::max(
        0.0f, options_.hlo_cost_analysis.optimal_seconds(*item->instruction));
  }
  if (time_spent_before_next_use <= 0.0) {
    return {};
  }
  // Round-trip copy time at the configured host bandwidths.
  const float time_spent_on_copies =
      bytes_used_by_buffers / options_.host_memory_offload_config
                                  ->bandwidth_to_host_bytes_per_second +
      bytes_used_by_buffers / options_.host_memory_offload_config
                                  ->bandwidth_from_host_bytes_per_second;
  if (time_spent_before_next_use < time_spent_on_copies) {
    return {};
  }
  VLOG(3) << " " << candidate_instruction->name() << " has enough time ("
          << time_spent_before_next_use
          << ") between itself and next use. "
             "The memcpy out and back will take "
          << time_spent_on_copies << "s";
  return memory_limit_bytes / bytes_used_by_buffers;
}

// Cost of recomputing the block `candidate_items`, or empty if any item has
// an already-placed control successor or no memory would be saved.
std::optional<int64_t> MemoryUsageTracker::GetCostOfRecompute(
    const std::vector<Item*>& candidate_items,
    int64_t memory_limit_bytes) const {
  for (auto* item : candidate_items) {
    HloInstruction* candidate = item->instruction;
    // A placed control successor would force the remat before an already
    // scheduled instruction.
    if (std::any_of(
            candidate->control_successors().begin(),
            candidate->control_successors().end(),
            [this](const HloInstruction* inst) { return IsPlaced(inst); })) {
      return {};
    }
  }
  VLOG(5) << "Block contains:";
  for (auto* hlo : candidate_items) {
    VLOG(5) << hlo->instruction->name();
  }
  const int64_t memory_reduced = MemoryReducedIfRematerialized(candidate_items);
  if (memory_reduced <= 0) {
    return {};
  }
  return RematerializationCost(candidate_items, memory_reduced,
                               memory_limit_bytes);
}

// Scans candidate blocks of size [min_block_size, max_block_size] starting
// at each skip-node and returns the lowest-cost (items, strategy) found,
// plus the number of cost evaluations performed ("effort").
std::tuple<std::vector<Item*>, RematStrategy, int>
MemoryUsageTracker::PickRematerializationCandidates(
    const InstructionList& instruction_list, int64_t memory_limit_bytes,
    absl::flat_hash_map<const HloInstruction*, bool>* rematerializable_map,
    int min_block_size, int max_block_size, int64_t peak_memory_bytes) {
  std::vector<Item*> best_items;
  int64_t best_cost = std::numeric_limits<int64_t>::max();
  RematStrategy best_strategy;
  int effort = 0;
  VLOG(5) << "Picking candidate block with size in [" << min_block_size << ", "
          << max_block_size << "]";
  for (auto* start_item = instruction_list.first_skip_node();
       start_item != nullptr;
       start_item = instruction_list.next_skip_node(start_item)) {
    std::vector<Item*> block =
        GetInitialBlock(instruction_list, *this, start_item, min_block_size);
    if (block.size() < min_block_size) {
      // Hit the end of the schedule (or an ineligible item) early.
      break;
    }
    if (AnyDenylistedOrNonRematerializable(block, rematerializable_map)) {
      continue;
    }
    // Compression and host offload only apply to single-item blocks.
    if (options_.remat_mode_config.compress && block.size() == 1) {
      auto cost = GetCostOfCompression(block[0], memory_limit_bytes,
                                       peak_memory_bytes);
      ++effort;
      if (cost && *cost < best_cost) {
        VLOG(1) << "Found new best cost; from " << best_cost <<
" to " << *cost << " with strategy kCompress on block of size " << block.size(); best_strategy.kind = RematStrategy::kCompress; best_strategy.compact_shape = *GetCompactShape(block[0]->instruction).value(); best_items = block; best_cost = *cost; } } if (options_.remat_mode_config.host_offload && block.size() == 1) { auto cost = GetCostOfHostOffload(block[0], memory_limit_bytes); ++effort; if (cost && *cost < best_cost) { VLOG(1) << "Found new best cost; from " << best_cost << " to " << *cost << " with strategy kHostOffload on block of size " << block.size(); best_strategy.kind = RematStrategy::kHostOffload; best_items = block; best_cost = *cost; } } if (!options_.remat_mode_config.recompute) { continue; } while (block.size() <= max_block_size) { auto cost = GetCostOfRecompute(block, memory_limit_bytes); ++effort; if (cost && *cost < best_cost) { VLOG(1) << "Found new best cost; from " << best_cost << " to " << *cost << " with strategy kRecompute on block of size " << block.size(); best_strategy.kind = RematStrategy::kRecompute; best_items = block; best_cost = *cost; } auto* last_item = block[block.size() - 1]; auto* next_item = instruction_list.next(last_item); if (next_item == nullptr || next_item->denylisted || !next_item->placed || next_item == in_progress_item_ || !CanBeRematerialized(next_item->instruction, rematerializable_map)) { break; } block.push_back(next_item); } } return {best_items, best_strategy, effort}; } bool MemoryUsageTracker::HasUnplacedUsers(Item* item) const { for (BufferId buffer_id : item->buffers_defined) { const Buffer& buffer = buffers_.at(buffer_id); for (const ItemUse& user : buffer.users) { if (!user.user->placed) { return true; } } } return false; } UsesList MemoryUsageTracker::GetItemUses(Item* item) const { UsesList combined_users; for (BufferId buffer_id : item->buffers_defined) { const Buffer& buffer = buffers_.at(buffer_id); for (const ItemUse& user : buffer.users) { combined_users.push_back(user); } } return combined_users; } 
// Clones each item in `best_items` (processed in reverse), redirects the
// unplaced users of the original to the clone, and updates the tracker,
// schedule, and instruction list accordingly. Returns the net number of
// instructions added (clones plus any helper bitcast/GTE instructions,
// minus originals that became dead "moves").
absl::StatusOr<int64_t> RematerializeInstructions(
    MemoryUsageTracker* memory_tracker, std::vector<Item*>* best_items,
    absl::flat_hash_set<const HloInstruction*>* remat_move_instructions,
    InstructionList* instruction_list, HloSchedule* schedule,
    HloRematerialization* rematerialization) {
  int64_t net_instructions_added = 0;
  std::vector<std::string> instruction_names(best_items->size());
  // Reverse order so that defs are rematerialized after their uses within
  // the block have been handled.
  for (int i = best_items->size() - 1; i >= 0; --i) {
    Item* best_item = (*best_items)[i];
    HloInstruction* best = best_item->instruction;
    instruction_names[i] = best->name();
    HloComputation* computation = best->parent();
    // Nothing to redirect — skip cloning entirely.
    if (!memory_tracker->HasUnplacedUsers(best_item)) {
      continue;
    }
    HloCloneContext context(computation->parent());
    HloInstruction* remat =
        computation->AddInstruction(best->Clone("remat", &context));
    // Give any computations cloned along with the instruction a schedule
    // copied from their originals.
    for (auto& cloned_computation_pair : context.cloned_computations()) {
      if (!schedule->is_computation_scheduled(cloned_computation_pair.first)) {
        continue;
      }
      HloInstructionSequence& sequence =
          schedule->GetOrCreateSequence(cloned_computation_pair.second);
      HloInstructionSequence& old_sequence =
          schedule->GetOrCreateSequence(cloned_computation_pair.first);
      for (HloInstruction* instr : old_sequence.instructions()) {
        sequence.push_back(instr);
      }
    }
    // Channel ids must stay unique; give the clone a fresh one.
    if (DynCast<HloChannelInstruction>(best) &&
        DynCast<HloChannelInstruction>(best)->channel_id()) {
      remat->set_channel_id(rematerialization->NextChannelId());
    }
    TF_RETURN_IF_ERROR(remat->CopyAllControlDepsFrom(best));
    Item* remat_item = instruction_list->CreateItem(remat);
    // Helper instructions (GTE/bitcast) created to adapt the clone's shape
    // to individual uses.
    absl::InlinedVector<Item*, 4> indirect_users;
    absl::flat_hash_map<int64_t, HloInstruction*> gte_cache;
    for (auto& user : memory_tracker->GetItemUses(best_item)) {
      if (!memory_tracker->IsPlaced(user.user->instruction)) {
        VLOG(2) << " Replacing use of " << best->name() << " in "
                << user.user->instruction->name() << " with " << remat->name();
        HloInstruction* remat_use = remat;
        HloInstruction* const user_operand =
            user.user->instruction->mutable_operand(user.operand_number);
        if (remat_use == user_operand) {
          continue;
        }
        // Tuple-element use: insert (and cache) a GTE on the clone.
        if (user.index && remat_use->shape() != user_operand->shape()) {
          auto cached_gte = gte_cache.find(*user.index);
          if (cached_gte == gte_cache.end()) {
            remat_use = computation->AddInstruction(
                HloInstruction::CreateGetTupleElement(
                    ShapeUtil::GetTupleElementShape(remat_use->shape(),
                                                    *user.index),
                    remat_use, *user.index),
                "gte.remat");
            indirect_users.push_back(instruction_list->CreateItem(remat_use));
            gte_cache[*user.index] = remat_use;
          } else {
            remat_use = cached_gte->second;
          }
        }
        // Remaining shape mismatch (e.g. layout) is bridged with a bitcast.
        if (user_operand->shape() != remat_use->shape()) {
          remat_use = computation->AddInstruction(
              HloInstruction::CreateBitcast(user_operand->shape(), remat_use),
              "bitcast.remat");
          indirect_users.push_back(instruction_list->CreateItem(remat_use));
        }
        TF_RETURN_IF_ERROR(user.user->instruction->ReplaceOperandWith(
            user.operand_number, remat_use));
      }
    }
    TF_RETURN_IF_ERROR(memory_tracker->AddRematerializedInstruction(
        best_item, remat_item, absl::MakeSpan(indirect_users)));
    // Compute where the clone must be scheduled: before all of its (direct
    // and indirect) users, before unplaced users of its operands, and
    // before its control successors.
    ItemList place_before;
    const absl::flat_hash_set<Item*> indirect_users_set(indirect_users.begin(),
                                                        indirect_users.end());
    for (auto user : remat->users()) {
      if (!indirect_users_set.contains(instruction_list->GetItem(user))) {
        place_before.push_back(instruction_list->GetItem(user));
      }
    }
    for (auto* indirect_user : indirect_users) {
      for (auto user : indirect_user->instruction->users()) {
        if (!indirect_users_set.contains(instruction_list->GetItem(user))) {
          place_before.push_back(instruction_list->GetItem(user));
        }
      }
    }
    for (auto* operand : remat->operands()) {
      for (auto* operand_user : operand->users()) {
        if (operand_user != remat) {
          Item* operand_user_item = instruction_list->GetItem(operand_user);
          if (!operand_user_item->placed) {
            place_before.push_back(operand_user_item);
          }
        }
      }
    }
    for (auto successor : remat->control_successors()) {
      Item* successor_item = instruction_list->GetItem(successor);
      CHECK(!successor_item->placed) << successor_item->instruction->name();
      place_before.push_back(successor_item);
    }
    instruction_list->InsertBeforeInstructions(remat_item, place_before);
    for (auto* bitcast : indirect_users) {
      instruction_list->InsertBeforeInstructions(bitcast, place_before);
    }
    // `best` is effectively dead when every remaining user (transitively
    // through supported pass-through ops) has none of its own.
    std::function<bool(HloInstruction*)> uses_empty = [&](HloInstruction* i) {
      for (auto* u : i->users()) {
        if (!IsSupportedIndirectUser(u) || !uses_empty(u)) {
          return false;
        }
      }
      return true;
    };
    if (uses_empty(best)) {
      VLOG(2) << best->name() << " is now dead";
      // If the original was itself a remat-move, denylist the new clone to
      // avoid moving the same value repeatedly.
      if (ContainsKey(*remat_move_instructions, best)) {
        instruction_list->Denylist(remat);
      }
      remat_move_instructions->insert(remat);
      net_instructions_added += indirect_users.size();
    } else {
      net_instructions_added += indirect_users.size() + 1;
    }
    for (auto* indirect_user : indirect_users) {
      instruction_list->Denylist(indirect_user->instruction);
    }
    // Async start/done ops must stay paired; remove the dead original now.
    if (HloDataflowAnalysis::IsAsynchronousOperationStart(best->opcode()) ||
        HloDataflowAnalysis::IsAsynchronousOperationDone(best->opcode())) {
      VLOG(2) << "The old instruction " << best->name()
              << " is an async op. Removing to maintain one start to one done "
                 "invariant to keep the HLO valid.";
      TF_RETURN_IF_ERROR(best->DropAllControlDeps());
      TF_RETURN_IF_ERROR(computation->RemoveInstruction(best));
    }
  }
  return net_instructions_added;
}

// Replaces `best_item`'s output with a compressed copy plus an uncompressed
// copy for its unplaced users. (Definition continues beyond this chunk.)
absl::StatusOr<int64_t> CompressInstruction(MemoryUsageTracker* memory_tracker,
                                            Item* best_item,
                                            const Shape& compact_shape,
                                            InstructionList* instruction_list) {
  HloInstruction* best = best_item->instruction;
  VLOG(5) << "Transposing instruction " << best->name() << " (saving "
          << HumanReadableNumBytes(memory_tracker->MemoryReducedIfCompressed(
                 best_item, compact_shape))
          << ") to" << compact_shape.ToString(true);
  HloComputation* computation = best->parent();
  HloInstruction* compressed = computation->AddInstruction(
      HloInstruction::CreateUnary(compact_shape, HloOpcode::kCopy, best),
      absl::StrCat(best->name(), ".remat_compressed"));
  HloInstruction* uncompressed = computation->AddInstruction(
      HloInstruction::CreateUnary(best->shape(), HloOpcode::kCopy, compressed),
      absl::StrCat(best->name(), ".remat_uncompressed"));
  Item* compressed_item = instruction_list->CreateItem(compressed);
  compressed_item->placed = true;
  Item* uncompressed_item = instruction_list->CreateItem(uncompressed);
  // Redirect only the unplaced users to the uncompressed value.
  std::vector<HloInstruction*> best_users_copy = best->users();
  for (HloInstruction* user : best_users_copy) {
    if (!memory_tracker->IsPlaced(user)) {
      VLOG(5) << " Replacing use of " << best->name() << " in " << user->name()
              << " with " << uncompressed->name();
      TF_RETURN_IF_ERROR(best->ReplaceUseWith(user, uncompressed));
    }
  }
  TF_RETURN_IF_ERROR(memory_tracker->AddCompressInstructions(
      best_item, compressed_item, uncompressed_item));
  ItemList place_before;
  for (auto user : uncompressed->users()) {
    place_before.push_back(instruction_list->GetItem(user));
  }
  instruction_list->Denylist(compressed_item->instruction);
  instruction_list->Denylist(uncompressed_item->instruction);
  instruction_list->InsertBeforeInstructions(uncompressed_item, place_before);
instruction_list->InsertAfterInstructions(compressed_item, {best_item}); return 2; } absl::StatusOr<int64_t> OffloadInstruction(MemoryUsageTracker* memory_tracker, Item* best_item, InstructionList* instruction_list) { HloInstruction* best_instruction = best_item->instruction; HloComputation* computation = best_instruction->parent(); VLOG(2) << "Best_instruction's users: " << absl::StrJoin(best_instruction->users(), ", ", [](std::string* str, const auto* x) { return str->append(x->name()); }); Shape instruction_shape_device = best_instruction->shape(); Shape instruction_shape_host = best_instruction->shape(); instruction_shape_host.mutable_layout()->set_memory_space( memory_tracker->options().host_memory_offload_config->host_memory_space); Shape context_shape = ShapeUtil::MakeShape(U32, {}); HloInstruction* copy_start_to_host = computation->AddInstruction(HloInstruction::CreateCopyStart( ShapeUtil::MakeTupleShape({instruction_shape_host, instruction_shape_device, context_shape}), best_instruction)); HloInstruction* copy_done_to_host = computation->AddInstruction(HloInstruction::CreateUnary( instruction_shape_host, HloOpcode::kCopyDone, copy_start_to_host)); HloInstruction* copy_start_to_device = computation->AddInstruction(HloInstruction::CreateCopyStart( ShapeUtil::MakeTupleShape({instruction_shape_device, instruction_shape_host, context_shape}), copy_done_to_host)); HloInstruction* copy_done_to_device = computation->AddInstruction( HloInstruction::CreateUnary(instruction_shape_device, HloOpcode::kCopyDone, copy_start_to_device)); VLOG(3) << "Created copy_start_to_host instr: " << copy_start_to_host->ToString(); VLOG(3) << "Created copy_done_to_host instr: " << copy_done_to_host->ToString(); VLOG(3) << "Created copy_start_to_device instr: " << copy_start_to_device->ToString(); VLOG(3) << "Created copy_done_to_device instr: " << copy_done_to_device->ToString(); TF_RETURN_IF_ERROR( copy_start_to_host->Visit(&memory_tracker->options().hlo_cost_analysis)); 
TF_RETURN_IF_ERROR( copy_done_to_host->Visit(&memory_tracker->options().hlo_cost_analysis)); TF_RETURN_IF_ERROR(copy_start_to_device->Visit( &memory_tracker->options().hlo_cost_analysis)); TF_RETURN_IF_ERROR( copy_done_to_device->Visit(&memory_tracker->options().hlo_cost_analysis)); Item* copy_start_to_host_item = instruction_list->CreateItem(copy_start_to_host); Item* copy_done_to_host_item = instruction_list->CreateItem(copy_done_to_host); Item* copy_start_to_device_item = instruction_list->CreateItem(copy_start_to_device); Item* copy_done_to_device_item = instruction_list->CreateItem(copy_done_to_device); instruction_list->Denylist(copy_start_to_host); instruction_list->Denylist(copy_done_to_host); instruction_list->Denylist(copy_start_to_device); instruction_list->Denylist(copy_done_to_device); Item* place_before{nullptr}; { ItemList place_before_list; for (auto user : best_instruction->users()) { if (user == copy_start_to_host) { continue; } auto item_of_user = instruction_list->GetItem(user); if (item_of_user->placed) { continue; } place_before_list.push_back(item_of_user); } CHECK(!place_before_list.empty()) << "Have nothing to place this before!"; for (auto* item = instruction_list->first(); item != nullptr; item = instruction_list->next(item)) { if (absl::c_linear_search(place_before_list, item)) { place_before = item; break; } } } CHECK_NE(place_before, nullptr) << "Could not find an item to place this before."; auto get_first_item_after_compute_time = [&](Item* start_item, Item* end_item, auto successor_func, float time_spent_on_copy) { float time_so_far = 0.0; auto* current_item = start_item; while (time_so_far < time_spent_on_copy) { auto next_item = successor_func(current_item); if (next_item == end_item) { LOG(WARNING) << "Didn't find enough computation before end of window"; break; } current_item = next_item; CHECK_NE(current_item, nullptr) << "current_item is null"; CHECK_NE(current_item->instruction, nullptr) << "current_item's instruction is 
null"; time_so_far += std::max( 0.0f, memory_tracker->options().hlo_cost_analysis.optimal_seconds( *current_item->instruction)); } return current_item; }; const int64_t bytes_used_by_buffers = memory_tracker->BytesUsedByBuffers( best_item, false); const float copy_to_host_time_seconds = bytes_used_by_buffers / memory_tracker->options() .host_memory_offload_config->bandwidth_to_host_bytes_per_second; const float copy_from_host_time_seconds = bytes_used_by_buffers / memory_tracker->options() .host_memory_offload_config->bandwidth_from_host_bytes_per_second; VLOG(2) << "Item uses " << bytes_used_by_buffers << "B and will take " << copy_to_host_time_seconds << "s to copy to host and " << copy_from_host_time_seconds << "s to copy from host."; VLOG(2) << "Inserting " << copy_start_to_host_item->instruction->name() << " immediately after " << best_item->instruction->name(); instruction_list->InsertAfterInstructions(copy_start_to_host_item, {best_item}); VLOG(2) << "Inserting " << copy_done_to_device_item->instruction->name() << " immediately before " << place_before->instruction->name(); instruction_list->InsertBeforeInstructions(copy_done_to_device_item, {place_before}); auto first_item_after_to_host_copy = get_first_item_after_compute_time( copy_start_to_host_item, copy_done_to_device_item, [&instruction_list](Item* item) { return instruction_list->next(item); }, copy_to_host_time_seconds); VLOG(2) << "Inserting " << copy_done_to_host_item->instruction->name() << " immediately after " << first_item_after_to_host_copy->instruction->name(); instruction_list->InsertAfterInstructions(copy_done_to_host_item, {first_item_after_to_host_copy}); auto first_item_before_from_host_copy = get_first_item_after_compute_time( copy_done_to_device_item, copy_done_to_host_item, [&instruction_list](Item* item) { return instruction_list->prev(item); }, copy_from_host_time_seconds); VLOG(2) << "Inserting " << copy_start_to_device_item->instruction->name() << " immediately before " << 
first_item_before_from_host_copy->instruction->name(); instruction_list->InsertBeforeInstructions( copy_start_to_device_item, {first_item_before_from_host_copy}); { auto item = instruction_list->first(); while (item != nullptr) { if (item == copy_start_to_host_item || item == copy_done_to_host_item || item == copy_start_to_device_item || item == copy_done_to_device_item) { item->placed = true; } else if (memory_tracker->IsInProgressItem(item)) { break; } item = instruction_list->next(item); } } std::vector<HloInstruction*> best_users_copy = best_instruction->users(); for (HloInstruction* user : best_users_copy) { if (!memory_tracker->IsPlaced(user)) { VLOG(3) << " Replacing use of " << best_instruction->name() << " in " << user->name() << " with " << copy_done_to_device->name(); TF_RETURN_IF_ERROR( best_instruction->ReplaceUseWith(user, copy_done_to_device)); } else { VLOG(3) << user->name() << " is placed, not going to update"; } } TF_RETURN_IF_ERROR(memory_tracker->AddHostOffloadCopyInstructions( best_item, copy_start_to_host_item, copy_done_to_host_item, copy_start_to_device_item, copy_done_to_device_item)); return 4; } struct InstructionsAdded { int remat_count; int net_instructions_added; int effort; }; absl::StatusOr<InstructionsAdded> RematerializeBestBlock( int min_block_size, int max_block_size, MemoryUsageTracker* memory_tracker, InstructionList* instruction_list, HloSchedule* schedule, int64_t memory_limit_bytes, absl::flat_hash_map<const HloInstruction*, bool>* rematerializable_map, absl::flat_hash_set<const HloInstruction*>* remat_move_instructions, HloRematerialization* rematerialization) { CHECK(min_block_size > 0) << "Negative block size."; std::vector<Item*> best_items; RematStrategy best_strategy; int effort; std::tie(best_items, best_strategy, effort) = memory_tracker->PickRematerializationCandidates( *instruction_list, memory_limit_bytes, rematerializable_map, min_block_size, max_block_size, rematerialization->ComputationPeakMemory( 
memory_tracker->computation())); InstructionsAdded num_instructions_added; num_instructions_added.remat_count = best_items.size(); num_instructions_added.effort = effort; if (best_items.empty()) { num_instructions_added.net_instructions_added = 0; return num_instructions_added; } if (best_strategy.kind == RematStrategy::kCompress) { CHECK(best_items.size() == 1) << "More than one instruction compressed simultaneously."; HloInstruction* best = best_items[0]->instruction; VLOG(1) << "Remat via compression: " << best->name() << " (saving " << HumanReadableNumBytes(memory_tracker->MemoryReducedIfCompressed( best_items[0], best_strategy.compact_shape)) << ")"; TF_ASSIGN_OR_RETURN( num_instructions_added.net_instructions_added, CompressInstruction(memory_tracker, best_items[0], best_strategy.compact_shape, instruction_list)); } else if (best_strategy.kind == RematStrategy::kHostOffload) { CHECK_EQ(best_items.size(), 1) << "More than one buffer offloaded simultaneously."; VLOG(1) << "Remat via offload: " << best_items[0]->instruction->name(); TF_ASSIGN_OR_RETURN( num_instructions_added.net_instructions_added, OffloadInstruction(memory_tracker, best_items[0], instruction_list)); VLOG(4) << "Offload done, hlo computation:\n" << memory_tracker->computation()->ToString(); VLOG(6) << "Memory tracker:\n" << memory_tracker->ToString(); } else { CHECK_EQ(best_strategy.kind, RematStrategy::kRecompute) << "Expecting strategy to be Recompute"; VLOG(1) << "Remat via recomputation: {" << absl::StrJoin(best_items, ", ", [](std::string* out, Item* item) { absl::StrAppend(out, item->instruction->name()); }) << '}'; TF_ASSIGN_OR_RETURN( num_instructions_added.net_instructions_added, RematerializeInstructions(memory_tracker, &best_items, remat_move_instructions, instruction_list, schedule, rematerialization)); } return num_instructions_added; } } absl::StatusOr<int64_t> HloRematerialization::ComputePeakMemory( const HloComputation* computation, const HloInstructionSequence& order, const 
absl::flat_hash_set<absl::string_view>& execution_threads) const { InstructionList instruction_list(order); MemoryUsageTracker tracker(options_, computation, *points_to_analysis_, instruction_list); int64_t peak_memory = tracker.memory_usage(); for (auto* item = instruction_list.first(); item != nullptr; item = instruction_list.next(item)) { const HloInstruction* instruction = item->instruction; TF_RETURN_IF_ERROR(tracker.BeginInstruction(item)); TF_ASSIGN_OR_RETURN( int64_t callee_usage, CalledComputationsMemoryUsage(instruction, execution_threads)); peak_memory = std::max<int64_t>(peak_memory, tracker.memory_usage() + callee_usage); TF_RETURN_IF_ERROR(tracker.EndInstruction()); } VLOG(1) << "Peak memory for " << computation->name() << ": " << HumanReadableNumBytes(peak_memory); return peak_memory; } absl::StatusOr<int64_t> HloRematerialization::CalledComputationsMemoryUsage( const HloInstruction* instruction, const absl::flat_hash_set<absl::string_view>& execution_threads) const { const CallSite* callsite = call_graph_->GetNode(instruction->parent()).GetCallSite(instruction); if (callsite == nullptr || callsite->context() == CallContext::kEmbedded) { return 0; } int64_t callee_usage = 0; for (const HloComputation* computation : callsite->called_computations()) { if (!HloInstruction::IsThreadIncluded(computation->execution_thread(), execution_threads)) { continue; } TF_RET_CHECK(ContainsKey(computation_peak_memory_, computation)); callee_usage += computation_peak_memory_.at(computation); } return callee_usage; } absl::StatusOr<bool> HloRematerialization::RematerializeComputation( HloComputation* computation, HloSchedule* schedule, int64_t memory_limit_bytes, int64_t min_remat_size, const absl::flat_hash_set<absl::string_view>& execution_threads) { const auto peak_memory_usage = computation_peak_memory_.at(computation); if (peak_memory_usage <= memory_limit_bytes) { VLOG(1) << "Asked to rematerialize computation of size " << peak_memory_usage << " but it already 
fits within the given memory limit (" << memory_limit_bytes << ")"; return false; } VLOG(1) << "Rematerializing computation " << computation->name() << " with limit " << HumanReadableNumBytes(memory_limit_bytes); VLOG(1) << "peak memory usage is " << HumanReadableNumBytes(peak_memory_usage); CHECK(!ContainsKey(rematerialized_computations_, computation)); InstructionList instruction_list(schedule->sequence(computation)); MemoryUsageTracker memory_tracker(options_, computation, *points_to_analysis_, instruction_list); instruction_list.PromoteNodesToSkip([&](Item* item) { return memory_tracker.AllocatedSize(item) >= min_remat_size; }); bool changed = false; absl::flat_hash_set<const HloInstruction*> remat_move_instructions; absl::flat_hash_map<const HloInstruction*, bool> rematerializable_map; int64_t peak_memory = memory_tracker.memory_usage(); int64_t remat_count = 0; int64_t net_instructions_added = 0; const CallGraphNode& call_graph_node = call_graph_->GetNode(computation); int64_t instruction_index = 0; for (auto* item = instruction_list.first(); item != nullptr; item = instruction_list.next(item)) { const HloInstruction* instruction = item->instruction; TF_ASSIGN_OR_RETURN( int64_t callee_usage, CalledComputationsMemoryUsage(instruction, execution_threads)); TF_RETURN_IF_ERROR(memory_tracker.BeginInstruction(item)); VLOG(2) << "Program point at " << instruction->name() << ", memory usage = " << memory_tracker.memory_usage() << ", callee usage = " << callee_usage << ", [" << instruction_index << "/" << instruction_list.size() << "]"; instruction_index++; int min_block_size = 1; int max_block_size = 1; if (memory_tracker.AllocatedSize(item) + callee_usage > 0) { bool is_first_phase = true; int64_t first_phase_effort = 0; int64_t second_phase_effort = 0; while (memory_tracker.memory_usage() + callee_usage > memory_limit_bytes) { VLOG(2) << "Over memory limit at instruction " << instruction->name() << ", using " << HumanReadableNumBytes(memory_tracker.memory_usage() 
+ callee_usage) << ", limit is " << HumanReadableNumBytes(memory_limit_bytes); TF_ASSIGN_OR_RETURN( InstructionsAdded instructions_added, RematerializeBestBlock(min_block_size, max_block_size, &memory_tracker, &instruction_list, schedule, memory_limit_bytes, &rematerializable_map, &remat_move_instructions, this)); net_instructions_added += instructions_added.net_instructions_added; remat_count += instructions_added.remat_count; if (is_first_phase) { first_phase_effort += instructions_added.effort; } else { second_phase_effort += instructions_added.effort; } if (instructions_added.net_instructions_added > 0) { VLOG(1) << "memory_usage after rematerialization = " << HumanReadableNumBytes(memory_tracker.memory_usage()); } if (instructions_added.remat_count == 0) { min_block_size = max_block_size + 1; max_block_size = 2 * max_block_size; is_first_phase = false; } else { max_rematerialized_block_size_ = std::max(max_rematerialized_block_size_, max_block_size); changed = true; min_block_size = 1; max_block_size = 1; } if (max_block_size > options_.block_size_limit || second_phase_effort > options_.block_rematerialization_factor * first_phase_effort) { break; } } } const CallSite* callsite = call_graph_node.GetCallSite(instruction); if (callsite != nullptr && callsite->context() == CallContext::kControlFlow && memory_tracker.memory_usage() + callee_usage > memory_limit_bytes) { VLOG(1) << "Memory usage still over the limit (" << (memory_tracker.memory_usage() + callee_usage) << " > " << memory_limit_bytes << "). 
Rematerializing computations called by " << instruction->name(); for (HloComputation* called_computation : callsite->called_computations()) { if (!ContainsKey(rematerialized_computations_, called_computation) && HloInstruction::IsThreadIncluded( called_computation->execution_thread(), execution_threads)) { int64_t subcomputation_memory_limit_bytes = std::max<int64_t>( 0, memory_limit_bytes - memory_tracker.memory_usage()); TF_ASSIGN_OR_RETURN( bool subcomputation_changed, RematerializeComputation(called_computation, schedule, subcomputation_memory_limit_bytes, min_remat_size, execution_threads)); changed |= subcomputation_changed; } } TF_ASSIGN_OR_RETURN(callee_usage, CalledComputationsMemoryUsage( instruction, execution_threads)); } peak_memory = std::max<int64_t>( peak_memory, memory_tracker.memory_usage() + callee_usage); VLOG(3) << "peak memory usage = " << HumanReadableNumBytes(peak_memory); TF_RETURN_IF_ERROR(memory_tracker.EndInstruction()); } for (auto* instruction : computation->instructions()) { CHECK(memory_tracker.IsPlaced(instruction)) << instruction->name(); } VLOG(1) << "In computation " << computation->name() << " rematerialized " << remat_count << " instructions; " << net_instructions_added << " net instructions added"; VLOG(1) << " peak memory usage now " << HumanReadableNumBytes(peak_memory) << " (was " << HumanReadableNumBytes(computation_peak_memory_.at(computation)) << ")"; computation_peak_memory_.at(computation) = peak_memory; HloInstructionSequence& sequence = schedule->GetOrCreateSequence(computation); sequence.clear(); for (auto* item = instruction_list.first(); item != nullptr; item = instruction_list.next(item)) { HloInstruction* instruction = item->instruction; sequence.push_back(instruction); } rematerialized_computations_.insert(computation); instructions_rematerialized_ += remat_count; net_instructions_added_ += net_instructions_added; return changed; } absl::StatusOr<bool> HloRematerialization::Run( HloModule* module, const 
absl::flat_hash_set<absl::string_view>& execution_threads) { if (options_.remat_mode_config.host_offload) { CHECK(options_.host_memory_offload_config.has_value()) << "Host memory config is required when host memory offload strategy " "is specified"; } VLOG(1) << "HloRematerialization() with memory limit of " << HumanReadableNumBytes(options_.memory_limit_bytes); if (!options_.remat_mode_config.compress && !options_.remat_mode_config.recompute && !options_.remat_mode_config.host_offload) { VLOG(1) << "All rematerialization strategies are disabled. Skipping."; return false; } VLOG(2) << "HloRemat mode: compress: " << options_.remat_mode_config.compress << ", host_offload: " << options_.remat_mode_config.host_offload << ", recompute: " << options_.remat_mode_config.recompute; XLA_VLOG_LINES(3, "Before HloRematerialization:\n" + module->ToString()); computation_peak_memory_.clear(); rematerialized_computations_.clear(); instructions_rematerialized_ = 0; net_instructions_added_ = 0; TF_RET_CHECK(module->has_schedule()); TF_ASSIGN_OR_RETURN(points_to_analysis_, TuplePointsToAnalysis::Run(module)); next_channel_id_ = hlo_query::NextChannelId(*module); int64_t module_output_size = 0; ShapeUtil::ForEachSubshape( module->result_shape(), [&module_output_size, this](const Shape& subshape, const ShapeIndex& output_index) { module_output_size += options_.hlo_cost_analysis.GetShapeSize(subshape); }); int64_t adjusted_memory_limit_bytes = std::max<int64_t>(0, options_.memory_limit_bytes - module_output_size); VLOG(1) << "Adjusted memory limit accounting for output (" << HumanReadableNumBytes(module_output_size) << "): " << HumanReadableNumBytes(adjusted_memory_limit_bytes); call_graph_ = CallGraph::Build(module); int64_t total_async_peak_memory = 0; if (!options_.async_computation_parallelism.empty()) { absl::flat_hash_set<std::string_view> async_threads; for (const auto& [computation, _] : options_.async_computation_parallelism) { 
async_threads.insert(computation->execution_thread()); } TF_RETURN_IF_ERROR(call_graph_->VisitNodes( [this, module, &async_threads](const CallGraphNode& node) -> absl::Status { auto callee_thread = node.computation()->execution_thread(); if (node.context() == CallContext::kControlFlow && HloInstruction::IsThreadIncluded(callee_thread, async_threads)) { TF_ASSIGN_OR_RETURN(computation_peak_memory_[node.computation()], ComputePeakMemory(node.computation(), module->schedule().sequence( node.computation()), {callee_thread})); } return absl::OkStatus(); }, false)); int64_t async_peak_memory = 0; for (const auto [entry_computation, parallel_threads] : options_.async_computation_parallelism) { const int64_t peak_memory = computation_peak_memory_.at(entry_computation); const int64_t parallel_peak_memory = peak_memory * parallel_threads; async_peak_memory = std::max(async_peak_memory, parallel_peak_memory); } adjusted_memory_limit_bytes = std::max<int64_t>(0, adjusted_memory_limit_bytes - async_peak_memory); total_async_peak_memory += async_peak_memory; VLOG(1) << "Adjusted memory limit accounting for async computations (" << HumanReadableNumBytes(async_peak_memory) << "): " << HumanReadableNumBytes(adjusted_memory_limit_bytes); computation_peak_memory_.clear(); } TF_RETURN_IF_ERROR(call_graph_->VisitNodes( [this, module, &execution_threads](const CallGraphNode& node) -> absl::Status { if (node.context() == CallContext::kControlFlow && HloInstruction::IsThreadIncluded( node.computation()->execution_thread(), execution_threads)) { TF_ASSIGN_OR_RETURN( computation_peak_memory_[node.computation()], ComputePeakMemory(node.computation(), module->schedule().sequence(node.computation()), execution_threads)); } return absl::OkStatus(); }, false)); const int64_t before_peak_memory = computation_peak_memory_.at(module->entry_computation()) + module_output_size + total_async_peak_memory; VLOG(1) << "Peak memory usage of module (before): " << HumanReadableNumBytes(before_peak_memory); 
for (auto* computation : module->MakeComputationPostOrder(execution_threads)) { TF_RETURN_IF_ERROR(computation->Accept(&options_.hlo_cost_analysis)); } TF_ASSIGN_OR_RETURN( bool changed, RematerializeComputation(module->entry_computation(), &module->schedule(), adjusted_memory_limit_bytes, options_.min_remat_size, execution_threads)); HloSchedule saved_schedule = module->schedule(); module->clear_schedule(); TF_ASSIGN_OR_RETURN(bool dead_code_removed, HloDCE().Run(module)); changed |= dead_code_removed; TF_RETURN_IF_ERROR(saved_schedule.Update(execution_threads)); TF_RETURN_IF_ERROR(module->set_schedule(std::move(saved_schedule))); VLOG(1) << "Rematerialized " << instructions_rematerialized_ << " instructions in module " << module->name() << "; " << net_instructions_added_ << " net instructions added"; const int64_t current_peak_memory = computation_peak_memory_.at(module->entry_computation()) + module_output_size + total_async_peak_memory; VLOG(1) << "Peak memory usage of module now " << HumanReadableNumBytes(current_peak_memory) << " (" << current_peak_memory << " bytes), was " << HumanReadableNumBytes(before_peak_memory) << " (" << before_peak_memory << " bytes)"; const int64_t reduced_peak_memory = before_peak_memory - current_peak_memory; VLOG(1) << "Reduced peak memory by " << HumanReadableNumBytes(reduced_peak_memory) << " (" << reduced_peak_memory << " bytes)"; sizes_.before_bytes = before_peak_memory; sizes_.after_bytes = current_peak_memory; XLA_VLOG_LINES(5, "After HloRematerialization:\n" + module->ToString()); if (current_peak_memory > options_.memory_limit_bytes) { LOG(WARNING) << absl::StrFormat( "Can't reduce memory use below %s (%d bytes) by rematerialization; " "only reduced to %s (%d bytes), down from %s (%d bytes) originally", HumanReadableNumBytes(options_.memory_limit_bytes), options_.memory_limit_bytes, HumanReadableNumBytes(current_peak_memory), current_peak_memory, HumanReadableNumBytes(before_peak_memory), before_peak_memory); } return 
changed; } }
// Unit tests for the HLO rematerialization pass.
#include "xla/service/hlo_rematerialization.h"

#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>

#include <gmock/gmock.h>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/layout.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/hlo_rematerialization_test_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"

namespace xla {
namespace {

namespace op = xla::testing::opcode_matchers;

using ::testing::_;

// Fixture for rematerialization tests that involve a computation running on a
// separate (async) execution thread.
class AsyncRematerializationTest : public RematerializationTestBase {
 protected:
  // Runs the rematerialization pass on `module` with the given memory limit,
  // scheduling the module first if it has no schedule.
  // `async_computation_parallelism` maps an async-wrapped computation to the
  // parallelism assumed for it.  Only the main execution thread is passed to
  // the pass for rematerialization.
  absl::StatusOr<bool> RunHloRematerialization(
      int64_t memory_limit_bytes, HloModule* module,
      const absl::flat_hash_map<HloComputation*, int64_t>&
          async_computation_parallelism,
      int64_t min_remat_size = 0) {
    TF_EXPECT_OK(verifier().Run(module).status());
    if (!module->has_schedule()) {
      HloMemoryScheduler scheduler(
          [](const BufferValue& buffer) { return ByteSizeOf(buffer.shape()); },
          ComputationSchedulerToModuleScheduler(DefaultMemoryScheduler));
      TF_EXPECT_OK(scheduler.Run(module).status());
    }
    // Same flags as the recompute/compress fixture below; third flag off.
    HloRematerialization::RematerializationModeConfig config(true, true,
                                                             false);
    auto shape_size_func = [](const Shape& shape) { return ByteSizeOf(shape); };
    HloCostAnalysis cost_analysis(shape_size_func);
    HloRematerialization::Options options(
        cost_analysis, config, memory_limit_bytes, 1, 1, min_remat_size,
        nullptr, std::nullopt, async_computation_parallelism);
    HloRematerialization::RematerializationSizes sizes;
    HloRematerialization remat(options, sizes);
    return remat.Run(module, {HloInstruction::kMainExecutionThread});
  }

  // Parallelism assumed for the async computation in the test below.
  static constexpr int64_t kNumParallelThreads = 16;
};

// Rematerialization must account for the memory consumed by the async
// ("offload"-thread) computation, scaled by its assumed parallelism.
TEST_F(AsyncRematerializationTest, AsyncComputation) {
  constexpr std::string_view hlo = R"( HloModule async, is_scheduled=true %offload_computation { %param = f32[1]{0} parameter(0) %reshape = f32[] reshape(f32[1]{0} %param) %broadcast = f32[1024]{0} broadcast(f32[] %reshape), dimensions={} %negate = f32[1024]{0} negate(f32[1024]{0} %broadcast) %concatenate = f32[2048]{0} concatenate(f32[1024]{0} %negate, f32[1024]{0} %negate), dimensions={0} %slice = f32[1]{0} slice(f32[2048]{0} %concatenate), slice={[0:1]} %concatenate.1 = f32[1025]{0} concatenate(f32[1024]{0} %broadcast, f32[1]{0} %slice), dimensions={0} ROOT %slice.1 = f32[1]{0} slice(f32[1025]{0} %concatenate.1), slice={[0:1]} } %main_computation { %param = f32[1]{0} parameter(0) %reshape = f32[] reshape(f32[1]{0} %param) %broadcast = f32[1024]{0} broadcast(f32[] %reshape), dimensions={} %negate = f32[1024]{0} negate(f32[1024]{0} %broadcast) %concatenate = f32[2048]{0} concatenate(f32[1024]{0} %negate, f32[1024]{0} %negate), dimensions={0} %slice = f32[1]{0} slice(f32[2048]{0} %concatenate), slice={[0:1]} %concatenate.1 = f32[1025]{0} concatenate(f32[1024]{0} %broadcast, f32[1]{0} %slice), dimensions={0} ROOT %slice.1 = f32[1]{0} slice(f32[1025]{0} %concatenate.1), slice={[0:1]} } ENTRY %main { %param = f32[1]{0} parameter(0) %call-start = ((f32[1]{0}), f32[1]{0}, s32[]) call-start(f32[1]{0} %param), to_apply=%offload_computation, async_execution_thread="offload" %call-done = f32[1]{0} call-done(((f32[1]{0}), f32[1]{0}, s32[]) %call-start) ROOT %call = f32[1]{0} call(f32[1]{0} %call-done), to_apply=%main_computation } )";

  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));

  HloInstruction* call_start = FindInstruction(module.get(), "call-start");
  // Memory limit scales with the assumed async parallelism plus headroom for
  // the main thread.
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      RunHloRematerialization(
          kNumParallelThreads * 16 * 1024 + 14 * 1024, module.get(),
          {{call_start->async_wrapped_computation(), kNumParallelThreads}}));
EXPECT_TRUE(changed);
}

// Fixture for recompute+compress rematerialization tests.  Snapshots all
// computation and instruction names before the pass so that tests can verify
// (via CheckForRematInInstructionNames) that instructions created by the pass
// carry ".remat" in their names.
class RecomputeAndCompressHloRematerializationTest
    : public RematerializationTestBase {
 protected:
  // Runs the rematerialization pass on `module` with the given memory limit,
  // scheduling the module first if needed.  Records instruction names before
  // and after the pass.
  absl::StatusOr<bool> RunHloRematerialization(int64_t memory_limit_bytes,
                                               HloModule* module,
                                               int64_t min_remat_size = 0) {
    TF_EXPECT_OK(verifier().Run(module).status());
    if (!module->has_schedule()) {
      HloMemoryScheduler scheduler(
          [](const BufferValue& buffer) { return ByteSizeOf(buffer.shape()); },
          ComputationSchedulerToModuleScheduler(DefaultMemoryScheduler));
      TF_EXPECT_OK(scheduler.Run(module).status());
    }
    // Snapshot the names that exist before rematerialization runs.
    for (const HloComputation* computation : module->computations()) {
      before_computation_names_.insert(computation->name());
      for (const HloInstruction* instruction : computation->instructions()) {
        before_instruction_names_.insert(instruction->name());
      }
    }
    HloRematerialization::RematerializationModeConfig config(true, true,
                                                             false);
    auto shape_size_func = [](const Shape& shape) { return ByteSizeOf(shape); };
    HloCostAnalysis cost_analysis(shape_size_func);
    HloRematerialization::Options options(
        cost_analysis, config, memory_limit_bytes, 1, 1, min_remat_size,
        nullptr, std::nullopt, {});
    HloRematerialization::RematerializationSizes sizes;
    HloRematerialization remat(options, sizes);
    absl::StatusOr<bool> result = remat.Run(module);
    // Snapshot instruction names afterwards, restricted to computations that
    // already existed before the pass ran.
    for (const HloComputation* computation : module->computations()) {
      if (!before_computation_names_.contains(computation->name())) {
        continue;
      }
      for (const HloInstruction* instruction : computation->instructions()) {
        after_instruction_names_.insert(instruction->name());
      }
    }
    return result;
  }

  // Expects every instruction that appeared during the pass to contain
  // ".remat" in its name.
  void CheckForRematInInstructionNames(absl::string_view test_case_name) {
    constexpr const absl::string_view kRematInstructionNameMustContain =
        ".remat";
    for (const auto& instruction_name : after_instruction_names_) {
      if (!before_instruction_names_.contains(instruction_name)) {
        EXPECT_TRUE(absl::StrContains(instruction_name,
                                      kRematInstructionNameMustContain))
            << "[" << test_case_name << "] Instruction \"" << instruction_name
            << "\" must contain \"" << kRematInstructionNameMustContain
            << "\"";
      }
    }
  }

 private:
  // Name snapshots taken around the pass run; see RunHloRematerialization.
  absl::flat_hash_set<absl::string_view> before_computation_names_;
  absl::flat_hash_set<absl::string_view> before_instruction_names_;
  absl::flat_hash_set<absl::string_view> after_instruction_names_;
};

// A broadcast in a single computation is rematerialized next to its use.
TEST_F(RecomputeAndCompressHloRematerializationTest, SingleComputation) {
  auto module = CreateNewVerifiedModule();
  HloComputation* computation =
      module->AddEntryComputation(MakeRematerializableComputation());

  // Find and save the original broadcast which should get rematerialized.
  const HloInstruction* slice = computation->root_instruction();
  ASSERT_THAT(slice, op::Slice(op::Concatenate(op::Broadcast(_), _)));
  const HloInstruction* concat = slice->operand(0);
  const HloInstruction* bcast = concat->operand(0);

  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(14 * 1024, module.get()));
  EXPECT_TRUE(changed);

  // Root is unchanged; the concat now consumes a new broadcast.
  EXPECT_EQ(computation->root_instruction(), slice);
  const HloInstruction* remat_bcast = concat->operand(0);
  EXPECT_THAT(remat_bcast, op::Broadcast(::testing::Ne(bcast)));
  // The rematerialized broadcast is scheduled immediately before the concat.
  EXPECT_EQ(module->schedule()
                .sequence(computation)
                .instructions()[computation->instruction_count() - 2],
            concat);
  EXPECT_EQ(module->schedule()
                .sequence(computation)
                .instructions()[computation->instruction_count() - 3],
            remat_bcast);
  CheckForRematInInstructionNames(
      ::testing::UnitTest::GetInstance()->current_test_info()->name());
}

// With min_remat_size as large as the memory limit, no candidate qualifies
// and the pass reports no change.
TEST_F(RecomputeAndCompressHloRematerializationTest,
       SingleComputationNoWorthRemat) {
  auto module = CreateNewVerifiedModule();
  HloComputation* computation =
      module->AddEntryComputation(MakeRematerializableComputation());

  const HloInstruction* slice = computation->root_instruction();
  ASSERT_THAT(slice, op::Slice(op::Concatenate(op::Broadcast(_), _)));

  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(14 * 1024, module.get(),
                                                  14 * 1024));
  EXPECT_FALSE(changed);
}

// With a generous memory limit the module is left untouched.
TEST_F(RecomputeAndCompressHloRematerializationTest,
       SingleComputationNoRematerialization) {
  auto module = CreateNewVerifiedModule();
  HloComputation* computation =
      module->AddEntryComputation(MakeRematerializableComputation());

  EXPECT_EQ(computation->instruction_count(), 8);

  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(20 * 1024, module.get()));

  // No instructions should have been added or removed.
  EXPECT_FALSE(changed);
  EXPECT_EQ(computation->instruction_count(), 8);
}

// A limit that only the entry computation exceeds: rematerialization happens
// around the while, and the while body is left alone.
TEST_F(RecomputeAndCompressHloRematerializationTest, RematerializeAroundWhile) {
  auto module = CreateNewVerifiedModule();

  auto cond_builder = HloComputation::Builder(TestName() + ".cond");
  cond_builder.AddInstruction(
      HloInstruction::CreateParameter(0, vec1_shape_, "param"));
  cond_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
  HloComputation* while_cond =
      module->AddEmbeddedComputation(cond_builder.Build());

  HloComputation* body_computation = module->AddEmbeddedComputation(
      MakeRematerializableComputation(".body"));
  HloComputation* entry_computation =
      module->AddEntryComputation(MakeRematerializableWhileComputation(
          while_cond, body_computation));

  EXPECT_EQ(entry_computation->instruction_count(), 7);
  EXPECT_EQ(body_computation->instruction_count(), 8);

  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(17 * 1024, module.get()));
  EXPECT_TRUE(changed);

  // Entry gains one rematerialized instruction; the body is unchanged.
  EXPECT_EQ(entry_computation->instruction_count(), 8);
  EXPECT_EQ(body_computation->instruction_count(), 8);
  CheckForRematInInstructionNames(
      ::testing::UnitTest::GetInstance()->current_test_info()->name());
}

// A lower limit forces rematerialization in both the entry computation and
// the while body.
TEST_F(RecomputeAndCompressHloRematerializationTest,
       RematerializeEntryAndWhileBody) {
  auto module = CreateNewVerifiedModule();

  auto cond_builder = HloComputation::Builder(TestName() + ".cond");
  cond_builder.AddInstruction(
      HloInstruction::CreateParameter(0, vec1_shape_, "param"));
  cond_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
  HloComputation* while_cond =
      module->AddEmbeddedComputation(cond_builder.Build());

  HloComputation* body_computation = module->AddEmbeddedComputation(
      MakeRematerializableComputation(".body"));
  HloComputation* entry_computation =
      module->AddEntryComputation(MakeRematerializableWhileComputation(
          while_cond, body_computation));

  EXPECT_EQ(entry_computation->instruction_count(), 7);
  EXPECT_EQ(body_computation->instruction_count(), 8);

  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(15 * 1024, module.get()));
  EXPECT_TRUE(changed);

  EXPECT_EQ(entry_computation->instruction_count(), 9);
  EXPECT_EQ(body_computation->instruction_count(), 9);
  CheckForRematInInstructionNames(
      ::testing::UnitTest::GetInstance()->current_test_info()->name());
}

// Rematerialization reaches through two levels of nested while loops.
TEST_F(RecomputeAndCompressHloRematerializationTest,
       RematerializeNestedComputations) {
  auto module = CreateNewVerifiedModule();

  auto cond_builder = HloComputation::Builder(TestName() + ".cond");
  cond_builder.AddInstruction(
      HloInstruction::CreateParameter(0, vec1_shape_, "param"));
  cond_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
  HloComputation* while_cond =
      module->AddEmbeddedComputation(cond_builder.Build());
  // Each while needs its own condition computation.
  HloComputation* while_cond_copy =
      module->AddEmbeddedComputation(while_cond->Clone());

  HloComputation* inner_computation = module->AddEmbeddedComputation(
      MakeRematerializableComputation(".inner"));
  HloComputation* middle_computation =
      module->AddEmbeddedComputation(MakeRematerializableWhileComputation(
          while_cond, inner_computation, ".middle"));
  HloComputation* entry_computation =
      module->AddEntryComputation(MakeRematerializableWhileComputation(
          while_cond_copy, middle_computation));

  EXPECT_EQ(entry_computation->instruction_count(), 7);
  EXPECT_EQ(middle_computation->instruction_count(), 7);
  EXPECT_EQ(inner_computation->instruction_count(), 8);

  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(13 * 1024, module.get()));
  EXPECT_TRUE(changed);

  // All three computations pick up rematerialized instructions.
  EXPECT_EQ(entry_computation->instruction_count(), 9);
  EXPECT_EQ(middle_computation->instruction_count(), 9);
  EXPECT_EQ(inner_computation->instruction_count(), 9);
  CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name()); } TEST_F(RecomputeAndCompressHloRematerializationTest, RngNotRematerialized) { auto module = CreateNewVerifiedModule(); auto builder = HloComputation::Builder(TestName()); auto param = builder.AddInstruction( HloInstruction::CreateParameter(0, scalar_shape_, "param")); auto rng = builder.AddInstruction(HloInstruction::CreateRng( vec1024_shape_, RandomDistribution::RNG_UNIFORM, {param, param})); auto tanh = builder.AddInstruction( HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kTanh, rng)); auto exp = builder.AddInstruction( HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kExp, rng)); auto add_0 = builder.AddInstruction( HloInstruction::CreateBinary(vec1024_shape_, HloOpcode::kAdd, rng, tanh)); auto add_1 = builder.AddInstruction(HloInstruction::CreateBinary( vec1024_shape_, HloOpcode::kAdd, rng, builder.AddInstruction(HloInstruction::CreateBinary( vec1024_shape_, HloOpcode::kAdd, exp, add_0)))); builder.AddInstruction(HloInstruction::CreateBinary( vec1024_shape_, HloOpcode::kAdd, rng, builder.AddInstruction(HloInstruction::CreateBinary( vec1024_shape_, HloOpcode::kAdd, tanh, add_1)))); HloComputation* entry_computation = module->AddEntryComputation(builder.Build()); auto count_rngs = [](const HloComputation* computation) { int64_t rng_count = 0; for (auto* instruction : computation->instructions()) { if (instruction->opcode() == HloOpcode::kRng) { ++rng_count; } } return rng_count; }; ASSERT_EQ(count_rngs(entry_computation), 1); const int64_t original_instruction_count = entry_computation->instruction_count(); TF_ASSERT_OK_AND_ASSIGN( bool changed, RunHloRematerialization( 4 * ByteSizeOf(vec1024_shape_), module.get())); EXPECT_TRUE(changed); EXPECT_EQ(count_rngs(entry_computation), 1); EXPECT_GT(entry_computation->instruction_count(), original_instruction_count); CheckForRematInInstructionNames( ::testing::UnitTest::GetInstance()->current_test_info()->name()); } 
// A broadcast consumed on several call boundaries can be rematerialized once
// per use site.
TEST_F(RecomputeAndCompressHloRematerializationTest,
       InstructionRematerializedMultipleTimes) {
  auto module = CreateNewVerifiedModule();
  HloComputation* subcomputation = nullptr;
  {
    auto builder = HloComputation::Builder(TestName() + ".subcomputation");
    auto param = builder.AddInstruction(
        HloInstruction::CreateParameter(0, vec1024_shape_, "param"));
    auto concat = builder.AddInstruction(HloInstruction::CreateConcatenate(
        ShapeUtil::MakeShape(xla::F32, {2048}), {param, param}, 0));
    builder.AddInstruction(
        HloInstruction::CreateSlice(vec1024_shape_, concat, {0}, {1024}, {1}));
    subcomputation = module->AddEmbeddedComputation(builder.Build());
  }

  // Entry: one broadcast feeding three add/call pairs.
  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape_, "param"));
  auto bcast = builder.AddInstruction(
      HloInstruction::CreateBroadcast(vec1024_shape_, param, {}));
  auto add_1 = builder.AddInstruction(HloInstruction::CreateBinary(
      vec1024_shape_, HloOpcode::kAdd, bcast, bcast));
  auto call_1 = builder.AddInstruction(
      HloInstruction::CreateCall(vec1024_shape_, {add_1}, subcomputation));
  auto add_2 = builder.AddInstruction(HloInstruction::CreateBinary(
      vec1024_shape_, HloOpcode::kAdd, bcast, call_1));
  auto call_2 = builder.AddInstruction(
      HloInstruction::CreateCall(vec1024_shape_, {add_2}, subcomputation));
  auto add_3 = builder.AddInstruction(HloInstruction::CreateBinary(
      vec1024_shape_, HloOpcode::kAdd, bcast, call_2));
  auto call_3 = builder.AddInstruction(
      HloInstruction::CreateCall(vec1024_shape_, {add_3}, subcomputation));
  auto add_4 = builder.AddInstruction(HloInstruction::CreateBinary(
      vec1024_shape_, HloOpcode::kAdd, bcast, call_3));
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build());

  // Counts broadcast instructions in `computation`.
  auto count_broadcasts = [](const HloComputation* computation) {
    int64_t bcast_count = 0;
    for (auto* instruction : computation->instructions()) {
      if (instruction->opcode() == HloOpcode::kBroadcast) {
        bcast_count++;
      }
    }
    return bcast_count;
  };

  // Before the pass: a single broadcast shared by all three adds.
  EXPECT_EQ(count_broadcasts(entry_computation), 1);
  EXPECT_EQ(entry_computation->instruction_count(), 9);

  EXPECT_EQ(add_2->operand(0), bcast);
  EXPECT_EQ(add_3->operand(0), bcast);
  EXPECT_EQ(add_4->operand(0), bcast);

  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(22 * 1024, module.get()));
  EXPECT_TRUE(changed);

  // The broadcast was rematerialized three times, once per consuming add.
  EXPECT_EQ(count_broadcasts(entry_computation), 4);
  EXPECT_EQ(entry_computation->instruction_count(), 12);

  // Each add now consumes a fresh broadcast of the parameter.
  EXPECT_NE(add_2->operand(0), bcast);
  EXPECT_THAT(add_2->operand(0), op::Broadcast(param));
  EXPECT_NE(add_3->operand(0), bcast);
  EXPECT_THAT(add_3->operand(0), op::Broadcast(param));
  EXPECT_NE(add_4->operand(0), bcast);
  EXPECT_THAT(add_4->operand(0), op::Broadcast(param));
  CheckForRematInInstructionNames(
      ::testing::UnitTest::GetInstance()->current_test_info()->name());
}

// The copy instruction must not be duplicated by rematerialization.
TEST_F(RecomputeAndCompressHloRematerializationTest, CopyNotRematerialized) {
  auto module = CreateNewVerifiedModule();
  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, vec1024_shape_, "param"));
  auto copy = builder.AddInstruction(
      HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kCopy, param));
  // Two independent negate chains consuming the copy.
  auto negate_a_1 = builder.AddInstruction(
      HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kNegate, copy));
  auto negate_a_2 = builder.AddInstruction(HloInstruction::CreateUnary(
      vec1024_shape_, HloOpcode::kNegate, negate_a_1));
  auto negate_b_1 = builder.AddInstruction(
      HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kNegate, copy));
  auto negate_b_2 = builder.AddInstruction(HloInstruction::CreateUnary(
      vec1024_shape_, HloOpcode::kNegate, negate_b_1));
  builder.AddInstruction(HloInstruction::CreateTuple({negate_a_2, negate_b_2}));
  HloComputation* entry_computation =
      module->AddEntryComputation(builder.Build());

  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(1 * 1024, module.get()));

  // Counts copy instructions in `computation`.
  auto count_copies = [](const HloComputation* computation) {
    int64_t copy_count = 0;
    for (auto* instruction : computation->instructions()) {
      if (instruction->opcode() == HloOpcode::kCopy) {
        copy_count++;
      }
    }
    return copy_count;
  };
  EXPECT_TRUE(changed);
  // Exactly one copy remains.
  EXPECT_EQ(count_copies(entry_computation), 1);
  CheckForRematInInstructionNames(
      ::testing::UnitTest::GetInstance()->current_test_info()->name());
}

// A value can be rematerialized even when it reaches its use through a
// bitcast; the bitcast chain is recreated as well.
TEST_F(RecomputeAndCompressHloRematerializationTest, ThroughBitcastRemat) {
  const std::string& hlo_string = R"( HloModule fusion, is_scheduled=true ENTRY %mycomp (param: f32[1]) -> f32[1] { %param = f32[1]{0} parameter(0) %reshape = f32[] reshape(f32[1]{0} %param) %broadcast = f32[1024,1]{1,0} broadcast(f32[] %reshape), dimensions={} %bitcast = f32[1024]{0} bitcast(f32[1024,1]{1,0} %broadcast) %negate = f32[1024,1]{1,0} negate(f32[1024,1]{1,0} %broadcast) %concatenate = f32[2048,1]{1,0} concatenate(f32[1024,1]{1,0} %negate, f32[1024,1]{1,0} %negate), dimensions={0} %slice = f32[1,1]{1,0} slice(f32[2048,1]{1,0} %concatenate), slice={[0:1], [0:1]} %bitcast.1 = f32[1]{0} bitcast(f32[1,1]{1,0} %slice) %concatenate.1 = f32[1025]{0} concatenate(f32[1024]{0} %bitcast, f32[1]{0} %bitcast.1), dimensions={0} ROOT %slice.1 = f32[1]{0} slice(f32[1025]{0} %concatenate.1), slice={[0:1]} } )";

  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));

  auto* computation = module->entry_computation();
  // Find and save the original broadcast which should get rematerialized.
  const HloInstruction* slice = computation->root_instruction();
  ASSERT_THAT(slice,
              op::Slice(op::Concatenate(op::Bitcast(op::Broadcast(_)), _)));
  const HloInstruction* concat = slice->operand(0);
  const HloInstruction* bcast = concat->operand(0)->operand(0);

  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(14 * 1024, module.get()));
  EXPECT_TRUE(changed);

  // Root is unchanged; the concat now consumes a new bitcast(broadcast).
  EXPECT_EQ(computation->root_instruction(), slice);

  const HloInstruction* remat_bitcast = concat->operand(0);
  const HloInstruction* remat_broadcast = remat_bitcast->operand(0);

  EXPECT_THAT(remat_broadcast, op::Broadcast(::testing::Ne(bcast)));

  // The rematerialized chain is scheduled immediately before the concat.
  EXPECT_EQ(module->schedule()
                .sequence(computation)
                .instructions()[computation->instruction_count() - 2],
            concat);
  EXPECT_EQ(module->schedule()
                .sequence(computation)
                .instructions()[computation->instruction_count() - 3],
            remat_bitcast);
  EXPECT_EQ(module->schedule()
                .sequence(computation)
                .instructions()[computation->instruction_count() - 4],
            remat_broadcast);
  CheckForRematInInstructionNames(
      ::testing::UnitTest::GetInstance()->current_test_info()->name());
}

// Regression test: rematerializing through bitcasts must terminate even when
// the limit is far too small to be met.
TEST_F(RecomputeAndCompressHloRematerializationTest,
       ThroughBitcastRematInfiniteLoop) {
  const std::string& hlo_string = R"( HloModule fusion, is_scheduled=true ENTRY %mycomp (param: f32[1]) -> f32[1024] { %param = f32[1]{0} parameter(0) %reshape = f32[] reshape(f32[1]{0} %param) %broadcast = f32[1024,1]{1,0} broadcast(f32[] %reshape), dimensions={} %bitcast = f32[1024]{0} bitcast(f32[1024,1]{1,0} %broadcast) %broadcast2 = f32[1024,1]{1,0} broadcast(f32[] %reshape), dimensions={} %bitcast2 = f32[1024]{0} bitcast(f32[1024,1]{1,0} %broadcast2) ROOT %add = f32[1024]{0} add(f32[1024]{0} %bitcast, f32[1024]{0} %bitcast2) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));

  auto* computation = module->entry_computation();
  const HloInstruction* add = computation->root_instruction();
  // 1KiB limit: far below what the module needs; the pass must still finish.
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(1024, module.get()));
  ASSERT_THAT(add, op::Add(op::Bitcast(op::Broadcast(_)),
                           op::Bitcast(op::Broadcast(_))));
  EXPECT_TRUE(changed);
  CheckForRematInInstructionNames(
      ::testing::UnitTest::GetInstance()->current_test_info()->name());
}

// A fusion producing a tuple can be rematerialized as a whole.
TEST_F(RecomputeAndCompressHloRematerializationTest, RematTupleShape) {
  const std::string& hlo_string = R"( HloModule fusion, is_scheduled=true %add_mul_comp { %p0 = f32[] parameter(0) %p1 = f32[] parameter(1) %x = f32[1024]{0} broadcast(f32[] %p0), dimensions={} %y = f32[1024]{0} broadcast(f32[] %p1), dimensions={} %add = f32[1024] add(%x, %y) %mul = f32[1024] multiply(%x, %y) ROOT %out = (f32[1024], f32[1024]) tuple(%add, %mul) } ENTRY %entry { %param.0 = f32[] parameter(0) %param.1 = f32[] parameter(1) %fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop, calls=%add_mul_comp %gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0 %add = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.1) %broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={} %mul = f32[1024]{0} multiply(f32[1024]{0} %add, f32[1024]{0} %broadcast.1) %gte.2 = f32[1024]{0} get-tuple-element(%fus), index=1 ROOT %add.2 = f32[1024]{0} add(f32[1024]{0} %mul, f32[1024]{0} %gte.2) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));

  const HloComputation* computation = module->entry_computation();
  const HloInstruction* add = computation->root_instruction();
  ASSERT_THAT(add, op::Add(op::Multiply(), op::GetTupleElement(op::Fusion())));
  const HloInstruction* fusion = add->operand(0)->operand(0);
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(11 * 1024, module.get()));
  EXPECT_TRUE(changed);
  // The root's fusion operand is a new (rematerialized) fusion.
  ASSERT_THAT(
      add, op::Add(op::Multiply(), AllOf(op::Fusion(), ::testing::Ne(fusion))));
  CheckForRematInInstructionNames(
      ::testing::UnitTest::GetInstance()->current_test_info()->name());
}

// As above, but the fusion result is consumed through two GTEs; both must
// resolve to the same rematerialized fusion.
TEST_F(RecomputeAndCompressHloRematerializationTest, RematTupleShapeDoubleUse) {
  const std::string& hlo_string = R"( HloModule fusion, is_scheduled=true %add_mul_comp { %p0 = f32[] parameter(0) %p1 = f32[] parameter(1) %x = f32[1024]{0} broadcast(f32[] %p0), dimensions={} %y = f32[1024]{0} broadcast(f32[] %p1), dimensions={} %add = f32[1024] add(%x, %y) %mul = f32[1024] multiply(%x, %y) ROOT %out = (f32[1024], f32[1024]) tuple(%add, %mul) } ENTRY %entry { %param.0 = f32[] parameter(0) %param.1 = f32[] parameter(1) %fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop, calls=%add_mul_comp %gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0 %add = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.1) %broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={} %mul = f32[1024]{0} multiply(f32[1024]{0} %add, f32[1024]{0} %broadcast.1) %gte.2 = f32[1024]{0} get-tuple-element(%fus), index=1 %gte.3 = f32[1024]{0} get-tuple-element(%fus), index=0 %add.2 = f32[1024]{0} add(f32[1024]{0} %mul, f32[1024]{0} %gte.2) ROOT %mul.2 = f32[1024]{0} multiply(f32[1024]{0} %add.2, f32[1024]{0} %gte.3) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));

  const HloComputation* computation = module->entry_computation();
  const HloInstruction* add = computation->root_instruction();
  ASSERT_THAT(add,
              op::Multiply(op::Add(op::Multiply(),
                                   op::GetTupleElement(op::Fusion())),
                           op::GetTupleElement(op::Fusion())));
  const HloInstruction* fusion = add->operand(0)->operand(0);
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(11 * 1024, module.get()));
  EXPECT_TRUE(changed);
  ASSERT_THAT(
      add,
      op::Multiply(
          op::Add(op::Multiply(), op::GetTupleElement(AllOf(
                                      op::Fusion(), ::testing::Ne(fusion)))),
          op::GetTupleElement(AllOf(op::Fusion(), ::testing::Ne(fusion)))));
  // Both GTE users point at the same rematerialized fusion instance.
  EXPECT_EQ(add->operand(0)->operand(1)->operand(0),
            add->operand(1)->operand(0));
  CheckForRematInInstructionNames(
      ::testing::UnitTest::GetInstance()->current_test_info()->name());
}

// Tuple-shaped fusion rematerialization also works when consumed through
// bitcasts.
TEST_F(RecomputeAndCompressHloRematerializationTest,
       RematTupleShapeThroughBitcasts) {
  const std::string& hlo_string = R"( HloModule fusion, is_scheduled=true %add_mul_comp { %p0 = f32[] parameter(0) %p1 = f32[] parameter(1) %x = f32[1024]{0} broadcast(f32[] %p0), dimensions={} %y = f32[1024]{0} broadcast(f32[] %p1), dimensions={} %add = f32[1024] add(%x, %y) %mul = f32[1024] multiply(%x, %y) ROOT %out = (f32[1024], f32[1024]) tuple(%add, %mul) } ENTRY %entry { %param.0 = f32[] parameter(0) %param.1 = f32[] parameter(1) %fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop, calls=%add_mul_comp %gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0 %add = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.1) %broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={} %mul = f32[1024]{0} multiply(f32[1024]{0} %add, f32[1024]{0} %broadcast.1) %gte.2 = f32[1024]{0} get-tuple-element(%fus), index=1 %bc.1 = f32[1024,1]{0,1} bitcast(%mul) %bc.2 = f32[1024,1]{0,1} bitcast(%gte.2) ROOT %add.2 = f32[1024,1]{0,1} add(f32[1024,1]{0,1} %bc.1, f32[1024,1]{0,1} %bc.2) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));

  const HloComputation* computation = module->entry_computation();
  const HloInstruction* add = computation->root_instruction();
  ASSERT_THAT(add, op::Add(op::Bitcast(op::Multiply()),
                           op::Bitcast(op::GetTupleElement(op::Fusion()))));
  const HloInstruction* fusion = add->operand(0)->operand(0)->operand(0);
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(11 * 1024, module.get()));
  EXPECT_TRUE(changed);
  ASSERT_THAT(add,
              op::Add(op::Bitcast(op::Multiply()),
                      op::Bitcast(AllOf(op::Fusion(), ::testing::Ne(fusion)))));
  CheckForRematInInstructionNames(
      ::testing::UnitTest::GetInstance()->current_test_info()->name());
}

// Rematerialization can look through an intervening tuple/GTE pair; the
// indirection is replaced by a direct rematerialized fusion.
TEST_F(RecomputeAndCompressHloRematerializationTest, RematThroughTuple) {
  const std::string& hlo_string = R"( HloModule fusion, is_scheduled=true %add_mul_comp { %p0 = f32[] parameter(0) %p1 = f32[] parameter(1) %x = f32[1024]{0} broadcast(f32[] %p0), dimensions={} %y = f32[1024]{0} broadcast(f32[] %p1), dimensions={} %add = f32[1024] add(%x, %y) %mul = f32[1024] multiply(%x, %y) ROOT %out = (f32[1024], f32[1024]) tuple(%add, %mul) } ENTRY %entry { %param.0 = f32[] parameter(0) %param.1 = f32[] parameter(1) %fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop, calls=%add_mul_comp %gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0 %gte.3 = f32[1024]{0} get-tuple-element(%fus), index=1 %add = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.3) %broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={} %mul = f32[1024]{0} multiply(f32[1024]{0} %add, f32[1024]{0} %broadcast.1) %tpl = (f32[1024]{0}, f32[1024]{0}) tuple(%gte.1, %add) %bc.1 = f32[1024,1]{0,1} bitcast(%mul) %gte.2 = f32[1024]{0} get-tuple-element(%tpl), index=0 ROOT %add.2 = f32[1024]{0} add(f32[1024]{0} %gte.2, f32[1024]{0} %add) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));

  const HloComputation* computation = module->entry_computation();
  const HloInstruction* add = computation->root_instruction();
  ASSERT_THAT(add, op::Add(op::GetTupleElement(
                               op::Tuple(op::GetTupleElement(op::Fusion()), _)),
                           op::Add()));
  const HloInstruction* tuple = add->operand(0)->operand(0);
  const HloInstruction* fusion = tuple->operand(0)->operand(0);
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(11 * 1024, module.get()));
  EXPECT_TRUE(changed);
  // The tuple/GTE indirection collapses into a direct, new fusion operand.
  ASSERT_THAT(add, op::Add(AllOf(op::Fusion(), ::testing::Ne(tuple),
                                 ::testing::Ne(fusion)),
                           op::Add()));
  CheckForRematInInstructionNames(
      ::testing::UnitTest::GetInstance()->current_test_info()->name());
}

// Rematerializing an all-gather must give the clone a fresh channel id.
TEST_F(RecomputeAndCompressHloRematerializationTest, AllGatherChannelId) {
  const std::string& hlo_string = R"( HloModule fusion, is_scheduled=true ENTRY %mycomp (param: f32[1]) -> f32[1] { %param = f32[1]{0} parameter(0) %reshape = f32[] reshape(f32[1]{0} %param) %broadcast = f32[256,1]{1,0} broadcast(f32[] %reshape), dimensions={} %ag = f32[1024,1]{1,0} all-gather(f32[256,1]{1,0} %broadcast), dimensions={0}, channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true %bitcast = f32[1024]{0} bitcast(f32[1024,1]{1,0} %ag) %negate = f32[1024,1]{1,0} negate(f32[1024,1]{1,0} %ag) %concatenate = f32[2048,1]{1,0} concatenate(f32[1024,1]{1,0} %negate, f32[1024,1]{1,0} %negate), dimensions={0} %slice = f32[1,1]{1,0} slice(f32[2048,1]{1,0} %concatenate), slice={[0:1], [0:1]} %bitcast.1 = f32[1]{0} bitcast(f32[1,1]{1,0} %slice) %concatenate.1 = f32[1025]{0} concatenate(f32[1024]{0} %bitcast, f32[1]{0} %bitcast.1), dimensions={0} ROOT %slice.1 = f32[1]{0} slice(f32[1025]{0} %concatenate.1), slice={[0:1]} } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));

  auto* computation = module->entry_computation();
  const HloInstruction* slice = computation->root_instruction();
  ASSERT_THAT(slice, op::Slice(op::Concatenate(
                         op::Bitcast(op::AllGather(op::Broadcast(_))), _)));

  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(14 * 1024, module.get()));
  EXPECT_TRUE(changed);

  // Root is unchanged.
  EXPECT_EQ(computation->root_instruction(), slice);

  const HloInstruction* original_ag = FindInstruction(module.get(), "ag");
  const HloInstruction* remat_ag = FindInstruction(module.get(), "ag.remat");

  EXPECT_NE(remat_ag, nullptr);
  EXPECT_TRUE(original_ag->channel_id().has_value());
  EXPECT_TRUE(remat_ag->channel_id().has_value());
  // The rematerialized all-gather gets the next channel id.
  EXPECT_EQ(*remat_ag->channel_id(), *original_ag->channel_id() + 1);
  CheckForRematInInstructionNames(
      ::testing::UnitTest::GetInstance()->current_test_info()->name());
}

// A fusion whose operand is itself a tuple-valued fusion can be
// rematerialized.
TEST_F(RecomputeAndCompressHloRematerializationTest, RematTupleArgFusion) {
  const std::string& hlo_string = R"( HloModule fusion, is_scheduled=true %add_mul_comp { %p0 = f32[] parameter(0) %p1 = f32[] parameter(1) %x = f32[1024]{0} broadcast(f32[] %p0), dimensions={} %y = f32[1024]{0} broadcast(f32[] %p1), dimensions={} %add = f32[1024] add(%x, %y) %mul = f32[1024] multiply(%x, %y) ROOT %out = (f32[1024], f32[1024]) tuple(%add, %mul) } %add_comp { %p0 = f32[] parameter(0) %p1 = f32[] parameter(1) ROOT %add = add(%p0, %p1) } %add_tuple_comp { %p = (f32[1024]{0}, f32[1024]{0}) parameter(0) %p0 = get-tuple-element(%p), index=0 %p1 = get-tuple-element(%p), index=1 ROOT %add = add(%p0, %p1) } ENTRY %entry { %param.0 = f32[] parameter(0) %param.1 = f32[] parameter(1) %fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop, calls=%add_mul_comp %gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0 %gte.3 = f32[1024]{0} get-tuple-element(%fus), index=1 %add.0 = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.3) %broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={} %add.1 = f32[1024]{0} add(f32[1024]{0} %add.0, f32[1024]{0} %broadcast.1) %c = f32[] constant(0) %reduce = f32[] reduce(%add.1, %c), dimensions={0}, to_apply=add_comp %fus.1 = f32[1024]{0} fusion(%fus), kind=kLoop, calls=%add_tuple_comp ROOT %tuple = tuple(%reduce, %fus.1) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));

  const HloComputation* computation = module->entry_computation();
  const HloInstruction* root = computation->root_instruction();
  ASSERT_THAT(root, op::Tuple(op::Reduce(), op::Fusion(op::Fusion())));
  const HloInstruction* fusion1 = root->operand(1);
  const HloInstruction* fusion0 = fusion1->operand(0);
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(11 * 1024, module.get()));
  EXPECT_TRUE(changed);
  // The inner fusion operand has been replaced by a rematerialized one.
  ASSERT_THAT(
      root, op::Tuple(op::Reduce(),
                      op::Fusion(AllOf(op::Fusion(), ::testing::Ne(fusion0)))));
  CheckForRematInInstructionNames(
      ::testing::UnitTest::GetInstance()->current_test_info()->name());
}

// Rematerializing a fusion containing a custom-call must keep the custom
// call's called computation scheduled for both the original and the clone.
TEST_F(RecomputeAndCompressHloRematerializationTest,
       RematFusionUpdateSchedule) {
  const std::string& hlo_string = R"( HloModule fusion, is_scheduled=true %custom_call_comp { %p = f32[1024]{0} parameter(0) ROOT %n = f32[1024]{0} negate(p) } %add_mul_comp { %p0 = f32[] parameter(0) %p1 = f32[] parameter(1) %x = f32[1024]{0} broadcast(f32[] %p0), dimensions={} %y = f32[1024]{0} broadcast(f32[] %p1), dimensions={} %add = f32[1024] add(%x, %y) %mul = f32[1024] multiply(%x, %y) %c = f32[1024] custom-call(%mul), custom_call_target="SomeCall", called_computations={custom_call_comp} ROOT %out = (f32[1024], f32[1024]) tuple(%add, %c) } ENTRY %entry { %param.0 = f32[] parameter(0) %param.1 = f32[] parameter(1) %fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop, calls=%add_mul_comp %gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0 %add = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.1) %broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={} %mul = f32[1024]{0} multiply(f32[1024]{0} %add, f32[1024]{0} %broadcast.1) %gte.2 = f32[1024]{0} get-tuple-element(%fus), index=1 %gte.3 = f32[1024]{0} get-tuple-element(%fus), index=0 %add.2 = f32[1024]{0} add(f32[1024]{0} %mul, f32[1024]{0} %gte.2) ROOT %mul.2 = f32[1024]{0} multiply(f32[1024]{0} %add.2, f32[1024]{0} %gte.3) } )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));

  const HloComputation* computation = module->entry_computation();
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloRematerialization(11 * 1024, module.get()));
  EXPECT_TRUE(changed);
  XLA_VLOG_LINES(1, module->ToString());
  const HloInstruction* add = computation->root_instruction();
  const HloInstruction* fusion = add->operand(0)->operand(0);
  ASSERT_THAT(
      add,
      op::Multiply(
          op::Add(op::Multiply(), op::GetTupleElement(AllOf(
                                      op::Fusion(), ::testing::Ne(fusion)))),
          op::GetTupleElement(AllOf(op::Fusion(), ::testing::Ne(fusion)))));
  // Locate the custom-call inside each of the two fusion instructions.
  const HloInstruction* fusion0 = add->operand(0)->operand(1)->operand(0);
  const HloInstruction* fusion1 = add->operand(1)->operand(0);
  auto it = std::find_if(fusion0->fused_instructions().begin(),
                         fusion0->fused_instructions().end(),
                         [](const HloInstruction* instr) {
                           return instr->opcode() == HloOpcode::kCustomCall;
                         });
  ASSERT_NE(it, fusion0->fused_instructions().end());
  auto it2 = std::find_if(fusion1->fused_instructions().begin(),
                          fusion1->fused_instructions().end(),
                          [](const HloInstruction* instr) {
                            return instr->opcode() == HloOpcode::kCustomCall;
                          });
  ASSERT_NE(it2, fusion1->fused_instructions().end());
  // The computations called by both custom-calls must remain scheduled.
  EXPECT_TRUE(module->schedule().is_computation_scheduled(
      (*it)->called_computations()[0]));
  EXPECT_TRUE(module->schedule().is_computation_scheduled(
      (*it2)->called_computations()[0]));
  CheckForRematInInstructionNames(
      ::testing::UnitTest::GetInstance()->current_test_info()->name());
}

// Fixture for compression-style rematerialization tests.  Note that, unlike
// the fixtures above, the first mode-config flag is disabled and a
// compact-layout callback is supplied (see RunHloRematerialization below).
class CompressingRematerializationTest : public RematerializationTestBase {
 protected:
  // Byte-size function that rounds the minor-most dimension of an array
  // shape up to a multiple of 64 (tuples get a fixed nominal size).
  static int64_t ShapeSizePadMinorTo64(const Shape& shape) {
    if
(shape.IsTuple()) { return 4; } Shape descending_shape = ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(shape); int64_t size = ShapeUtil::ByteSizeOfPrimitiveType(descending_shape.element_type()); for (int64_t i = 0; i < descending_shape.rank(); ++i) { int64_t dim = descending_shape.dimensions(i); if (i == descending_shape.rank() - 1) { dim = RoundUpTo<int64_t>(dim, 64); } size *= dim; } return size; } static absl::StatusOr<Shape> ChooseCompactLayoutForShape(const Shape& shape) { if (shape.rank() != 2) { return shape; } Shape result = shape; Layout layout = result.layout(); int64_t most_minor_index = layout.minor_to_major()[0]; int64_t second_minor_index = layout.minor_to_major()[1]; int64_t most_minor = result.dimensions(most_minor_index); int64_t second_minor = result.dimensions(second_minor_index); if (most_minor < second_minor) { Layout new_layout = layout; new_layout.set_minor_to_major(0, second_minor_index); new_layout.set_minor_to_major(1, most_minor_index); *result.mutable_layout() = new_layout; } return result; } absl::StatusOr<bool> RunHloRematerialization(int64_t memory_limit_bytes, HloModule* module, int64_t min_remat_size = 0) { TF_EXPECT_OK(verifier().Run(module).status()); HloRematerialization::RematerializationModeConfig config( false, true, false); auto shape_size_func = [](const Shape& shape) { return ShapeSizePadMinorTo64(shape); }; HloCostAnalysis cost_analysis(shape_size_func); HloRematerialization::Options options( cost_analysis, config, memory_limit_bytes, 1, 1, min_remat_size, ChooseCompactLayoutForShape, std::nullopt, {}); HloRematerialization::RematerializationSizes sizes; HloRematerialization remat(options, sizes); return remat.Run(module); } }; TEST_F(CompressingRematerializationTest, OnlyRematBigBuffer) { const std::string& hlo_string = R"( HloModule fusion, is_scheduled=true %add_float { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(f32[] %x, f32[] %y) } ENTRY %entry { %param.0 = f32[] parameter(0) 
%constant = f32[] constant(0) %broadcast.0 = f32[64,2]{1,0} broadcast(f32[] %param.0), dimensions={} %broadcast.1 = f32[10,2]{1,0} broadcast(f32[] %param.0), dimensions={} %negate = f32[64,2]{1,0} negate(f32[64,2]{1,0} broadcast.0) %reduce.0 = f32[] reduce(f32[64,2]{1,0} %negate, f32[] %constant), dimensions={1, 0}, to_apply=%add_float %reduce.1 = f32[] reduce(f32[64,2]{1,0} %broadcast.0, f32[] %constant), dimensions={1, 0}, to_apply=%add_float %reduce.2 = f32[] reduce(f32[10,2]{1,0} %broadcast.1, f32[] %constant), dimensions={1, 0}, to_apply=%add_float %add = f32[] add(f32[] %reduce.0, f32[] %reduce.1) ROOT %add.2 = f32[] add(f32[] %add, f32[] %reduce.2) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloRematerialization( 30 * 1024, module.get(), 10 * 1024)); EXPECT_TRUE(changed); HloInstruction* broadcast = module->entry_computation()->GetInstructionWithName("broadcast.0"); HloInstruction* broadcast_2 = module->entry_computation()->GetInstructionWithName("broadcast.1"); HloInstruction* reduce = module->entry_computation()->GetInstructionWithName("reduce.1"); HloInstruction* reduce_2 = module->entry_computation()->GetInstructionWithName("reduce.2"); EXPECT_THAT(reduce, op::Reduce(op::Copy(op::Copy(broadcast)), op::Constant())); EXPECT_THAT(reduce_2, op::Reduce(broadcast_2, op::Constant())); } TEST_F(CompressingRematerializationTest, SingleRemat) { const std::string& hlo_string = R"( HloModule fusion, is_scheduled=true %add_float { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(f32[] %x, f32[] %y) } ENTRY %entry { %param.0 = f32[] parameter(0) %constant = f32[] constant(0) %broadcast.0 = f32[64,2]{1,0} broadcast(f32[] %param.0), dimensions={} %negate = f32[64,2]{1,0} negate(f32[64,2]{1,0} broadcast.0) %reduce.0 = f32[] reduce(f32[64,2]{1,0} %negate, f32[] %constant), dimensions={1, 0}, to_apply=%add_float %reduce.1 = f32[] reduce(f32[64,2]{1,0} %broadcast.0, f32[] 
%constant), dimensions={1, 0}, to_apply=%add_float %add = f32[] add(f32[] %reduce.0, f32[] %reduce.1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloRematerialization( 30 * 1024, module.get())); EXPECT_TRUE(changed); HloInstruction* broadcast = module->entry_computation()->GetInstructionWithName("broadcast.0"); HloInstruction* reduce = module->entry_computation()->GetInstructionWithName("reduce.1"); EXPECT_THAT(reduce, op::Reduce(op::Copy(op::Copy(broadcast)), op::Constant())); } TEST_F(CompressingRematerializationTest, AvoidPathologicalCompress) { const std::string& hlo_string = R"( HloModule fusion, is_scheduled=true %add_float { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(f32[] %x, f32[] %y) } ENTRY %entry { %param.0 = f32[] parameter(0) %constant = f32[] constant(0) %broadcast.0 = f32[63,60]{1,0} broadcast(f32[] %param.0), dimensions={} %broadcast.1 = f32[16,64]{1,0} broadcast(f32[] %param.0), dimensions={} %reduce.0 = f32[] reduce(%broadcast.1, f32[] %constant), dimensions={1, 0}, to_apply=%add_float %reduce.1 = f32[] reduce(%broadcast.0, f32[] %constant), dimensions={1, 0}, to_apply=%add_float %add = f32[] add(f32[] %reduce.0, f32[] %reduce.1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloRematerialization( 16 * 1024, module.get())); EXPECT_FALSE(changed); HloInstruction* broadcast = module->entry_computation()->GetInstructionWithName("broadcast.0"); HloInstruction* reduce = module->entry_computation()->GetInstructionWithName("reduce.1"); EXPECT_THAT(reduce, op::Reduce(broadcast, op::Constant())); } TEST_F(CompressingRematerializationTest, AllUsersUseSameCopy) { const std::string& hlo_string = R"( HloModule fusion, is_scheduled=true %add_float { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(f32[] %x, f32[] %y) } ENTRY %entry { %param.0 = f32[] 
parameter(0) %constant = f32[] constant(0) %broadcast.0 = f32[64,2]{1,0} broadcast(f32[] %param.0), dimensions={} %negate = f32[64,2]{1,0} negate(f32[64,2]{1,0} broadcast.0) %reduce.0 = f32[] reduce(f32[64,2]{1,0} %negate, f32[] %constant), dimensions={1, 0}, to_apply=%add_float %reduce.1 = f32[] reduce(f32[64,2]{1,0} %negate, f32[] %constant), dimensions={1, 0}, to_apply=%add_float %reduce.2 = f32[] reduce(f32[64,2]{1,0} %broadcast.0, f32[] %constant), dimensions={1, 0}, to_apply=%add_float %add = f32[] add(f32[] %reduce.0, f32[] %reduce.1) %reduce.3 = f32[] reduce(f32[64,2]{1,0} %broadcast.0, f32[] %constant), dimensions={1, 0}, to_apply=%add_float %add.2 = f32[] add(f32[] %reduce.2, f32[] %reduce.3) ROOT %tuple = (f32[], f32[]) tuple (f32[] add, f32[] add.2) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloRematerialization( 30 * 1024, module.get())); EXPECT_TRUE(changed); HloInstruction* broadcast = module->entry_computation()->GetInstructionWithName("broadcast.0"); HloInstruction* reduce_2 = module->entry_computation()->GetInstructionWithName("reduce.2"); HloInstruction* reduce_3 = module->entry_computation()->GetInstructionWithName("reduce.3"); EXPECT_THAT(reduce_2, op::Reduce(op::Copy(op::Copy(broadcast)), op::Constant())); EXPECT_THAT(reduce_3, op::Reduce(op::Copy(op::Copy(broadcast)), op::Constant())); } class OffloadingRematerializationTest : public RematerializationTestBase { protected: absl::StatusOr<bool> RunHloRematerialization(int64_t memory_limit_bytes, HloModule* module, int64_t min_remat_size = 0) { TF_EXPECT_OK(verifier().Run(module).status()); if (!module->has_schedule()) { HloMemoryScheduler scheduler( [](const BufferValue& buffer) { return ByteSizeOf(buffer.shape()); }, ComputationSchedulerToModuleScheduler(DefaultMemoryScheduler)); TF_EXPECT_OK(scheduler.Run(module).status()); } HloCostAnalysis::Options hlo_cost_analysis_options; 
hlo_cost_analysis_options.shape_size = [](const Shape& shape) { return ByteSizeOf(shape); }; hlo_cost_analysis_options.set_flops_per_second(flops_per_second_); hlo_cost_analysis_options.set_transcendentals_per_second( transcendentals_per_second_); HloCostAnalysis cost_analysis(hlo_cost_analysis_options); HloRematerialization::RematerializationModeConfig config( false, false, true); HloRematerialization::HostMemoryOffloadConfig host_memory_offload_config( kHostMemorySpaceColor, copy_to_host_speed_, copy_from_host_speed_); HloRematerialization::Options options( cost_analysis, config, memory_limit_bytes, 1, 1, min_remat_size, nullptr, host_memory_offload_config, {}); HloRematerialization::RematerializationSizes sizes; HloRematerialization remat(options, sizes); return remat.Run(module); } void SetCopyToHostSpeed(float val) { copy_to_host_speed_ = val; } void SetCopyFromHostSpeed(float val) { copy_from_host_speed_ = val; } void SetFlopsPerSecond(float val) { flops_per_second_ = val; } void SetTranscendentalsPerSecond(float val) { transcendentals_per_second_ = val; } static constexpr const int64_t kHostMemorySpaceColor{5}; private: float copy_to_host_speed_{1.0f}; float copy_from_host_speed_{1.0f}; float flops_per_second_{1.0f}; float transcendentals_per_second_{1.0f}; }; TEST_F(OffloadingRematerializationTest, BasicSuccessfulHostOffload) { const std::string& hlo_string = R"( HloModule MyModule, is_scheduled=true, entry_computation_layout={(f32[1024]{0}, f32[1024]{0})->f32[1024]{0}} ENTRY MyModule { param_0 = f32[1024]{0} parameter(0) param_1 = f32[1024]{0} parameter(1) res_3 = f32[1024]{0} add(param_0, param_1) res_4 = f32[1024]{0} tanh(res_3) res_5 = f32[1024]{0} tanh(res_4) res_6 = f32[1024]{0} tanh(res_5) res_7 = f32[1024]{0} add(res_6, res_6) res_8 = f32[1024]{0} add(res_7, res_5) res_9 = f32[1024]{0} add(res_8, res_4) res_10 = f32[1024]{0} add(res_9, res_3) ROOT res_11 = f32[1024]{0} tanh(res_10) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, 
ParseAndReturnVerifiedModule(hlo_string)); SetCopyToHostSpeed(4.0 * 1024); SetCopyFromHostSpeed(4.0 * 1024); SetFlopsPerSecond(2 * 1024); SetTranscendentalsPerSecond(2 * 1024); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloRematerialization( 10 * 1024, module.get())); ASSERT_TRUE(changed); ASSERT_TRUE(module->has_schedule()); auto res_3_matcher = op::Add(op::Parameter(), op::Parameter()); auto res_3_rematted_matcher = op::AsyncCopy( xla::Layout::kDefaultMemorySpace, kHostMemorySpaceColor, op::AsyncCopy(kHostMemorySpaceColor, xla::Layout::kDefaultMemorySpace, res_3_matcher)); auto res_4_matcher = op::Tanh(res_3_matcher); auto res_4_rematted_matcher = op::AsyncCopy( xla::Layout::kDefaultMemorySpace, kHostMemorySpaceColor, op::AsyncCopy(kHostMemorySpaceColor, xla::Layout::kDefaultMemorySpace, res_4_matcher)); auto res_5_matcher = op::Tanh(res_4_matcher); auto res_6_matcher = op::Tanh(res_5_matcher); auto res_7_matcher = op::Add(res_6_matcher, res_6_matcher); auto res_8_matcher = op::Add(res_7_matcher, res_5_matcher); auto res_9_matcher = op::Add(res_8_matcher, res_4_rematted_matcher); auto res_10_matcher = op::Add(res_9_matcher, res_3_rematted_matcher); const auto instruction_sequence = module->schedule().sequence(module->entry_computation()); ASSERT_THAT(instruction_sequence.instructions().back(), op::Tanh(res_10_matcher)); } TEST_F(OffloadingRematerializationTest, SkipOffloadWhenBitcastIsInvolved) { const std::string& hlo_string = R"( HloModule MyModule, is_scheduled=true, entry_computation_layout={(f32[1024]{0}, f32[1024]{0})->f32[1024]{0}} ENTRY MyModule { param_0 = f32[1024]{0} parameter(0) param_1 = f32[1024]{0} parameter(1) res_3 = f32[1024]{0} add(param_0, param_1) bitcast = f32[1024]{0} bitcast(res_3) res_4 = f32[1024]{0} tanh(res_3) res_5 = f32[1024]{0} tanh(res_4) res_6 = f32[1024]{0} tanh(res_5) res_7 = f32[1024]{0} add(res_6, res_6) res_8 = f32[1024]{0} add(res_7, res_5) res_9 = f32[1024]{0} add(res_8, res_4) res_10 = f32[1024]{0} add(res_9, bitcast) ROOT 
res_11 = f32[1024]{0} tanh(res_10) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); SetCopyToHostSpeed(4.0 * 1024); SetCopyFromHostSpeed(4.0 * 1024); SetFlopsPerSecond(2 * 1024); SetTranscendentalsPerSecond(2 * 1024); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloRematerialization( 10 * 1024, module.get())); ASSERT_TRUE(changed); ASSERT_TRUE(module->has_schedule()); auto res_3_matcher = op::Add(op::Parameter(), op::Parameter()); auto res_4_matcher = op::Tanh(res_3_matcher); auto res_4_rematted_matcher = op::AsyncCopy( xla::Layout::kDefaultMemorySpace, kHostMemorySpaceColor, op::AsyncCopy(kHostMemorySpaceColor, xla::Layout::kDefaultMemorySpace, res_4_matcher)); auto res_5_matcher = op::Tanh(res_4_matcher); auto res_6_matcher = op::Tanh(res_5_matcher); auto res_7_matcher = op::Add(res_6_matcher, res_6_matcher); auto res_8_matcher = op::Add(res_7_matcher, res_5_matcher); auto res_9_matcher = op::Add(res_8_matcher, res_4_rematted_matcher); auto res_10_matcher = op::Add(res_9_matcher, op::Bitcast(res_3_matcher)); const auto instruction_sequence = module->schedule().sequence(module->entry_computation()); ASSERT_THAT(instruction_sequence.instructions().back(), op::Tanh(res_10_matcher)); } class IndirectUseTest : public RecomputeAndCompressHloRematerializationTest, public ::testing::WithParamInterface<bool> {}; TEST_P(IndirectUseTest, IndirectUseRematerialized) { const bool indirectly_used = GetParam(); auto module = CreateNewVerifiedModule(); HloComputation* subcomputation = nullptr; { auto builder = HloComputation::Builder(TestName() + ".subcomputation"); auto param = builder.AddInstruction( HloInstruction::CreateParameter(0, vec1024_shape_, "param")); auto concat = builder.AddInstruction(HloInstruction::CreateConcatenate( ShapeUtil::MakeShape(xla::F32, {2048}), {param, param}, 0)); builder.AddInstruction(HloInstruction::CreateSlice( vec1024_shape_, concat, {0}, {1024}, {1})); subcomputation = 
module->AddEmbeddedComputation(builder.Build()); } auto builder = HloComputation::Builder(TestName()); auto param = builder.AddInstruction( HloInstruction::CreateParameter(0, scalar_shape_, "param")); auto bcast = builder.AddInstruction( HloInstruction::CreateBroadcast(vec1024_shape_, param, {})); auto add_1 = builder.AddInstruction(HloInstruction::CreateBinary( vec1024_shape_, HloOpcode::kAdd, bcast, bcast)); auto call_1 = builder.AddInstruction( HloInstruction::CreateCall(vec1024_shape_, {add_1}, subcomputation)); auto add_2 = builder.AddInstruction(HloInstruction::CreateBinary( vec1024_shape_, HloOpcode::kAdd, bcast, call_1)); auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({bcast, add_2})); auto gte = builder.AddInstruction(HloInstruction::CreateGetTupleElement( vec1024_shape_, tuple, indirectly_used ? 0 : 1)); builder.AddInstruction( HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kNegate, gte)); HloComputation* entry_computation = module->AddEntryComputation(builder.Build()); EXPECT_EQ(entry_computation->instruction_count(), 8); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloRematerialization( 22 * 1024, module.get())); if (indirectly_used) { EXPECT_TRUE(changed); EXPECT_EQ(entry_computation->instruction_count(), 3); } else { EXPECT_TRUE(changed); EXPECT_EQ(entry_computation->instruction_count(), 9); } CheckForRematInInstructionNames( ::testing::UnitTest::GetInstance()->current_test_info()->name()); } INSTANTIATE_TEST_SUITE_P(IndirectUseTestInstantiation, IndirectUseTest, ::testing::Values(true, false)); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_rematerialization.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_rematerialization_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
4c6920cc-3aba-4b4e-867d-febd43cab405
cpp
tensorflow/tensorflow
llvm_compiler
third_party/xla/xla/service/llvm_compiler.cc
third_party/xla/xla/tests/llvm_compiler_test.cc
#include "xla/service/llvm_compiler.h" #include <memory> #include <utility> #include <vector> #include "absl/status/statusor.h" #include "absl/strings/str_format.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_module_group.h" #include "xla/service/executable.h" #include "xla/service/stream_pool.h" #include "tsl/platform/denormal.h" #include "tsl/platform/statusor.h" #include "tsl/profiler/lib/scoped_annotation.h" #ifdef __FAST_MATH__ #error "Don't build XLA with -ffast-math" #endif namespace xla { absl::StatusOr<std::vector<std::unique_ptr<Executable>>> LLVMCompiler::Compile( std::unique_ptr<HloModuleGroup> module_group, std::vector<std::vector<se::StreamExecutor*>> stream_execs, const CompileOptions& options) { tsl::port::ScopedDontFlushDenormal dont_flush_denormals; std::vector<std::unique_ptr<Executable>> result; std::vector<std::unique_ptr<HloModule>> modules = module_group->ConsumeModules(); for (size_t i = 0; i < modules.size(); i++) { tsl::profiler::ScopedAnnotation annotation{[&] { return absl::StrFormat("XlaCompile:#module=%s,program_id=%d#", modules[i]->name(), modules[i]->unique_id()); }}; TF_ASSIGN_OR_RETURN(modules[i], RunHloPasses(std::move(modules[i]), stream_execs[i][0], options)); TF_ASSIGN_OR_RETURN( std::unique_ptr<Executable> executable, RunBackend(std::move(modules[i]), stream_execs[i][0], options)); result.push_back(std::move(executable)); } return std::move(result); } }
#include "xla/service/llvm_compiler.h" #include <cstdint> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/status/status.h" #include "llvm/IR/Module.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module_group.h" #include "xla/literal_util.h" #include "xla/service/backend.h" #include "xla/stream_executor/device_description.h" #include "xla/stream_executor/stream_executor.h" #include "xla/test_helpers.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/casts.h" #include "tsl/platform/test.h" #include "tsl/platform/threadpool.h" namespace xla { namespace { using LLVMCompilerTest = HloTestBase; const char* const kHloText = R"( HloModule Add ENTRY main { constant.0 = f32[] constant(42.0) constant.1 = f32[] constant(43.0) ROOT add.0 = f32[] add(constant.0, constant.1) } )"; TEST_F(LLVMCompilerTest, HooksTest) { int pre_opt_hook_call_count = 0; int post_opt_hook_call_count = 0; auto pre_opt_hook = [&pre_opt_hook_call_count](const llvm::Module&) { ++pre_opt_hook_call_count; return absl::OkStatus(); }; auto post_opt_hook = [&post_opt_hook_call_count](const llvm::Module&) { ++post_opt_hook_call_count; return absl::OkStatus(); }; auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value(); LLVMCompiler* compiler = tensorflow::down_cast<xla::LLVMCompiler*>(backend().compiler()); compiler->SetPreOptimizationHook(pre_opt_hook); compiler->SetPostOptimizationHook(post_opt_hook); ASSERT_TRUE(compiler ->RunBackend(std::move(hlo_module), backend().default_stream_executor(), nullptr) .ok()); EXPECT_EQ(1, pre_opt_hook_call_count); EXPECT_EQ(1, post_opt_hook_call_count); } TEST_F(LLVMCompilerTest, DISABLED_MultiModuleCompilation) { auto hlo_module = ParseAndReturnVerifiedModule(kHloText).value(); auto hlo_module2 = ParseAndReturnVerifiedModule(kHloText).value(); std::vector<std::unique_ptr<HloModule>> modules; modules.push_back(std::move(hlo_module)); modules.push_back(std::move(hlo_module2)); auto module_group 
= std::make_unique<HloModuleGroup>("test_module_group", std::move(modules)); std::vector<std::vector<se::StreamExecutor*>> executors; executors.push_back({backend().default_stream_executor()}); executors.push_back({backend().default_stream_executor()}); EXPECT_IS_OK(backend().compiler()->Compile(std::move(module_group), std::move(executors), backend().memory_allocator())); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/llvm_compiler.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/llvm_compiler_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
134981e1-f110-441e-b364-4076f0bc05ec
cpp
tensorflow/tensorflow
gpu_p2p_pipeliner
third_party/xla/xla/service/gpu/gpu_p2p_pipeliner.cc
third_party/xla/xla/service/gpu/gpu_p2p_pipeliner_test.cc
#include "xla/service/gpu/gpu_p2p_pipeliner.h" #include <cstdint> #include <functional> #include <string> #include <utility> #include <vector> #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/pass/hlo_pass_pipeline.h" #include "xla/service/collective_ops_utils.h" #include "xla/service/collective_pipeliner.h" #include "xla/service/hlo_parser.h" #include "xla/util.h" namespace xla { namespace gpu { namespace { bool ShouldPipeline(const HloInstruction* instr) { if (!HloPredicateIsOp<HloOpcode::kRecvDone, HloOpcode::kSendDone>(instr)) { return false; } auto it = instr->frontend_attributes().map().find(kSendRecvPipelineAttr); if (it == instr->frontend_attributes().map().end()) { return false; } auto allowed_predecessor = [&]() { return instr->opcode() == HloOpcode::kRecvDone && instr->control_predecessors().size() == 1 && instr->control_predecessors()[0]->opcode() == HloOpcode::kSend; }; if (!instr->control_successors().empty() || (!instr->control_predecessors().empty() && !allowed_predecessor())) { return false; } bool is_pipelined = (instr->user_count() == 1 && instr->parent() != nullptr && instr->users()[0] == instr->parent()->root_instruction()); return !is_pipelined; } bool ShouldAllowLoopVariantParameterInChain(const HloInstruction* instr) { CHECK(instr->opcode() == HloOpcode::kGetTupleElement && instr->operand(0)->opcode() == HloOpcode::kParameter); return true; } absl::Status PostprocessP2PImpl( HloInstruction* instr, std::function<std::string(std::vector<ReplicaGroup>&)> transformer) { if (!HloPredicateIsOp<HloOpcode::kRecvDone, HloOpcode::kSendDone>(instr)) { return Internal("Expected SendDone/RecvDone as the pipelined collective"); } instr = instr->mutable_operand(0); if (!HloPredicateIsOp<HloOpcode::kRecv, HloOpcode::kSend>(instr)) { return Internal("Expected Send/Recv as the 
SendDone/RecvDone operand"); } auto validation_it = instr->frontend_attributes().map().find(kSendRecvValidationAttr); if (validation_it == instr->frontend_attributes().map().end() || validation_it->second == "invalid") { return absl::OkStatus(); } auto statusor_bounds = ParseReplicaGroupsOnly(validation_it->second); if (!statusor_bounds.ok()) { return statusor_bounds.status(); } std::string validation_attr = transformer(statusor_bounds.value()); xla::FrontendAttributes attributes = instr->frontend_attributes(); (*attributes.mutable_map())[kSendRecvValidationAttr] = validation_attr; instr->set_frontend_attributes(attributes); return absl::OkStatus(); } absl::Status PostprocessPeeledP2P(HloInstruction* instr) { auto transform_bounds = [&](std::vector<ReplicaGroup>& replica_groups) { std::vector<std::pair<int64_t, int64_t>> bounds; bounds.reserve(replica_groups.size()); bool all_invalid = true; for (const auto& replica_group : replica_groups) { int64_t lower_bound = replica_group.replica_ids(0); int64_t upper_bound = replica_group.replica_ids(1); if (lower_bound <= 0 && upper_bound >= 0) { all_invalid = false; bounds.push_back({0, 0}); } else { bounds.push_back({1, 0}); } } std::string validation_attr; if (all_invalid) { validation_attr = "invalid"; } else { validation_attr = "{" + absl::StrJoin(bounds, ",", absl::PairFormatter( [](std::string* out, int64_t value) { absl::StrAppend(out, "{", value); }, ",", [](std::string* out, int64_t value) { absl::StrAppend(out, value, "}"); })) + "}"; } return validation_attr; }; return PostprocessP2PImpl(instr, transform_bounds); }; absl::Status PostprocessRotatedP2P(HloInstruction* instr) { auto transform_bounds = [&](std::vector<ReplicaGroup>& replica_groups) { std::vector<std::pair<int64_t, int64_t>> bounds; bounds.reserve(replica_groups.size()); bool all_invalid = true; for (const auto& replica_group : replica_groups) { int64_t lower_bound = replica_group.replica_ids(0); int64_t upper_bound = replica_group.replica_ids(1); if 
(lower_bound <= upper_bound) { if (lower_bound >= 1) { --lower_bound; } if (upper_bound >= 1) { --upper_bound; } if (lower_bound <= upper_bound) { all_invalid = false; bounds.push_back({lower_bound, upper_bound}); } else { bounds.push_back({1, 0}); } } else { bounds.push_back({lower_bound, upper_bound}); } } std::string validation_attr; if (all_invalid) { validation_attr = "invalid"; } else { validation_attr = "{" + absl::StrJoin(bounds, ",", absl::PairFormatter( [](std::string* out, int64_t value) { absl::StrAppend(out, "{", value); }, ",", [](std::string* out, int64_t value) { absl::StrAppend(out, value, "}"); })) + "}"; } return validation_attr; }; return PostprocessP2PImpl(instr, transform_bounds); } } void AddP2PPipeliner(HloPassPipeline& pipeline) { CollectivePipeliner::Config config{ 0, INT64_MAX, true, false, true, CollectivePipeliner::PipeliningDirection::kBackward, ShouldPipeline, HloPredicateTrue, HloPredicateTrue, ShouldAllowLoopVariantParameterInChain, true, PostprocessPeeledP2P, PostprocessRotatedP2P}; pipeline.AddPass<CollectivePipeliner>(config); } } }
#include "xla/service/gpu/gpu_p2p_pipeliner.h"
#include <cstdint>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
namespace {

// Test fixture for the GPU P2P (send/recv) pipeliner.  Configures a module
// with 1 replica and 4 partitions and provides a helper that runs the
// pipeliner sandwiched between two HLO verifier passes.
class GpuP2PPipelinerTest : public HloTestBase {
 public:
  GpuP2PPipelinerTest() {
    const int64_t kNumReplicas = 1;
    const int64_t kNumComputations = 4;
    config_ = GetModuleConfigForTest(kNumReplicas, kNumComputations);
  }

  // Runs verifier -> AddP2PPipeliner -> verifier; returns whether any pass
  // changed the module.
  absl::StatusOr<bool> RunOptimizer(HloModule* module) {
    HloPassPipeline pipeline("optimizer");
    pipeline.AddPass<HloVerifier>(false, false);
    AddP2PPipeliner(pipeline);
    pipeline.AddPass<HloVerifier>(false, false);
    return pipeline.Run(module);
  }

 protected:
  HloModuleConfig config_;
};

// Pipelines a backward send/recv pair and checks that the peeled instance
// gets _xla_send_recv_validation="invalid" while the rotated in-loop
// instance gets the bounds shifted from {{1,7}} to {{0,6}}.
TEST_F(GpuP2PPipelinerTest, TransformRecvSendBackwardsWithMetaDataPostProcessing) {
  const char* kHloStr = R"( HloModule module cond { param = (u32[], u32[2]) parameter(0) count = get-tuple-element(param), index=0 ub = u32[] constant(10) ROOT result = pred[] compare(count, ub), direction=LT } body { param = (u32[], u32[2]) parameter(0) count = get-tuple-element(param), index=0 send-data = get-tuple-element(param), index=1 after-all.0 = token[] after-all() recv.0 = (u32[2], u32[], token[]) recv(after-all.0), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{1,0}}", _xla_send_recv_pipeline="0", _xla_send_recv_validation="{{1,7}}" } after-all.0.s = token[] after-all() send.0 = (u32[2], u32[], token[]) send(send-data,
after-all.0.s), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{1,0}}", _xla_send_recv_pipeline="0", _xla_send_recv_validation="{{1,7}}" } recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" }, control-predecessors={send.0} recv-data = u32[2] get-tuple-element(recv-done.0), index=0 c1 = u32[] constant(1) new_count = u32[] add(count, c1) r = u32[2] broadcast(c1), dimensions={} s = u32[2] add(r, recv-data) send-done.0 = token[] send-done(send.0), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } ROOT result = (u32[], u32[2]) tuple(new_count, s) } ENTRY test_computation { c0 = u32[] constant(0) c1 = u32[] constant(1) r = u32[] replica-id() a = u32[] add(c1, r) init = u32[2] broadcast(a), dimensions={} while_init = (u32[], u32[2]) tuple(c0, init) while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond ROOT result = u32[2] get-tuple-element(while_result), index=1 })";
  auto module = ParseAndReturnUnverifiedModule(kHloStr, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get()).value());
  XLA_VLOG_LINES(10, module->ToString());
  // Pipelining widens the while carry from 2 to 5 tuple elements.
  auto while_op = FindInstruction(module.get(), "while");
  EXPECT_EQ(while_op->opcode(), HloOpcode::kWhile);
  EXPECT_EQ(while_op->shape().tuple_shapes().size(), 5);
  // The peeled (".1") and rotated (".2") clones must share a channel id.
  auto recv1 =
      DynCast<HloRecvInstruction>(FindInstruction(module.get(), "recv.1"));
  EXPECT_NE(recv1, nullptr);
  auto recv2 =
      DynCast<HloRecvInstruction>(FindInstruction(module.get(), "recv.2"));
  EXPECT_NE(recv2, nullptr);
  EXPECT_EQ(recv1->channel_id(), recv2->channel_id());
  auto send1 =
      DynCast<HloSendInstruction>(FindInstruction(module.get(), "send.1"));
  EXPECT_NE(send1, nullptr);
  auto send2 =
      DynCast<HloSendInstruction>(FindInstruction(module.get(), "send.2"));
  EXPECT_NE(send2, nullptr);
  EXPECT_EQ(send1->channel_id(), send2->channel_id());
  // Peeled instance: validation bound {{1,7}} excludes iteration 0 ->
  // "invalid"; rotated instance: bounds shifted down by one -> {{0,6}}.
  const char* kPeeledAttr = "_xla_send_recv_validation=\"invalid\"";
  const char* kRotatedAttr = "_xla_send_recv_validation={{0,6}}";
  EXPECT_THAT(send1->ToString(), ::testing::HasSubstr(kPeeledAttr));
  EXPECT_THAT(recv1->ToString(), ::testing::HasSubstr(kPeeledAttr));
  EXPECT_THAT(send2->ToString(), ::testing::HasSubstr(kRotatedAttr));
  EXPECT_THAT(recv2->ToString(), ::testing::HasSubstr(kRotatedAttr));
}

// Pipelines a forward send/recv cycle (backward edge 3->0 plus the forward
// chain 0->1->2->3) and FileCheck-verifies the peeled prologue, the rewritten
// loop body, and the adjusted validation attributes.
TEST_F(GpuP2PPipelinerTest, SendRecvForwardCycle) {
  const char* kHloStr = R"( HloModule test while_body { inputs = (u32[], f32[2,2], f32[2,2]) parameter(0) iter = u32[] get-tuple-element(inputs), index=0 iter_increment = u32[] constant(1) next_iter = u32[] add(iter, iter_increment) weights = f32[2,2] get-tuple-element(inputs), index=2 partition-id = u32[] partition-id() zero = u32[] constant(0) compare = pred[] compare(partition-id, zero), direction=EQ broadcast = pred[2,2] broadcast(compare), dimensions={} data = f32[2,2] get-tuple-element(inputs), index=1 after-all = token[] after-all() send = (f32[2,2], u32[], token[]) send(data, after-all), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0", _xla_send_recv_source_target_pairs="{{3,0}}", _xla_send_recv_validation="{{3,10}}" } recv = (f32[2,2], u32[], token[]) recv(after-all), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0", _xla_send_recv_source_target_pairs="{{3,0}}", _xla_send_recv_validation="{{3,10}}" } recv-done = (f32[2,2], token[]) recv-done(recv), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"}, control-predecessors={send} recv-done-data = f32[2,2] get-tuple-element(recv-done), index=0 after-all.1 = token[] after-all() send.1 = (f32[2,2], u32[], token[]) send(data, after-all.1), channel_id=2, frontend_attributes={ _xla_send_recv_pipeline="1", _xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3}}", _xla_send_recv_validation="{{0,7},{1,8},{2,9}}" } recv.1 = (f32[2,2], u32[], token[]) recv(after-all.1), channel_id=2, frontend_attributes={ _xla_send_recv_pipeline="1", _xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3}}", _xla_send_recv_validation="{{0,7},{1,8},{2,9}}" } recv-done.1 =
(f32[2,2], token[]) recv-done(recv.1), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1"}, control-predecessors={send.1} recv-done-1-data = f32[2,2] get-tuple-element(recv-done.1), index=0 select = f32[2,2] select(broadcast, recv-done-data, recv-done-1-data) matmul = f32[2,2] dot(weights, select), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT result = (u32[], f32[2,2], f32[2,2]) tuple(next_iter, matmul, weights) send-done = token[] send-done(send), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"} send-done.1 = token[] send-done(send.1), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1"} } while_cond { inputs = (u32[], f32[2,2], f32[2,2]) parameter(0) iter = u32[] get-tuple-element(inputs), index=0 max_iter = u32[] constant(3) ROOT compare = pred[] compare(iter, max_iter), direction=LT } ENTRY test_computation { start_iter = u32[] constant(0) input_data = f32[2,2] parameter(0) input_weights = f32[2,2] parameter(1) input = (u32[], f32[2,2], f32[2,2]) tuple(start_iter, input_data, input_weights) while_result = (u32[], f32[2,2], f32[2,2]) while(input), condition=while_cond, body=while_body ROOT data_out = f32[2,2] get-tuple-element(while_result), index=1 } )";
  auto module = ParseAndReturnUnverifiedModule(kHloStr, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get()).value());
  // FileCheck over the rewritten module: the loop body keeps shifted
  // validation bounds, the while condition trip count drops to 2, and the
  // peeled first iteration appears in the entry computation.
  EXPECT_TRUE(RunFileCheck(module->ToString(), R"( CHECK: %[[RECV_BWD_START:.*]] = {{.*}} after-all() CHECK: %[[RECV_BWD:.*]] = {{.*}} recv(token[] %[[RECV_BWD_START:.*]]), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs={{[{][{]}}3,0{{[}][}]}},_xla_send_recv_validation={{[{][{]}}2,9{{[}][}]}}} CHECK: %[[RECV_DONE_BWD:.*]] = {{.*}} recv-done({{.*}} %[[RECV_BWD:.*]]), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"} CHECK: %[[RECV_FWD_START:.*]] = {{.*}} after-all() CHECK: %[[RECV_FWD:.*]] = {{.*}} recv(token[] %[[RECV_FWD_START:.*]]), channel_id=2,
frontend_attributes={_xla_send_recv_pipeline="1",_xla_send_recv_source_target_pairs={{[{][{]}}0,1},{1,2},{2,3{{[}][}]}},_xla_send_recv_validation={{[{][{]}}0,6},{0,7},{1,8{{[}][}]}}} CHECK: %[[RECV_DONE_FWD:.*]] = {{.*}} recv-done((f32[2,2]{1,0}, u32[], token[]) %[[RECV_FWD:.*]]), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1"} CHECK: %[[SEND_BWD:.*]] = {{.*}} send({{.*}} %[[RECV_BWD_START:.*]]), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs={{[{][{]}}3,0{{[}][}]}},_xla_send_recv_validation={{[{][{]}}2,9{{[}][}]}}} CHECK: %[[SEND_DONE_BWD:.*]] = {{.*}} send-done({{.*}} %[[SEND_BWD:.*]]), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"} CHECK: %[[SEND_FWD:.*]] = {{.*}} send({{.*}} %[[RECV_FWD_START:.*]]), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1",_xla_send_recv_source_target_pairs={{[{][{]}}0,1},{1,2},{2,3{{[}][}]}},_xla_send_recv_validation={{[{][{]}}0,6},{0,7},{1,8{{[}][}]}}} CHECK: %[[SEND_DONE_FWD:.*]] = {{.*}} send-done({{.*}} %[[SEND_FWD:.*]]), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1"} CHECK: %[[WHILE_COND:.*]] (cond_param: {{.*}} CHECK-NEXT: %[[COND_PARAM:.*]] = {{.*}} parameter(0) CHECK: %[[CURRENT_ITER:.*]] = {{.*}} get-tuple-element({{.*}} %[[COND_PARAM:.*]]), index=0 CHECK: %[[TWO:.*]] = {{.*}} constant(2) CHECK: ROOT %[[COMPARE:.*]] = pred[] compare({{.*}} %[[CURRENT_ITER:.*]], {{.*}} %[[TWO:.*]]), direction=LT CHECK: ENTRY %[[TEST_COMPUTATION:.*]] (input_data: {{.*}} CHECK: %[[RECV_BWD_DUMMY_START:.*]] = {{.*}} after-all() CHECK: %[[RECV_BWD_DUMMY:.*]] = {{.*}} recv(token[] %[[RECV_BWD_DUMMY_START:.*]]), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs={{[{][{]}}3,0{{[}][}]}},_xla_send_recv_validation="invalid"} CHECK: %[[RECV_DONE_BWD_DUMMY:.*]] = {{.*}} recv-done({{.*}} %[[RECV_BWD_DUMMY:.*]]), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"} CHECK:
%[[RECV_FWD_FIRST_ITER_START:.*]] = {{.*}} after-all() CHECK: %[[RECV_FWD_FIRST_ITER:.*]] = {{.*}} recv(token[] %[[RECV_FWD_FIRST_ITER_START:.*]]), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1",_xla_send_recv_source_target_pairs={{[{][{]}}0,1},{1,2},{2,3{{[}][}]}},_xla_send_recv_validation={{[{][{]}}0,0},{1,0},{1,0{{[}][}]}}} CHECK: %[[RECV_DONE_FWD_FIRST_ITER:.*]] = {{.*}} recv-done({{.*}} %[[RECV_FWD_FIRST_ITER:.*]]), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1"} CHECK: %[[SEND_BWD_DUMMY:.*]] = {{.*}} send({{.*}} %[[RECV_DUMMY_START:.*]]), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs={{[{][{]}}3,0{{[}][}]}},_xla_send_recv_validation="invalid"} CHECK: %[[SEND_DONE_BWD_DUMMY:.*]] = {{.*}} send-done({{.*}} %[[SEND_BWD_DUMMY:.*]]), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"} CHECK: %[[SEND_FWD_FIRST_ITER:.*]] = {{.*}} send({{.*}} %[[RECV_FWD_FIRST_ITER_START:.*]]), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1",_xla_send_recv_source_target_pairs={{[{][{]}}0,1},{1,2},{2,3{{[}][}]}},_xla_send_recv_validation={{[{][{]}}0,0},{1,0},{1,0{{[}][}]}}} CHECK: %[[SEND_DONE_FWD_FIRST_ITER:.*]] = {{.*}} send-done({{.*}} %[[SEND_FWD_FIRST_ITER:.*]]), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1"} CHECK: %[[START_LOOP_FROM_ITER_ONE:.*]] = u32[] constant(1) CHECK: %[[LOOP_INPUT:.*]] = {{.*}} tuple({{.*}} %[[START_LOOP_FROM_ITER_ONE:.*]]) CHECK: %[[WHILE:.*]] = {{.*}} while({{.*}} %[[LOOP_INPUT:.*]]), {{.*}} )")
                  .value());
}
}  // namespace
}  // namespace gpu
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_p2p_pipeliner.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_p2p_pipeliner_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
25f2c367-30d8-42b1-ae1a-153d697a43ab
cpp
tensorflow/tensorflow
gpu_float_support
third_party/xla/xla/service/gpu/gpu_float_support.cc
third_party/xla/xla/service/gpu/gpu_float_support_test.cc
#include "xla/service/gpu/gpu_float_support.h"
#include <utility>
#include <variant>
#include "absl/log/check.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/float_support.h"
#include "xla/service/gpu/fusions/triton/triton_support.h"
#include "xla/stream_executor/device_description.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {

// Returns true if `hlo` may mix the low- and high-precision types without a
// normalization pass inserting converts.  Beyond the base-class cases, a dot
// whose operands are both F16 (or both BF16) with an F32 result is allowed.
bool GpuFloatSupport::SupportsMixedPrecisions(const HloInstruction& hlo) const {
  if (FloatSupport::SupportsMixedPrecisions(hlo)) return true;
  switch (hlo.opcode()) {
    case HloOpcode::kDot: {
      CHECK_GE(hlo.operand_count(), HloDotInstruction::kOperands);
      const PrimitiveType lhs_type = hlo.operand(0)->shape().element_type();
      const PrimitiveType rhs_type = hlo.operand(1)->shape().element_type();
      const PrimitiveType result_type = hlo.shape().element_type();
      return (lhs_type == F16 && rhs_type == F16 && result_type == F32) ||
             (lhs_type == BF16 && rhs_type == BF16 && result_type == F32);
    }
    default:
      return false;
  }
}

// Returns true if `hlo` natively supports the configured low-precision type
// (so no convert needs to be inserted around it).
bool GpuFloatSupport::IsSupported(const HloInstruction& hlo) const {
  switch (hlo.opcode()) {
    // Collectives and dot share one body: F8E4M3FN requires at least Ampere
    // and F8E5M2 at least Hopper, and in both cases only inside a
    // Triton-fused computation; otherwise only BF16 is supported here.
    case HloOpcode::kAllReduce:
    case HloOpcode::kAllReduceStart:
    case HloOpcode::kAllReduceDone:
    case HloOpcode::kReduceScatter:
    case HloOpcode::kDot:
      using TypeAndCC = std::pair<
          PrimitiveType,
          stream_executor::CudaComputeCapability::CudaComputeCapabilities>;
      for (auto [type, cc] :
           {TypeAndCC(F8E4M3FN, se::CudaComputeCapability::AMPERE),
            TypeAndCC(F8E5M2, se::CudaComputeCapability::HOPPER)}) {
        if (LowPrecisionType() == type) {
          auto* cuda_compute_capability =
              std::get_if<se::CudaComputeCapability>(&compute_capability_);
          // F8 is only supported on new enough CUDA GPUs and only within
          // Triton GEMM fusions.
          return cuda_compute_capability &&
                 cuda_compute_capability->IsAtLeast(cc) &&
                 IsTritonFusedComputation(*hlo.parent());
        }
      }
      return LowPrecisionType() == BF16;
    // Pure data-movement / selection ops are precision-agnostic.
    case HloOpcode::kAllGather:
    case HloOpcode::kAllToAll:
    case HloOpcode::kBroadcast:
    case HloOpcode::kCollectivePermute:
    case HloOpcode::kConcatenate:
    case HloOpcode::kCopy:
    case HloOpcode::kDynamicSlice:
    case HloOpcode::kDynamicUpdateSlice:
    case HloOpcode::kGather:
    case HloOpcode::kPad:
    case HloOpcode::kReshape:
    case HloOpcode::kReverse:
    case HloOpcode::kScatter:
    case HloOpcode::kSelect:
    case HloOpcode::kSelectAndScatter:
    case HloOpcode::kSlice:
    case HloOpcode::kTranspose:
    case HloOpcode::kBitcast:
      return true;
    // Elementwise arithmetic: BF16 is kept only on Hopper or newer.
    case HloOpcode::kAdd:
    case HloOpcode::kSubtract:
    case HloOpcode::kMultiply: {
      if (LowPrecisionType() == BF16) {
        auto* cuda_compute_capability =
            std::get_if<se::CudaComputeCapability>(&compute_capability_);
        return cuda_compute_capability != nullptr &&
               cuda_compute_capability->IsAtLeastHopper();
      }
      return false;
    }
    default:
      return false;
  }
}
}  // namespace gpu
}  // namespace xla
#include "xla/service/gpu/gpu_float_support.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/float_normalization.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/hlo_verifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla::gpu {
namespace {

// Tests for GpuFloatSupport: checks which dot operand types survive
// FloatNormalization unchanged and which get wrapped in converts, both for
// plain entry-computation dots and for dots inside Triton GEMM fusions.
class FloatSupportTest : public HloTestBase {
 protected:
  FloatSupportTest() : HloTestBase(false, true) {}

  // Runs FloatNormalization configured with (cc, low, high) over `module`,
  // verifies the result, and returns whether the pass changed anything.
  bool Normalize(HloModule* module, se::GpuComputeCapability cc,
                 PrimitiveType low_precision_type,
                 PrimitiveType high_precision_type) {
    GpuFloatSupport float_support(cc, low_precision_type, high_precision_type);
    FloatNormalization normalization(&float_support);
    absl::StatusOr<bool> result = normalization.Run(module);
    EXPECT_IS_OK(result.status());
    HloVerifier verifier(false, true);
    EXPECT_IS_OK(verifier.Run(module).status());
    return result.value();
  }

  // Builds a computation with a single 3x3 dot(a, b) contracting dimension 1
  // of both operands.
  std::unique_ptr<HloComputation> CreateComputation(PrimitiveType lhs_type,
                                                    PrimitiveType rhs_type,
                                                    PrimitiveType result_type) {
    auto builder = HloComputation::Builder(TestName());
    Shape lhs_shape = ShapeUtil::MakeShape(lhs_type, {3, 3});
    Shape rhs_shape = ShapeUtil::MakeShape(rhs_type, {3, 3});
    Shape result_shape = ShapeUtil::MakeShape(result_type, {3, 3});
    HloInstruction* a = builder.AddInstruction(
        HloInstruction::CreateParameter(0, lhs_shape, "a"));
    HloInstruction* b = builder.AddInstruction(
        HloInstruction::CreateParameter(1, rhs_shape, "b"));
    PrecisionConfig precision_config;
    DotDimensionNumbers dot_dnums;
    dot_dnums.add_lhs_contracting_dimensions(1);
    dot_dnums.add_rhs_contracting_dimensions(1);
    builder.AddInstruction(HloInstruction::CreateDot(
        result_shape, a, b, dot_dnums, precision_config));
    return builder.Build();
  }

  // Normalizes a plain entry-computation dot and asserts, per operand,
  // whether a convert was inserted.
  void TestDotConversion(PrimitiveType lhs_type, PrimitiveType rhs_type,
                         PrimitiveType result_type, se::GpuComputeCapability cc,
                         bool should_convert_lhs, bool should_convert_rhs,
                         PrimitiveType low_precision_type,
                         PrimitiveType high_precision_type = F16) {
    auto module = CreateNewVerifiedModule();
    auto computation = module->AddEntryComputation(
        CreateComputation(lhs_type, rhs_type, result_type));
    EXPECT_EQ(
        Normalize(module.get(), cc, low_precision_type, high_precision_type),
        should_convert_lhs || should_convert_rhs);
    EXPECT_EQ(computation->root_instruction()->opcode(), HloOpcode::kDot);
    EXPECT_EQ(computation->root_instruction()->operand(0)->opcode() ==
                  HloOpcode::kConvert,
              should_convert_lhs);
    EXPECT_EQ(computation->root_instruction()->operand(1)->opcode() ==
                  HloOpcode::kConvert,
              should_convert_rhs);
  }

  // Same as TestDotConversion, but the dot lives inside a kCustom fusion
  // tagged as a Triton GEMM, which enables the F8 support paths.
  void TestTritonFusedDot(PrimitiveType lhs_type, PrimitiveType rhs_type,
                          PrimitiveType result_type,
                          se::GpuComputeCapability cc, bool should_convert_lhs,
                          bool should_convert_rhs,
                          PrimitiveType low_precision_type,
                          PrimitiveType high_precision_type = F16) {
    auto module = CreateNewVerifiedModule();
    auto computation = module->AddComputationAndUnifyNamesAndIds(
        CreateComputation(lhs_type, rhs_type, result_type), false);
    Shape lhs_shape = ShapeUtil::MakeShape(lhs_type, {3, 3});
    Shape rhs_shape = ShapeUtil::MakeShape(rhs_type, {3, 3});
    Shape result_shape = ShapeUtil::MakeShape(result_type, {3, 3});
    auto builder = HloComputation::Builder("main");
    HloInstruction* a = builder.AddInstruction(
        HloInstruction::CreateParameter(0, lhs_shape, "a"));
    HloInstruction* b = builder.AddInstruction(
        HloInstruction::CreateParameter(1, rhs_shape, "b"));
    HloInstruction* fusion = builder.AddInstruction(
        HloInstruction::CreateFusion(result_shape,
                                     HloInstruction::FusionKind::kCustom,
                                     {a, b}, computation));
    GpuBackendConfig config;
    config.mutable_fusion_backend_config()->set_kind(
        std::string(kTritonGemmFusionKind));
    CHECK_OK(fusion->set_backend_config(config));
    module->AddEntryComputation(builder.Build());
    EXPECT_EQ(
        Normalize(module.get(), cc, low_precision_type, high_precision_type),
        should_convert_lhs || should_convert_rhs);
    EXPECT_EQ(computation->root_instruction()->opcode(), HloOpcode::kDot);
    EXPECT_EQ(computation->root_instruction()->operand(0)->opcode() ==
                  HloOpcode::kConvert,
              should_convert_lhs);
    EXPECT_EQ(computation->root_instruction()->operand(1)->opcode() ==
                  HloOpcode::kConvert,
              should_convert_rhs);
  }
};

// Outside a Triton fusion, F8 dot operands must always be converted.
TEST_F(FloatSupportTest, ShouldAlwaysConvertFp8Dot) {
  TestDotConversion(F8E4M3FN, F8E4M3FN, F16,
                    se::CudaComputeCapability::Hopper(), true, true, F8E4M3FN);
  TestDotConversion(F8E4M3FN, F8E4M3FN, F32,
                    se::CudaComputeCapability::Hopper(), true, true, F8E4M3FN);
  TestDotConversion(F8E4M3FN, F8E4M3FN, F16,
                    se::CudaComputeCapability::Ampere(), true, true, F8E4M3FN);
  // NOTE(review): this call duplicates the Hopper/F32 case above; an
  // Ampere()/F32 variant was probably intended — confirm before changing.
  TestDotConversion(F8E4M3FN, F8E4M3FN, F32,
                    se::CudaComputeCapability::Hopper(), true, true, F8E4M3FN);
  TestDotConversion(F8E5M2, F8E5M2, F16, se::CudaComputeCapability::Ampere(),
                    true, true, F8E5M2);
  TestDotConversion(F8E5M2, F8E5M2, F32, se::CudaComputeCapability::Ampere(),
                    true, true, F8E5M2);
  TestDotConversion(F8E5M2, F8E4M3FN, F16, se::CudaComputeCapability::Hopper(),
                    true, false, F8E5M2);
  TestDotConversion(F8E5M2, F8E4M3FN, F32, se::CudaComputeCapability::Hopper(),
                    true, false, F8E5M2);
  TestDotConversion(F8E5M2, F16, F16, se::CudaComputeCapability::Hopper(),
                    true, false, F8E5M2);
  TestDotConversion(F8E5M2, F16, F32, se::CudaComputeCapability::Hopper(),
                    true, false, F8E5M2);
}

// Inside a Triton fusion, F8 is kept only where the GPU supports it
// (F8E4M3FN needs Ampere+, F8E5M2 needs Hopper+) and the result is F32.
// (Test name fixed: was "ShouldConverTritonUnsupportedFp8Dot".)
TEST_F(FloatSupportTest, ShouldConvertTritonUnsupportedFp8Dot) {
  TestTritonFusedDot(F8E4M3FN, F8E4M3FN, F16,
                     se::CudaComputeCapability::Hopper(), true, true, F8E4M3FN);
  TestTritonFusedDot(F8E4M3FN, F8E4M3FN, F32,
                     se::CudaComputeCapability::Hopper(), false, false,
                     F8E4M3FN);
  TestTritonFusedDot(F8E4M3FN, F8E4M3FN, F16,
                     se::CudaComputeCapability::Ampere(), true, true, F8E4M3FN);
  // NOTE(review): duplicates the Hopper/F32 case above; an Ampere()/F32
  // variant was probably intended — confirm before changing.
  TestTritonFusedDot(F8E4M3FN, F8E4M3FN, F32,
                     se::CudaComputeCapability::Hopper(), false, false,
                     F8E4M3FN);
  TestTritonFusedDot(F8E5M2, F8E5M2, F16, se::CudaComputeCapability::Ampere(),
                     true, true, F8E5M2);
  TestTritonFusedDot(F8E5M2, F8E5M2, F32, se::CudaComputeCapability::Ampere(),
                     true, true, F8E5M2);
  TestTritonFusedDot(F8E5M2, F8E4M3FN, F16,
                     se::CudaComputeCapability::Hopper(), true, false, F8E5M2);
  TestTritonFusedDot(F8E5M2, F8E4M3FN, F32,
                     se::CudaComputeCapability::Hopper(), false, false,
                     F8E5M2);
  TestTritonFusedDot(F8E5M2, F16, F16, se::CudaComputeCapability::Hopper(),
                     true, false, F8E5M2);
  TestTritonFusedDot(F8E5M2, F16, F32, se::CudaComputeCapability::Hopper(),
                     true, false, F8E5M2);
}

// BF16 dots are natively supported and must not be converted.
TEST_F(FloatSupportTest, ShouldKeepBf16OnAmpere) {
  TestDotConversion(BF16, BF16, F32, se::CudaComputeCapability::Ampere(),
                    false, false, BF16);
}

TEST_F(FloatSupportTest, ShouldKeepBf16OnHopper) {
  TestDotConversion(BF16, BF16, F32, se::CudaComputeCapability::Hopper(),
                    false, false, BF16);
}
}  // namespace
}  // namespace xla::gpu
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_float_support.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_float_support_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
914cb876-e268-4338-974a-bdc42eb9ee1a
cpp
tensorflow/tensorflow
triton_tiling_propagation
third_party/xla/xla/service/gpu/triton_tiling_propagation.cc
third_party/xla/xla/service/gpu/triton_tiling_propagation_test.cc
#include "xla/service/gpu/triton_tiling_propagation.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <list>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/layout.h"
#include "xla/permutation_util.h"
#include "xla/service/gpu/fusions/triton/triton_support.h"
#include "xla/service/gpu/fusions/triton/triton_support_legacy.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
namespace {

// Returns a copy of `dim_iter_specs` with trivial dimensions removed, i.e.
// dimensions described by a single fragment of count 1.
absl::flat_hash_map<int, TensorIterationSpec::DimIterationSpec>
FilterTrivialDims(
    const absl::flat_hash_map<int, TensorIterationSpec::DimIterationSpec>&
        dim_iter_specs) {
  absl::flat_hash_map<int, TensorIterationSpec::DimIterationSpec>
      non_trivial_dim_iteration_specs;
  for (const auto& [dim, dim_spec] : dim_iter_specs) {
    if (dim_spec.size() == 1 && dim_spec[0].count == 1) {
      continue;
    }
    non_trivial_dim_iteration_specs[dim] = dim_spec;
  }
  return non_trivial_dim_iteration_specs;
}
}  // namespace

// Returns the iteration spec for `dimension`, or nullptr if absent.
const TensorIterationSpec::DimIterationSpec* TensorIterationSpec::Find(
    const int dimension) const {
  if (auto it = dim_iteration_specs_.find(dimension);
      it != dim_iteration_specs_.end()) {
    return &it->second;
  }
  return nullptr;
}

// Returns the dimension ids present in this spec (hash-map order, i.e.
// unspecified order).
std::vector<int> TensorIterationSpec::GetDimensions() const {
  std::vector<int> result;
  result.reserve(dim_iteration_specs_.size());
  for (const auto& [dim, _] : dim_iteration_specs_) {
    result.push_back(dim);
  }
  return result;
}

// Two specs are physically equivalent if, after dropping trivial dimensions,
// they contain the same dimensions with pairwise physically-equivalent
// fragments.
bool TensorIterationSpec::IsPhysicallyEquivalent(
    const TensorIterationSpec& other) const {
  const absl::flat_hash_map<int, DimIterationSpec>
      non_trivial_dim_iteration_specs = FilterTrivialDims(dim_iteration_specs_);
  const absl::flat_hash_map<int, DimIterationSpec>
      other_non_trivial_dim_iteration_specs =
          FilterTrivialDims(other.dim_iteration_specs_);
  if (non_trivial_dim_iteration_specs.size() !=
      other_non_trivial_dim_iteration_specs.size()) {
    return false;
  }
  for (const auto& pair : non_trivial_dim_iteration_specs) {
    int dimension = pair.first;
    const DimIterationSpec& dim_iter_spec = pair.second;
    auto other_it = other_non_trivial_dim_iteration_specs.find(dimension);
    if (other_it == other_non_trivial_dim_iteration_specs.end()) {
      return false;
    }
    const DimIterationSpec& other_dim_iter_spec = other_it->second;
    if (dim_iter_spec.size() != other_dim_iter_spec.size()) {
      return false;
    }
    for (size_t i = 0; i < dim_iter_spec.size(); i++) {
      if (!dim_iter_spec[i].IsPhysicallyEquivalent(other_dim_iter_spec[i])) {
        return false;
      }
    }
  }
  return true;
}

std::string TensorIterationSpec::IterationSpecFragment::ToString() const {
  return absl::StrCat("{stride=", stride, ", count=", count,
                      ", slice_start=", slice_start,
                      ", sliced_count=", sliced_count, ", subfragments=[",
                      absl::StrJoin(subfragments, ", "), "]}");
}

std::string TensorIterationSpec::ToString() const {
  return absl::StrCat(
      "{",
      absl::StrJoin(dim_iteration_specs_, ", ",
                    [&](std::string* s, const auto& kv) {
                      absl::StrAppend(
                          s, kv.first, ": ", "[",
                          absl::StrJoin(kv.second, ", ",
                                        [&](std::string* ss, const auto& v) {
                                          absl::StrAppend(ss, v.ToString());
                                        }),
                          "]");
                    }),
      "}");
}

namespace triton_fusion {
using Fragment = DimensionOrder::Fragment;
using Fragments = DimensionOrder::Fragments;
using FragmentOrders = DimensionOrder::FragmentOrders;

// Builds the dimension order of a dot operand/output from its physical
// layout, walking minor-to-major.  A split-K batch dimension is folded into
// the contracting dimension it was split from.
DimensionOrder DimensionOrder::FromDotOperandOrOutput(
    const HloInstruction& hlo, const int split_k_dimension_index) {
  DimensionOrder dim_order;
  dim_order.tensor_fragments_order_.reserve(hlo.shape().rank());
  for (const int i : hlo.shape().layout().minor_to_major()) {
    int target_dim_number = i;
    if (i == split_k_dimension_index) {
      // NOTE(review): grammar in this message — likely meant "has to be
      // preceded"; cannot change string text in a doc-only pass.
      CHECK(!dim_order.tensor_fragments_order_.empty())
          << "The split-K batch dimension has be preceded by the contracting "
             "dimension it originates from by construction.";
      target_dim_number =
          dim_order.tensor_fragments_order_.back().dst_dim_number();
    }
    dim_order.dim_fragments_orders_[target_dim_number].push_back(
        dim_order.tensor_fragments_order_.size());
    dim_order.tensor_fragments_order_.push_back(
        Fragment{target_dim_number, hlo.shape().dimensions(i)});
  }
  return dim_order;
}

std::string DimensionOrder::Fragment::ToString() const {
  return absl::StrCat(dst_dim_number_, ":", count_, ":", slice_start_, "-",
                      sliced_count_);
}

std::string DimensionOrder::ToString() const {
  std::string ret = absl::StrJoin(tensor_fragments_order_, " - ",
                                  [](std::string* out, const Fragment& f) {
                                    absl::StrAppend(out, f.ToString(), " ");
                                  });
  absl::StrAppend(&ret, "|");
  for (const auto& [dim, fragments] : dim_fragments_orders_) {
    absl::StrAppend(&ret, dim, ":", absl::StrJoin(fragments, ","), " ");
  }
  return ret;
}

// Converts this logical dimension order into a physical iteration spec:
// consecutive fragments of the same destination dimension are merged (their
// counts multiplied and subfragment sizes recorded), strides accumulate
// minor-to-major, and size-1 fragments/dimensions are filtered out at the end.
TensorIterationSpec DimensionOrder::ToTensorIterationSpec() const {
  const Fragments& dim_fragments = TensorFragmentsOrder();
  TensorIterationSpec tensor_spec;
  int64_t accumulated_stride = 1;
  int last_dim = -1;
  for (int dim_order_index = 0; dim_order_index < dim_fragments.size();
       ++dim_order_index) {
    const DimensionOrder::Fragment& fragment = dim_fragments[dim_order_index];
    VLOG(6) << fragment.ToString();
    TensorIterationSpec::DimIterationSpec& dim_spec =
        tensor_spec[fragment.dst_dim_number()];
    if (last_dim == fragment.dst_dim_number()) {
      // Contiguous fragment of the same dimension: merge into the previous
      // entry.  Drop a trailing subfragment of size 1 first.
      if (!dim_spec.empty() && !dim_spec.back().subfragments.empty() &&
          dim_spec.back().subfragments.back() == 1) {
        dim_spec.back().subfragments.pop_back();
      }
      if (fragment.full_count() > 1) {
        CHECK(!dim_spec.empty());
        CHECK(!dim_spec.back().is_sliced()) << "Only the major-most
fragment can have an offset.";
        dim_spec.back().slice_start =
            fragment.slice_start() * dim_spec.back().count;
        dim_spec.back().sliced_count =
            fragment.sliced_count() * dim_spec.back().count;
        dim_spec.back().count *= fragment.full_count();
        dim_spec.back().subfragments.push_back(fragment.sliced_count());
      }
    } else {
      // New dimension (or non-contiguous continuation): start a new entry at
      // the current accumulated stride.
      dim_spec.push_back(TensorIterationSpec::IterationSpecFragment{
          accumulated_stride, fragment.full_count(), fragment.slice_start(),
          fragment.sliced_count(), {fragment.sliced_count()}});
    }
    accumulated_stride *= fragment.full_count();
    last_dim = fragment.dst_dim_number();
  }
  // Remove size-1 entries; they carry no iteration structure.
  for (int dim_idx : tensor_spec.GetDimensions()) {
    TensorIterationSpec::DimIterationSpec& dim_spec = tensor_spec[dim_idx];
    if (dim_spec.size() <= 1) continue;
    TensorIterationSpec::DimIterationSpec filtered_dim_spec;
    absl::c_copy_if(dim_spec, std::back_inserter(filtered_dim_spec),
                    [](const TensorIterationSpec::IterationSpecFragment& f) {
                      return f.count != 1;
                    });
    tensor_spec[dim_idx] = filtered_dim_spec;
  }
  tensor_spec.RemoveEmptyDimensions();
  return tensor_spec;
}

namespace {

// Returns the logical (layout) index of the dimension whose fragments carry
// `label`, or nullopt if no such dimension is found.
std::optional<int> LogicalIndexOfLabeledDimension(
    const Shape& shape, const DimensionOrder& dim_order, const int label) {
  auto fragment_it = dim_order.TensorFragmentsOrder().cbegin();
  for (int dim : shape.layout().minor_to_major()) {
    const int64_t dim_size = shape.dimensions()[dim];
    int64_t fragments_size = 1;
    while (fragments_size < dim_size) {
      fragments_size *= fragment_it->full_count();
      if (fragment_it->dst_dim_number() == label) {
        return dim;
      }
      ++fragment_it;
    }
  }
  return std::nullopt;
}

using Int64OrError = std::variant<int64_t, FusionDecision>;

// Merges two major-part-size requirements; kNoSplitRequirement acts as a
// neutral element, and differing concrete sizes conflict.
Int64OrError CombineSplitDimMajorPartSizeReqs(int64_t a, int64_t b) {
  if (a == b || b == kNoSplitRequirement) {
    return a;
  }
  if (a == kNoSplitRequirement) {
    return b;
  }
  return FusionDecision::Forbid("Conflicting splits of splittable dimension");
}
}  // namespace

// Combines `a` with `b_or_error`, propagating an existing error and merging
// the splittable-dimension major part sizes otherwise.
DotRequirementsOrError CombineDotRequirements(
    DotRequirements a, DotRequirementsOrError b_or_error) {
  if (std::holds_alternative<FusionDecision>(b_or_error)) {
    return b_or_error;
  }
  const DotRequirements& b = std::get<DotRequirements>(b_or_error);
  Int64OrError combined_size_req =
      CombineSplitDimMajorPartSizeReqs(a.splittable_dimension_major_part_size,
                                       b.splittable_dimension_major_part_size);
  if (std::holds_alternative<FusionDecision>(combined_size_req)) {
    return std::get<FusionDecision>(combined_size_req);
  }
  return DotRequirements(std::get<int64_t>(combined_size_req));
}

namespace {

// Checks that `order` is representable as a Triton tiling: no sliced
// non-major-most fragments, no transposes within a logical dimension, and at
// most one split — and only of the designated splittable dimension.
DotRequirementsOrError GetRequirementsIfSupportedOrder(
    const DimensionOrder& order, const DotProperties& properties) {
  VLOG(8) << order.ToString();
  int64_t split_dim_major_part = kNoSplitRequirement;
  const Fragments& tensor_dim_fragments = order.TensorFragmentsOrder();
  for (const auto& [dim_index, dim_fragments] : order.DimFragmentsOrders()) {
    CHECK(!dim_fragments.empty());
    for (int i = 0; i < dim_fragments.size() - 1; ++i) {
      if (tensor_dim_fragments[dim_fragments[i]].is_sliced()) {
        return FusionDecision::Forbid("Sliced non-major-most fragment.");
      }
    }
    int group_counter = 0;
    int last_seen_group_last_fragment_index = -1;
    auto fragment_it = dim_fragments.cbegin();
    while (true) {
      if (fragment_it == dim_fragments.cend()) {
        break;
      }
      // Group physically-consecutive fragments of this dimension together.
      int64_t grouped_size = tensor_dim_fragments[*fragment_it].full_count();
      while ((fragment_it + 1) != dim_fragments.cend() &&
             *(fragment_it + 1) == *fragment_it + 1) {
        ++fragment_it;
        grouped_size *= tensor_dim_fragments[*fragment_it].full_count();
      }
      if (grouped_size == 1) {
        ++fragment_it;
        continue;
      }
      if (last_seen_group_last_fragment_index > *fragment_it) {
        return FusionDecision::Forbid("Transpose within a dimension.");
      }
      ++group_counter;
      if (group_counter > 1) {
        // More than one group means the dimension is split; only the
        // designated splittable dimension may be split, and only once.
        const int splittable_dimension_index =
            properties.splittable_dimension_index;
        if (dim_index == splittable_dimension_index) {
          if (group_counter == 2) {
            if (split_dim_major_part != kNoSplitRequirement &&
                split_dim_major_part != grouped_size) {
              return FusionDecision::Forbid("Conflicting splits of splittable
dimension");
            }
            split_dim_major_part = grouped_size;
          } else if (group_counter > 2) {
            return FusionDecision::Forbid(
                "2nd split of a splittable dimension.");
          }
        } else {
          return FusionDecision::Forbid("Unsupported split of a dimension.");
        }
      }
      last_seen_group_last_fragment_index = *fragment_it;
      ++fragment_it;
    }
  }
  return DotRequirements(split_dim_major_part);
}

// Combines the requirements of `hlo`'s own order with those of all of its
// operands present in `dim_orders`, failing fast on the first conflict.
DotRequirementsOrError GetRequirementsIfSupportedOrders(
    const HloInstruction& hlo, const DimOrderMap& dim_orders,
    const DotProperties& properties) {
  const DotRequirements empty_requirements(kNoSplitRequirement);
  auto get_requirements =
      [&](const HloInstruction& instr) -> DotRequirementsOrError {
    if (auto it = dim_orders.find(&instr); it != dim_orders.end()) {
      return GetRequirementsIfSupportedOrder(it->second, properties);
    }
    return empty_requirements;
  };
  DotRequirements requirements = empty_requirements;
  for (const HloInstruction* operand : hlo.operands()) {
    DotRequirementsOrError requirements_or_error =
        CombineDotRequirements(requirements, get_requirements(*operand));
    if (std::holds_alternative<FusionDecision>(requirements_or_error)) {
      return requirements_or_error;
    }
    requirements = std::get<DotRequirements>(requirements_or_error);
  }
  return CombineDotRequirements(requirements, get_requirements(hlo));
}

// Elementwise ops preserve the dimension order unchanged: propagating
// output-to-input maps every operand to the source order, input-to-output
// maps only the instruction itself.
DimOrderMap GetPropagatedDimOrdersForElementwise(
    const HloInstruction& hlo, TransformDirection direction,
    const DimensionOrder& src_dim_order) {
  if (direction == TransformDirection::kOutputToInput) {
    DimOrderMap map;
    for (const HloInstruction* operand : hlo.operands()) {
      map.insert({operand, src_dim_order});
    }
    return map;
  }
  return {{&hlo, src_dim_order}};
}

// The "source" of a propagation step: the instruction itself when walking
// output-to-input, otherwise its first operand.
const HloInstruction& GetSourceHlo(const HloInstruction& hlo,
                                   TransformDirection direction) {
  CHECK_GE(hlo.operand_count(), 1);
  if (direction == TransformDirection::kOutputToInput) {
    return hlo;
  }
  return *hlo.operand(0);
}

using ConstInstructionVector = absl::InlinedVector<const HloInstruction*, 2>;

// The "destinations" of a propagation step: the instruction itself when
// walking input-to-output, otherwise all of its operands.
ConstInstructionVector GetDestHlos(const HloInstruction& hlo,
                                   TransformDirection direction) {
  if (direction == TransformDirection::kInputToOutput) {
    return {&hlo};
  }
  ConstInstructionVector hlos;
  hlos.reserve(hlo.operands().size());
  for (const HloInstruction* operand : hlo.operands()) {
    hlos.push_back(operand);
  }
  return hlos;
}

// Single-destination variant of GetDestHlos; requires exactly one operand.
const HloInstruction& GetDestHlo(const HloInstruction& hlo,
                                 TransformDirection direction) {
  CHECK_EQ(hlo.operand_count(), 1);
  if (direction == TransformDirection::kInputToOutput) {
    return hlo;
  }
  return *hlo.operand(0);
}

// Propagates a dimension order through a bitcast by re-partitioning the
// source fragments onto the destination shape's minor-to-major dimensions.
// NOTE(review): this function is truncated at the end of the visible region;
// the remainder of its body lies outside this view.
DimOrderMapOrError GetPropagatedDimOrdersForBitcast(
    const HloInstruction& hlo, const TransformDirection direction,
    const DimensionOrder& src_dim_order, const DotProperties& properties) {
  const HloInstruction& dst = GetDestHlo(hlo, direction);
  const Shape& dst_shape = dst.shape();
  const Fragments& src_fragments_order = src_dim_order.TensorFragmentsOrder();
  DimOrderMap dst_dim_orders;
  DimensionOrder& dst_dim_order =
      dst_dim_orders.insert({&dst, DimensionOrder()}).first->second;
  Fragments& dst_fragments_order = dst_dim_order.TensorFragmentsOrder();
  int64_t dst_remaining_size = 1;
  absl::flat_hash_map<const Fragment*, std::vector<int>> src_to_dst;
  auto dst_dim_it = dst_shape.layout().minor_to_major().cbegin();
  const auto dst_dim_end = dst_shape.layout().minor_to_major().cend();
  for (auto src_dim = src_fragments_order.cbegin();
       src_dim != src_fragments_order.cend(); ++src_dim) {
    auto add_new_fragment = [&](const Fragment& fragment) {
      dst_fragments_order.push_back(fragment);
      src_to_dst[&*src_dim].push_back(dst_fragments_order.size() - 1);
    };
    if (dst_remaining_size >= src_dim->full_count()) {
      // The source fragment fits entirely into the remaining destination
      // dimension; it must divide it evenly.
      if (dst_remaining_size % src_dim->full_count()) {
        return FusionDecision::Forbid("Unsupported bitcast");
      }
      add_new_fragment(*src_dim);
      dst_remaining_size /= src_dim->full_count();
    } else {
      // The source fragment spans past the remaining destination size and
      // must be subdivided; sliced fragments cannot be subdivided.
      int64_t src_remaining_size = src_dim->full_count();
      if (dst_remaining_size > 1) {
        if (src_remaining_size % dst_remaining_size ||
            (src_dim->is_sliced())) {
          return FusionDecision::Forbid("Unsupported bitcast");
        }
add_new_fragment( Fragment{src_dim->dst_dim_number(), dst_remaining_size}); src_remaining_size /= dst_remaining_size; dst_remaining_size = 1; } while (src_remaining_size > 1) { CHECK(dst_dim_it != dst_dim_end); int64_t dst_dim_size = dst_shape.dimensions(*dst_dim_it); int64_t new_fragment_size = dst_dim_size; if (dst_dim_size > src_remaining_size) { if (dst_dim_size % src_remaining_size) { return FusionDecision::Forbid("Unsupported bitcast"); } dst_remaining_size = dst_dim_size / src_remaining_size; new_fragment_size = src_remaining_size; } if (src_dim->is_sliced()) { return FusionDecision::Forbid("Unsupported bitcast"); } add_new_fragment( Fragment{src_dim->dst_dim_number(), new_fragment_size}); src_remaining_size /= new_fragment_size; ++dst_dim_it; } } } CHECK_EQ(dst_remaining_size, 1); while (dst_dim_it != dst_dim_end) { if (dst_shape.dimensions(*dst_dim_it) != 1) { return FusionDecision::Forbid("Unsupported bitcast"); } if (!dst_fragments_order.empty()) { dst_fragments_order.push_back( Fragment{dst_fragments_order.back().dst_dim_number(), 1}); src_to_dst[&src_fragments_order.back()].push_back( dst_fragments_order.size() - 1); } ++dst_dim_it; } FragmentOrders& dst_dim_fragment_orders = dst_dim_order.DimFragmentsOrders(); for (const auto& [dim_index, dim_sequence] : src_dim_order.DimFragmentsOrders()) { std::vector<int>& dst = dst_dim_fragment_orders[dim_index]; dst.reserve(dim_sequence.size()); for (const int src : dim_sequence) { std::copy(src_to_dst[&src_fragments_order[src]].cbegin(), src_to_dst[&src_fragments_order[src]].cend(), std::back_inserter(dst)); } } return dst_dim_orders; } DimOrderMapOrError GetPropagatedDimOrdersForDimAlteringOp( const HloInstruction& hlo, const TransformDirection direction, const DimensionOrder& src_dim_order, const DotProperties& properties) { std::list<Fragment> new_fragments; const HloInstruction& src = GetSourceHlo(hlo, direction); Fragments src_fragments_order = src_dim_order.TensorFragmentsOrder(); if (hlo.opcode() == 
HloOpcode::kSlice && ShapeUtil::IsEffectiveScalar(hlo.shape())) { return FusionDecision::Forbid("Slice to scalar is not implemented yet."); } std::vector<std::vector<Fragment*>> src_physical; src_physical.reserve(src.shape().rank()); if (src_fragments_order.size() < src.shape().rank()) { return FusionDecision::Forbid( "Cannot propagate further from trivial sized tensor"); } auto src_fragment_it = src_fragments_order.begin(); for (int64_t dim_index : src.shape().layout().minor_to_major()) { const int64_t dim_size = src.shape().dimensions(dim_index); int64_t subdim_size_accumulator = 1; std::vector<Fragment*> subdim_group; do { CHECK(src_fragment_it != src_fragments_order.end()); subdim_size_accumulator *= src_fragment_it->full_count(); subdim_group.push_back(&*src_fragment_it); ++src_fragment_it; } while (subdim_size_accumulator < dim_size); CHECK_EQ(subdim_size_accumulator, dim_size); src_physical.push_back(subdim_group); } std::vector<std::vector<Fragment*>> src_logical; src_logical.resize(src_physical.size()); for (int i = 0; i < src_physical.size(); ++i) { src_logical[src.shape().layout().minor_to_major(i)] = src_physical[i]; } DimOrderMap dst_dim_orders; int64_t concat_accumulated_size = 0; for (const HloInstruction* dst : GetDestHlos(hlo, direction)) { DimensionOrder& dst_dim_order = dst_dim_orders.insert({dst, DimensionOrder()}).first->second; std::vector<std::vector<Fragment*>> dst_logical; if (hlo.opcode() == HloOpcode::kTranspose) { const auto* transpose = Cast<HloTransposeInstruction>(&hlo); std::vector<int64_t> permutation(transpose->dimensions().cbegin(), transpose->dimensions().cend()); if (direction == TransformDirection::kInputToOutput) { permutation = InversePermutation(permutation); } dst_logical.resize(permutation.size()); for (int i = 0; i < permutation.size(); ++i) { dst_logical[permutation[i]] = src_logical[i]; } } else if (hlo.opcode() == HloOpcode::kBroadcast) { const auto* broadcast = Cast<HloBroadcastInstruction>(&hlo); 
dst_logical.resize(broadcast->dimensions().size()); for (int i = 0; i < broadcast->dimensions().size(); ++i) { dst_logical[i] = src_logical[broadcast->dimensions()[i]]; } } else if (hlo.opcode() == HloOpcode::kReduce) { if (dst != &hlo && hlo.operand_index(dst) == 1) { continue; } const auto* reduce = Cast<HloReduceInstruction>(&hlo); dst_logical.resize(src_logical.size() + reduce->dimensions().size()); if (reduce->dimensions().size() != 1) { return FusionDecision::Forbid("Unsupported reduction."); } else if (reduce->dimensions().front() != reduce->operand(0)->shape().rank() - 1) { return FusionDecision::Forbid("Only row reductions are supported."); } } else if (hlo.opcode() == HloOpcode::kConcatenate) { dst_logical.resize(src_logical.size()); for (int i = 0; i < src_logical.size(); ++i) { if (i == hlo.concatenate_dimension()) { if (src_logical[i].size() != 1 || src_logical[i][0]->is_sliced()) { return FusionDecision::Forbid("Unsupported concatenation."); } const Fragment& src_fragment = *src_logical[i][0]; Fragment& dst_fragment = new_fragments.emplace_back( src_fragment.dst_dim_number(), dst->shape().dimensions(i)); dst_fragment.set_slice(-concat_accumulated_size, dst->shape().dimensions(i)); concat_accumulated_size += dst->shape().dimensions(i); dst_logical[i].push_back(&dst_fragment); } else { dst_logical[i] = src_logical[i]; } } } else if (hlo.opcode() == HloOpcode::kCopy) { CHECK(ShapeUtil::SameDimensions(src.shape(), dst->shape())); dst_logical = src_logical; } else if (hlo.opcode() == HloOpcode::kPad) { if (dst != &hlo && hlo.operand_index(dst) == 1) { continue; } const auto* pad = Cast<HloPadInstruction>(&hlo); dst_logical.resize(src_logical.size()); for (int i = 0; i < src_logical.size(); ++i) { const int padding = pad->padding_config().dimensions(i).edge_padding_high(); CHECK_EQ(pad->padding_config().dimensions(i).edge_padding_low(), 0); CHECK_EQ(pad->padding_config().dimensions(i).interior_padding(), 0); if (padding == 0) { dst_logical[i] = 
src_logical[i]; } else { const std::vector<Fragment*>& fragments = src_logical[i]; CHECK_GE(fragments.size(), 2); CHECK(absl::c_all_of(fragments, [&](const Fragment* fragment) { return fragment->dst_dim_number() == fragments.front()->dst_dim_number(); })); std::vector<Fragment*> non_trivial_fragments; absl::c_copy_if(fragments, std::back_inserter(non_trivial_fragments), [](const Fragment* fragment) { return fragment->full_count() > 1; }); CHECK_EQ(non_trivial_fragments.size(), 2); new_fragments.emplace_back( non_trivial_fragments[0]->dst_dim_number(), non_trivial_fragments[0]->full_count() * non_trivial_fragments[1]->full_count() - padding); dst_logical[i] = {&new_fragments.back()}; } } } else if (hlo.opcode() == HloOpcode::kSlice) { const auto slice = Cast<HloSliceInstruction>(&hlo); dst_logical.resize(src_logical.size()); for (int dim = 0; dim < src_logical.size(); ++dim) { dst_logical[dim] = src_logical[dim]; if (slice->slice_limits(dim) - slice->slice_starts(dim) != dst->shape().dimensions(dim)) { if (dst_logical[dim].size() > 1) { return FusionDecision::Forbid("Slicing of fragmented dimension."); } auto fragment = dst_logical[dim].front(); fragment->set_count(dst->shape().dimensions(dim)); fragment->set_slice( fragment->slice_start() + slice->slice_starts(dim), fragment->sliced_count()); } } } else if (hlo.opcode() == HloOpcode::kDynamicSlice) { if (dst != &hlo && hlo.operand_index(dst) >= 1) { continue; } const auto dynamic_slice = Cast<HloDynamicSliceInstruction>(&hlo); dst_logical.resize(src_logical.size()); for (int dim = 0; dim < src_logical.size(); ++dim) { dst_logical[dim] = src_logical[dim]; if (dynamic_slice->slice_sizes(dim) != dst->shape().dimensions(dim)) { if (dst_logical[dim].size() > 1) { return FusionDecision::Forbid("Slicing of fragmented dimension."); } auto fragment = dst_logical[dim].front(); fragment->set_count(dst->shape().dimensions(dim)); fragment->set_slice(fragment->slice_start(), dst->shape().dimensions(dim)); } } } else { return 
FusionDecision::Forbid("Function called on a wrong instruction."); } absl::flat_hash_map<const Fragment*, int> src_to_dst; Fragments& dst_fragments_order = dst_dim_order.TensorFragmentsOrder(); FragmentOrders& dst_dim_fragments_order = dst_dim_order.DimFragmentsOrders(); absl::flat_hash_set<int> dim_numbers_present_in_dst; for (const int64_t dim_idx : dst->shape().layout().minor_to_major()) { for (const Fragment* subdim : dst_logical[dim_idx]) { dst_fragments_order.push_back(*subdim); src_to_dst[subdim] = dst_fragments_order.size() - 1; dim_numbers_present_in_dst.insert(subdim->dst_dim_number()); } } for (const auto& [dim_index, dim_sequence] : src_dim_order.DimFragmentsOrders()) { for (const int fragment_number : dim_sequence) { const auto it = src_to_dst.find(&src_fragments_order[fragment_number]); if (it == src_to_dst.cend()) { if (hlo.opcode() == HloOpcode::kBroadcast && src_fragments_order[fragment_number].full_count() > 1 && dim_numbers_present_in_dst.contains(dim_index)) { return FusionDecision::Forbid("Unsupported broadcast"); } continue; } dst_dim_fragments_order[dim_index].push_back(it->second); } } } return dst_dim_orders; } DimOrderMapOrError GetPropagatedDimOrders(const HloInstruction& hlo, const TransformDirection direction, const DimensionOrder& src_dim_order, const DotProperties& properties) { VLOG(7) << "Analyzing " << hlo.ToString(); if (hlo.opcode() != HloOpcode::kParameter && direction == TransformDirection::kOutputToInput && absl::c_any_of(hlo.users(), [](const HloInstruction* user) { return (user->opcode() == HloOpcode::kConcatenate || user->opcode() == HloOpcode::kDynamicSlice); })) { return FusionDecision::Forbid( "No fusion into concatenations or dynamic slice."); } if (hlo.opcode() == HloOpcode::kParameter || hlo_query::IsScalarConstant(&hlo)) { CHECK(direction == TransformDirection::kOutputToInput); return DimOrderMap{}; } else if (hlo.opcode() == HloOpcode::kTranspose || hlo.opcode() == HloOpcode::kCopy) { return 
GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order, properties); } else if (hlo.opcode() == HloOpcode::kBroadcast) { if (direction != TransformDirection::kOutputToInput) { return FusionDecision::Forbid("Unsupported broadcast direction."); } return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order, properties); } else if (hlo.opcode() == HloOpcode::kPad) { if (direction != TransformDirection::kOutputToInput) { return FusionDecision::Forbid("Unsupported pad direction."); } return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order, properties); } else if (hlo.operand_count() > 0 && legacy_triton::IsTritonSupportedElementwiseUpToFloatNormalization( hlo.opcode(), hlo.operand(0)->shape().element_type())) { return GetPropagatedDimOrdersForElementwise(hlo, direction, src_dim_order); } else if (hlo.opcode() == HloOpcode::kBitcast) { return GetPropagatedDimOrdersForBitcast(hlo, direction, src_dim_order, properties); } else if (hlo.opcode() == HloOpcode::kSlice) { if (direction != TransformDirection::kOutputToInput) { return FusionDecision::Forbid("Unsupported slice direction."); } return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order, properties); } else if (hlo.opcode() == HloOpcode::kDynamicSlice && direction == TransformDirection::kOutputToInput) { if (CodegenDecision decision = legacy_triton::IsTritonSupportedDynamicSlice( *Cast<HloDynamicSliceInstruction>(&hlo)); !decision.CanFuse()) { return decision; } return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order, properties); } else if (hlo.opcode() == HloOpcode::kReshape) { if (!ShapeUtil::ReshapeIsBitcast(hlo.operand(0)->shape(), hlo.shape())) { return FusionDecision::Forbid("Non-bitcast reshape."); } return GetPropagatedDimOrdersForBitcast(hlo, direction, src_dim_order, properties); } else if (hlo.opcode() == HloOpcode::kConcatenate && direction == TransformDirection::kOutputToInput) { int64_t noncontracting_dim_label = 
properties.noncontracting_dimension; const FragmentOrders& src_dim_fragments_orders = src_dim_order.DimFragmentsOrders(); auto noncontracting_dim_fragment_order_it = src_dim_fragments_orders.find(noncontracting_dim_label); if (noncontracting_dim_fragment_order_it != src_dim_fragments_orders.end()) { if (noncontracting_dim_fragment_order_it->second.size() > 1) { return FusionDecision::Forbid( "Concatenations on split non-contracting dimensions are " "unsupported."); } } auto dim = LogicalIndexOfLabeledDimension(hlo.shape(), src_dim_order, noncontracting_dim_label); if (!dim.has_value() || dim.value() != hlo.concatenate_dimension()) { return FusionDecision::Forbid("Unsupported concatenation."); } if (absl::c_any_of(hlo.operands(), [&hlo](const HloInstruction* operand) { constexpr int kMinConcatFragmentSize = 64; return operand->shape().dimensions(hlo.concatenate_dimension()) % kMinConcatFragmentSize != 0; })) { return FusionDecision::Forbid( "At least one operand of concatenation can not be perfectly tiled."); } return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order, properties); } return FusionDecision::Forbid("Unimplemented instruction."); } int64_t InputMinusOutputBytes(const HloInstruction& hlo) { CHECK(!hlo.shape().IsTuple()); int64_t input_size = 0; for (const HloInstruction* operand : hlo.operands()) { CHECK(!operand->shape().IsTuple()); input_size += ShapeUtil::ByteSizeOf(operand->shape()); } return input_size - ShapeUtil::ByteSizeOf(hlo.shape()); } bool CanNotBeFusedIntoAUser(const HloInstruction& hlo) { return hlo.IsRoot() || (hlo.user_count() == 1 && hlo.users()[0]->IsRoot() && hlo.users()[0]->opcode() == HloOpcode::kTuple); } constexpr int kIoToleranceBytes = 1024; bool IsInputWorthFusing(const HloInstruction& hlo) { if (InputMinusOutputBytes(hlo) <= kIoToleranceBytes) { return true; } if (hlo.user_count() > 1) { return false; } if (hlo.opcode() == HloOpcode::kSlice && hlo_query::AllOperandsAreParametersOrConstants(hlo)) { return 
true; } return hlo_query::AllOperandsAreParametersOrConstantsWithSingleUser(hlo); } bool IsOutputWorthFusing(const HloInstruction& hlo) { return CanNotBeFusedIntoAUser(hlo) || InputMinusOutputBytes(hlo) >= -kIoToleranceBytes; } FusionDecision IsConversionWorthFusing(const HloInstruction& input, se::GpuComputeCapability gpu_version) { if (ShapeUtil::ByteSizeOf(input.operand(0)->shape()) > ShapeUtil::ByteSizeOf(input.shape())) { return FusionDecision::Forbid("Narrowing conversion."); } return FusionDecision::Allow(); } } DimOrdersAndReqsOrError GetPropagatedDimOrdersAndRequirements( const HloInstruction& hlo, const DimensionOrder& src_dim_order, TransformDirection direction, const DotProperties& properties) { DimOrderMapOrError propagated_dim_orders_or_error = GetPropagatedDimOrders(hlo, direction, src_dim_order, properties); if (std::holds_alternative<FusionDecision>(propagated_dim_orders_or_error)) { return std::get<FusionDecision>(propagated_dim_orders_or_error); } DimOrderMap propagated_dim_orders = std::move(std::get<DimOrderMap>(propagated_dim_orders_or_error)); DotRequirementsOrError requirements_or_error = GetRequirementsIfSupportedOrders(hlo, propagated_dim_orders, properties); if (std::holds_alternative<FusionDecision>(requirements_or_error)) { return std::get<FusionDecision>(requirements_or_error); } return DimOrdersAndReqs{propagated_dim_orders, std::get<DotRequirements>(requirements_or_error)}; } DimOrdersAndReqsOrError GetPropagatedDimOrdersAndRequirementsIfProfitablyFusible( const HloInstruction& hlo, TransformDirection transform_direction, const std::optional<int>& src_operand_index, const DimensionOrder& src_dim_order, const se::GpuComputeCapability& gpu_version, const DotProperties& properties) { CHECK_EQ(transform_direction == TransformDirection::kInputToOutput, src_operand_index.has_value()); if (hlo.opcode() == HloOpcode::kTuple || hlo.opcode() == HloOpcode::kGetTupleElement) { return FusionDecision::Forbid("Unsupported instruction."); } if 
(hlo.opcode() == HloOpcode::kReduce || hlo.opcode() == HloOpcode::kAllReduce || hlo.opcode() == HloOpcode::kAllReduceStart || hlo.opcode() == HloOpcode::kAllReduceDone) { return FusionDecision::Forbid("Reductions are not fused yet."); } if (hlo.opcode() == HloOpcode::kPad) { return FusionDecision::Forbid("Pads are not fused yet."); } if (auto decision = legacy_triton::IsTritonSupportedInstruction(hlo, gpu_version); !decision.CanFuse()) { return decision; } DimOrdersAndReqsOrError result_or_error = GetPropagatedDimOrdersAndRequirements(hlo, src_dim_order, transform_direction, properties); if (std::holds_alternative<FusionDecision>(result_or_error)) { VLOG(5) << "Not fusing " << hlo.ToString() << " to the output due to the decision: " << std::get<FusionDecision>(result_or_error).Explain(); return result_or_error; } DimOrdersAndReqs dim_orders_and_requirements = std::move(std::get<DimOrdersAndReqs>(result_or_error)); int fusion_level = hlo.GetModule()->config().debug_options().xla_gpu_triton_fusion_level(); if (transform_direction == TransformDirection::kOutputToInput) { if (fusion_level < 2) { if (hlo.opcode() == HloOpcode::kConvert) { if (FusionDecision decision = IsConversionWorthFusing(hlo, gpu_version); !decision) { return decision; } } else if (hlo.IsElementwise() && hlo.opcode() != HloOpcode::kCopy) { return FusionDecision::Forbid("Ignored elementwise operation"); } } else { bool accepted = false; if (hlo.IsElementwise() && hlo.operand_count() == 2) { for (const HloInstruction* operand : hlo.operands()) { if (operand->opcode() == HloOpcode::kBroadcast && (operand->operand(0)->opcode() == HloOpcode::kParameter || operand->operand(0)->opcode() == HloOpcode::kConstant) && std::holds_alternative<DimOrdersAndReqs>( GetPropagatedDimOrdersAndRequirementsIfProfitablyFusible( *operand, TransformDirection::kOutputToInput, std::nullopt, dim_orders_and_requirements.dim_orders.at(operand), gpu_version, properties))) { accepted = true; break; } } } if (!accepted && 
!IsInputWorthFusing(hlo)) { return FusionDecision::Forbid( "Not obviously profitable to fuse as input."); } } } else { if (fusion_level < 2) { return FusionDecision::Forbid( "Skipping fusing outputs at low fusion levels."); } for (int i = 0; i < hlo.operand_count(); ++i) { const HloInstruction* operand = hlo.operand(i); if (i == *src_operand_index) { continue; } if ((operand->opcode() == HloOpcode::kBroadcast && ShapeUtil::IsScalar(operand->operand(0)->shape())) || operand->opcode() == HloOpcode::kParameter) { continue; } return FusionDecision::Forbid( "Has multiple inputs - not properly analyzed yet."); } if (!IsOutputWorthFusing(hlo)) { return FusionDecision::Forbid( "Not obviously profitable to fuse as output."); } } return dim_orders_and_requirements; } } } }
#include "xla/service/gpu/triton_tiling_propagation.h" #include <vector> #include <gtest/gtest.h> #include "xla/tests/hlo_test_base.h" namespace xla::gpu { namespace { using TritonTilingPropagationTest = HloTestBase; using triton_fusion::DimensionOrder; DimensionOrder FromFragments(DimensionOrder::Fragments fragments) { DimensionOrder dim_order; DimensionOrder::Fragments& tensor_fragments_order = dim_order.TensorFragmentsOrder(); DimensionOrder::FragmentOrders& dim_fragments_orders = dim_order.DimFragmentsOrders(); for (const DimensionOrder::Fragment& fragment : fragments) { tensor_fragments_order.push_back(fragment); dim_fragments_orders[fragment.dst_dim_number()].push_back( tensor_fragments_order.size()); } return dim_order; } TEST_F( TritonTilingPropagationTest, DimensionOrdersRemainPhysicallyEquivalentAfterInsertingTrivialDimensions) { DimensionOrder::Fragment fragment_1(0, 97); DimensionOrder::Fragment fragment_2(0, 1); DimensionOrder dimension_order_1 = FromFragments({fragment_1, fragment_2}); DimensionOrder::Fragment fragment_3(0, 97); DimensionOrder::Fragment fragment_4(1, 1); DimensionOrder dimension_order_2 = FromFragments({fragment_3, fragment_4}); EXPECT_TRUE(dimension_order_1.IsPhysicallyEquivalent(dimension_order_2)); } TEST_F( TritonTilingPropagationTest, IterationSpecsRemainPhysicallyEquivalentAfterInsertingTrivialDimensions) { TensorIterationSpec::IterationSpecFragment fragment_1 = { 1, 97, 0, 97, {97}}; TensorIterationSpec spec_1; spec_1[0].push_back(fragment_1); TensorIterationSpec::IterationSpecFragment fragment_2 = { 1, 97, 0, 97, {97}}; TensorIterationSpec::IterationSpecFragment fragment_3 = { 97, 1, 0, 1, {1}}; TensorIterationSpec spec_2; spec_2[0].push_back(fragment_2); spec_2[1].push_back(fragment_3); EXPECT_TRUE(spec_1.IsPhysicallyEquivalent(spec_2)); } TEST_F(TritonTilingPropagationTest, DimensionsShouldNotBeRemovedByToTensorIterationSpec) { DimensionOrder::Fragment fragment_0(0, 97); DimensionOrder::Fragment fragment_1(1, 1); 
DimensionOrder dimension_order = FromFragments({fragment_0, fragment_1}); TensorIterationSpec spec = dimension_order.ToTensorIterationSpec(); const TensorIterationSpec::DimIterationSpec* dim_spec_0 = spec.Find(0); EXPECT_NE(dim_spec_0, nullptr); EXPECT_EQ(dim_spec_0->size(), 1); EXPECT_EQ(dim_spec_0->at(0).count, 97); const TensorIterationSpec::DimIterationSpec* dim_spec_1 = spec.Find(1); EXPECT_NE(dim_spec_1, nullptr); EXPECT_EQ(dim_spec_1->size(), 1); EXPECT_EQ(dim_spec_1->at(0).count, 1); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/triton_tiling_propagation.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/triton_tiling_propagation_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
22010223-07c7-4dc2-819a-01d14addc9b5
cpp
tensorflow/tensorflow
ir_emission_utils
third_party/xla/xla/service/cpu/ir_emission_utils.cc
third_party/xla/xla/service/cpu/ir_emission_utils_test.cc
#include "xla/service/cpu/ir_emission_utils.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/layout_util.h" #include "xla/service/cpu/cpu_runtime.h" #include "xla/shape_util.h" #include "xla/window_util.h" namespace xla { namespace cpu { int64_t GetMinimumAlignmentForArray( const Shape& shape, const TargetMachineFeatures& target_machine_features) { CHECK(LayoutUtil::IsDenseArray(shape)); int64_t allocation_size_bytes = ShapeUtil::ElementsIn(shape) * ShapeUtil::ByteSizeOfPrimitiveType(shape.element_type()); return target_machine_features.minimum_alignment_for_allocation( allocation_size_bytes); } bool PotentiallyImplementedAsEigenConvolution( const HloInstruction& convolution, const TargetMachineFeatures& target_machine_features) { const Shape& input_shape = convolution.operand(0)->shape(); const Shape& kernel_shape = convolution.operand(1)->shape(); const Shape& output_shape = convolution.shape(); auto is_aligned = [&](const Shape& shape) { return GetMinimumAlignmentForArray(shape, target_machine_features) >= TargetMachineFeatures::kEigenExpectedTensorAlignment; }; if (!is_aligned(input_shape) || !is_aligned(kernel_shape) || !is_aligned(output_shape)) { return false; } if (ShapeUtil::IsZeroElementArray(input_shape) || ShapeUtil::IsZeroElementArray(kernel_shape)) { return false; } CHECK( ShapeUtil::SameElementTypeIgnoringFpPrecision(input_shape, kernel_shape)); PrimitiveType primitive_type = input_shape.element_type(); if (primitive_type != F16 && primitive_type != F32) { return false; } if (window_util::HasWindowReversal(convolution.window())) { return false; } const ConvolutionDimensionNumbers& dnums = convolution.convolution_dimension_numbers(); const int64_t num_spatial_dims = dnums.output_spatial_dimensions_size(); if (num_spatial_dims < 1 || num_spatial_dims > 3) { return false; } for (int64_t i = 0; i < num_spatial_dims; ++i) { if (dnums.input_spatial_dimensions(i) != i + 1) { return false; } if (dnums.kernel_spatial_dimensions(i) != i) { return false; } 
if (dnums.output_spatial_dimensions(i) != i + 1) { return false; } } return dnums.input_batch_dimension() == 0 && dnums.input_feature_dimension() == input_shape.dimensions_size() - 1 && dnums.output_batch_dimension() == 0 && dnums.output_feature_dimension() == output_shape.dimensions_size() - 1 && dnums.kernel_input_feature_dimension() == kernel_shape.dimensions_size() - 2 && dnums.kernel_output_feature_dimension() == kernel_shape.dimensions_size() - 1; } } }
#include "xla/service/cpu/ir_emission_utils.h" #include <memory> #include "xla/service/cpu/target_machine_features_fake.h" #include "xla/test.h" #include "xla/tests/hlo_test_base.h" namespace xla { namespace { using IrEmitterTest = HloTestBase; TEST_F(IrEmitterTest, ConvWithZeroSizedKernelNotImplementedAsEigen) { const char* const hlo_string = R"( HloModule ModuleWithConv ENTRY Conv { input = f32[32,50,28,28]{3,2,1,0} parameter(0) kernel = f32[50,0,5,5]{3,2,1,0} parameter(1) ROOT convolution = f32[32,0,24,24]{3,2,1,0} convolution(input, kernel), window={size=5x5}, dim_labels=bf01_io01->bf01 } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); HloComputation* entry_computation = module->entry_computation(); HloInstruction* conv_instr = entry_computation->root_instruction(); cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features( [](int64_t shape_size) { return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment; }); EXPECT_FALSE(cpu::PotentiallyImplementedAsEigenConvolution( *conv_instr, target_machine_features)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/ir_emission_utils.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/ir_emission_utils_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
90d52628-50f5-4b95-b9ab-5399df301b62
cpp
tensorflow/tensorflow
fusion_deduplication_cache
third_party/xla/xla/service/gpu/fusion_deduplication_cache.cc
third_party/xla/xla/service/gpu/fusion_deduplication_cache_test.cc
#include "xla/service/gpu/fusion_deduplication_cache.h" #include <cstddef> #include <cstdint> #include <utility> #include "absl/container/flat_hash_map.h" #include "absl/hash/hash.h" #include "xla/hlo/ir/dfs_hlo_visitor.h" #include "xla/hlo/ir/hlo_clone_context.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/shape_util.h" namespace xla { namespace gpu { namespace { class HloInstructionPtrHash { public: size_t operator()(const HloInstruction* instr) const { return absl::HashOf(*instr); } }; class HloInstructionPtrEq { public: size_t operator()(const HloInstruction* instr1, const HloInstruction* instr2) const { auto operands_eq = [](const HloInstruction* a, const HloInstruction* b) { if (a == b) return true; return ShapeUtil::Equal(a->shape(), b->shape()); }; auto eq_computations = [](const HloComputation* a, const HloComputation* b) { return *a == *b; }; return instr1->Identical(*instr2, operands_eq, eq_computations); } }; } FusionDeduplicationCache FusionDeduplicationCache::Create( const HloModule& module) { absl::flat_hash_map<const HloInstruction*, InstructionId, HloInstructionPtrHash, HloInstructionPtrEq> deduplicated_id_map; absl::flat_hash_map<const HloInstruction*, InstructionId> instruction_id_map; int64_t instruction_count = module.instruction_count(); deduplicated_id_map.reserve(instruction_count); instruction_id_map.reserve(instruction_count); int64_t next_id = 0; for (const HloComputation* computation : module.computations()) { for (const HloInstruction* instruction : computation->instructions()) { auto it = deduplicated_id_map.emplace(instruction, next_id); if (it.second) { ++next_id; } instruction_id_map[instruction] = it.first->second; } } return FusionDeduplicationCache(next_id, std::move(instruction_id_map)); } FusionDeduplicationCache::InstructionId FusionDeduplicationCache::GetInstructionId(const HloInstruction& instruction) { return 
instruction_id_map_.at(&instruction); } FusionDeduplicationCache::FusionId FusionDeduplicationCache::GetFusionId( const HloInstruction& producer, const HloInstruction& consumer, int64_t consumer_operand_index) { FusionDeduplicationCache::FusionId fusion_id{GetInstructionId(producer), GetInstructionId(consumer), consumer_operand_index}; if (fusion_id_map_.emplace(fusion_id, next_id_).second) { ++next_id_; } return fusion_id; } FusionDeduplicationCache::FusionId FusionDeduplicationCache::GetFusionId( const HloInstruction& producer, const HloInstruction& consumer) { return GetFusionId(producer, consumer, consumer.operand_index(&producer)); } void FusionDeduplicationCache::UpdateFusedInstructionId( const HloInstruction& fusion_instruction, const HloInstruction& original_producer, const HloInstruction& original_consumer, int64_t consumer_operand_index) { instruction_id_map_[&fusion_instruction] = fusion_id_map_.at(GetFusionId( original_producer, original_consumer, consumer_operand_index)); } } }
#include "xla/service/gpu/fusion_deduplication_cache.h"

#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"

namespace xla {
namespace gpu {
namespace {

// Test helper: fuses `producer` into `consumer`. If `consumer` is not already
// a fusion, it is first wrapped in a new loop-fusion instruction that replaces
// it in the computation. A fusion producer is merged; a plain instruction is
// fused. The producer is removed once it has no remaining users.
// NOTE(review): callers below keep using the producer pointer after removal —
// presumably RemoveInstruction defers deallocation; verify against
// HloComputation's ownership semantics.
HloInstruction* Fuse(HloInstruction* producer, HloInstruction* consumer) {
  HloComputation* computation = consumer->parent();
  HloInstruction* fusion_instruction = consumer;
  if (consumer->opcode() != HloOpcode::kFusion) {
    fusion_instruction =
        computation->AddInstruction(HloInstruction::CreateFusion(
            consumer->shape(), HloInstruction::FusionKind::kLoop, consumer));
    TF_CHECK_OK(computation->ReplaceInstruction(consumer, fusion_instruction));
  }
  if (producer->opcode() == HloOpcode::kFusion) {
    fusion_instruction->MergeFusionInstruction(producer);
  } else {
    fusion_instruction->FuseInstruction(producer);
  }
  if (producer->user_count() == 0) {
    TF_CHECK_OK(computation->RemoveInstruction(producer));
  }
  return fusion_instruction;
}

using FusionDeduplicationCacheTest = HloTestBase;

// Two adds with same-shaped operands in one computation must share an
// instruction id (operands are compared by shape only).
TEST_F(FusionDeduplicationCacheTest, IdenticalInstructions_EqualId) {
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule test_module ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) add1 = f32[8] add(p0, p1) ROOT add2 = f32[8] add(add1, p1) })"));
  FusionDeduplicationCache cache = FusionDeduplicationCache::Create(*module);
  const HloInstruction* add2 = module->entry_computation()->root_instruction();
  const HloInstruction* add1 = add2->operand(0);
  EXPECT_EQ(cache.GetInstructionId(*add1), cache.GetInstructionId(*add2));
}

// Deduplication also applies across computations: equivalent adds in
// computation.1 and the entry computation share an id.
TEST_F(FusionDeduplicationCacheTest,
       IdenticalInstructionsInDifferentComputations_EqualId) {
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule test_module computation.1 { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) ROOT add1 = f32[8] add(p0, p1) } ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) ROOT add2 = f32[8] add(p0, p0) })"));
  FusionDeduplicationCache cache = FusionDeduplicationCache::Create(*module);
  const HloInstruction* add1 =
      module->GetComputationWithName("computation.1")->root_instruction();
  const HloInstruction* add2 = module->entry_computation()->root_instruction();
  EXPECT_EQ(cache.GetInstructionId(*add1), cache.GetInstructionId(*add2));
}

// Fusing equivalent (producer, consumer) pairs at the same operand index
// yields equal fusion ids, and the resulting fusion instructions get equal
// instruction ids after UpdateFusedInstructionId.
TEST_F(FusionDeduplicationCacheTest, IdenticalFusionInstructions_EqualId) {
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule test_module ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) log1 = f32[8] log(p0) add1 = f32[8] add(log1, p1) log2 = f32[8] log(add1) ROOT add2 = f32[8] add(log2, p0) })"));
  HloComputation* entry_computation = module->entry_computation();
  auto* add1 = entry_computation->GetInstructionWithName("add1");
  auto* add2 = entry_computation->GetInstructionWithName("add2");
  auto* log1 = entry_computation->GetInstructionWithName("log1");
  auto* log2 = entry_computation->GetInstructionWithName("log2");
  FusionDeduplicationCache cache = FusionDeduplicationCache::Create(*module);
  // Pre-fusion sanity: equivalent instructions share ids, log != add.
  EXPECT_EQ(cache.GetInstructionId(*add1), cache.GetInstructionId(*add2));
  EXPECT_EQ(cache.GetInstructionId(*log1), cache.GetInstructionId(*log2));
  EXPECT_NE(cache.GetInstructionId(*add1), cache.GetInstructionId(*log1));
  // Both log->add pairs fuse at operand index 0 of the add.
  EXPECT_EQ(cache.GetFusionId(*log1, *add1), cache.GetFusionId(*log2, *add2));
  HloInstruction* fusion1 = Fuse(log1, add1);
  cache.UpdateFusedInstructionId(*fusion1, *log1, *add1, 0);
  HloInstruction* fusion2 = Fuse(log2, add2);
  cache.UpdateFusedInstructionId(*fusion2, *log2, *add2, 0);
  EXPECT_EQ(cache.GetInstructionId(*fusion1), cache.GetInstructionId(*fusion2));
}

// Same producer/consumer classes but fused at different operand indices
// (log2 is operand 1 of add2 here) must yield distinct fusion ids and
// distinct ids for the resulting fusion instructions.
TEST_F(FusionDeduplicationCacheTest,
       DifferentConsumerOperandIndex_DifferentId) {
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule test_module ENTRY main { p0 = f32[8] parameter(0) p1 = f32[8] parameter(1) log1 = f32[8] log(p0) add1 = f32[8] add(log1, p1) log2 = f32[8] log(add1) ROOT add2 = f32[8] add(p0, log2) })"));
  HloComputation* entry_computation = module->entry_computation();
  auto* add1 = entry_computation->GetInstructionWithName("add1");
  auto* add2 = entry_computation->GetInstructionWithName("add2");
  auto* log1 = entry_computation->GetInstructionWithName("log1");
  auto* log2 = entry_computation->GetInstructionWithName("log2");
  FusionDeduplicationCache cache = FusionDeduplicationCache::Create(*module);
  EXPECT_NE(cache.GetFusionId(*log1, *add1), cache.GetFusionId(*log2, *add2));
  HloInstruction* fusion1 = Fuse(log1, add1);
  cache.UpdateFusedInstructionId(*fusion1, *log1, *add1, 0);
  HloInstruction* fusion2 = Fuse(log2, add2);
  cache.UpdateFusedInstructionId(*fusion2, *log2, *add2, 1);
  EXPECT_NE(cache.GetInstructionId(*fusion1), cache.GetInstructionId(*fusion2));
}

}  // namespace
}  // namespace gpu
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusion_deduplication_cache.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusion_deduplication_cache_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
bea3fc56-baf4-4f52-a00f-73076a7b90ad
cpp
tensorflow/tensorflow
fusion_process_dump
third_party/xla/xla/service/gpu/fusion_process_dump.cc
third_party/xla/xla/service/gpu/fusion_process_dump_test.cc
#include "xla/service/gpu/fusion_process_dump.h"

#include <string>
#include <string_view>
#include <utility>

#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/fusion_process_dump.pb.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tools/hlo_module_loader.h"
#include "xla/util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace gpu {
namespace {

// Ensures there is a fusion instruction to fuse into: if `consumer` is
// already a fusion it is returned as-is; otherwise a new loop fusion named
// `fusion_name` wrapping `consumer` replaces it in `computation`.
// `producer` is unused here; it is accepted for signature symmetry with Fuse.
HloInstruction* AddFusionInstruction(HloInstruction* producer,
                                     HloInstruction* consumer,
                                     HloComputation* computation,
                                     std::string_view fusion_name) {
  if (consumer->opcode() == HloOpcode::kFusion) {
    return consumer;
  }
  auto kind = HloInstruction::FusionKind::kLoop;
  auto fusion_instruction = computation->AddInstruction(
      HloInstruction::CreateFusion(consumer->shape(), kind, consumer),
      fusion_name);
  TF_CHECK_OK(computation->ReplaceInstruction(consumer, fusion_instruction));
  return fusion_instruction;
}

// Fuses `producer` into `consumer` inside `computation`, naming the resulting
// fusion `fusion_name`. Fusion producers are merged, plain instructions are
// fused; a producer left without users is removed from the computation.
HloInstruction* Fuse(HloInstruction* producer, HloInstruction* consumer,
                     HloComputation* computation,
                     std::string_view fusion_name) {
  HloInstruction* fusion_instruction =
      AddFusionInstruction(producer, consumer, computation, fusion_name);
  if (producer->opcode() == HloOpcode::kFusion) {
    fusion_instruction->MergeFusionInstruction(producer);
  } else {
    fusion_instruction->FuseInstruction(producer);
  }
  if (producer->user_count() == 0) {
    TF_CHECK_OK(computation->RemoveInstruction(producer));
  }
  return fusion_instruction;
}

// Extracts the producer name from whichever step variant is set.
// LOG(FATAL)s (process-terminating) on a step kind with no producer.
absl::string_view GetProducerName(const FusionStep& step) {
  if (step.has_fusion()) {
    return step.fusion().producer_name();
  }
  if (step.has_update_priority()) {
    return step.update_priority().producer_name();
  }
  if (step.has_producer_ineligible()) {
    return step.producer_ineligible().producer_name();
  }
  LOG(FATAL) << "Producer name not found in the current step.";
}

}  // namespace

// Reads a dump from `path`, inferring the serialization format from the file
// extension, and delegates to LoadFromData.
absl::StatusOr<FusionProcessDump> FusionProcessDump::LoadFromFile(
    const std::string& path) {
  std::string format = std::string(tsl::io::Extension(path));
  std::string data;
  TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(), path, &data));
  return FusionProcessDump::LoadFromData(data, format);
}

// Parses `data` as a FusionProcessDumpProto. Accepts "txt"/"pbtxt" (text
// proto) or "pb" (binary proto); any other format is an InvalidArgument.
absl::StatusOr<FusionProcessDump> FusionProcessDump::LoadFromData(
    const std::string& data, absl::string_view format) {
  FusionProcessDumpProto fusion_process_dump_proto;
  if (format == "txt" || format == "pbtxt") {
    if (!tsl::protobuf::TextFormat::ParseFromString(
            data, &fusion_process_dump_proto)) {
      return InvalidArgument("Failed to parse input as HLO protobuf text");
    }
  } else if (format == "pb") {
    if (!fusion_process_dump_proto.ParseFromString(data)) {
      return InvalidArgument("Failed to parse input as HLO protobuf binary");
    }
  } else {
    return InvalidArgument(
        "Invalid format from file extension: '%s'. Expected: txt, pb, or pbtxt",
        format);
  }
  return FusionProcessDump::LoadFromProto(fusion_process_dump_proto);
}

// Materializes the dump: parses the pre-fusion HLO module, captures the GPU
// device description, and builds an instruction-name -> computation index over
// all non-fusion computations for fast lookups during replay.
absl::StatusOr<FusionProcessDump> FusionProcessDump::LoadFromProto(
    const FusionProcessDumpProto& fusion_process_dump_proto) {
  TF_ASSIGN_OR_RETURN(
      auto module,
      LoadModuleFromData(fusion_process_dump_proto.hlo_module_before_fusion(),
                         "txt"));
  se::DeviceDescription gpu_device_info(
      fusion_process_dump_proto.gpu_device_info());
  absl::flat_hash_map<std::string, HloComputation*>
      instruction_name_to_computation_map;
  for (HloComputation* computation : module->MakeNonfusionComputations()) {
    for (HloInstruction* instr : computation->instructions()) {
      instruction_name_to_computation_map[instr->name()] = computation;
    }
  }
  // NOTE(review): the proto parameter is a const reference, so std::move here
  // resolves to a copy — presumably intentional to keep the argument usable
  // by the caller; confirm against the constructor's parameter type.
  return FusionProcessDump(std::move(fusion_process_dump_proto),
                           std::move(module), std::move(gpu_device_info),
                           std::move(instruction_name_to_computation_map));
}

// Returns the computation containing the current step's producer.
HloComputation* FusionProcessDump::GetCurrentComputation() {
  return instruction_name_to_computation_map_.at(
      GetProducerName(CurrentStep()));
}

// Looks up an instruction by name via the name->computation index.
// NOTE(review): operator[] inserts a null computation for unknown names,
// which would then be dereferenced — callers are expected to pass only names
// present in the index.
HloInstruction* FusionProcessDump::GetInstructionWithName(
    absl::string_view name) {
  return instruction_name_to_computation_map_[name]->GetInstructionWithName(
      name);
}

// Returns the producer instruction of the current step.
HloInstruction* FusionProcessDump::GetProducer() {
  return GetInstructionWithName(GetProducerName(CurrentStep()));
}

// Returns the consumer(s) of the current step: one consumer for a fusion
// step, all listed consumers for an update_priority step, none otherwise.
absl::InlinedVector<HloInstruction*, 2> FusionProcessDump::GetConsumers() {
  auto& step = CurrentStep();
  if (step.has_fusion()) {
    return {GetInstructionWithName(step.fusion().consumer_name())};
  }
  if (step.has_update_priority()) {
    absl::InlinedVector<HloInstruction*, 2> consumers;
    for (const auto& consumer_name : step.update_priority().consumer_names()) {
      consumers.push_back(GetInstructionWithName(consumer_name));
    }
    return consumers;
  }
  return {};
}

// Returns the step at the cursor; CHECK-fails when already past the end.
const FusionStep& FusionProcessDump::CurrentStep() {
  CHECK(HasNext());
  return fusion_process_dump_proto_.fusion_steps(current_step_idx_);
}

// True while the cursor has not consumed all recorded steps.
bool FusionProcessDump::HasNext() {
  return current_step_idx_ <
         fusion_process_dump_proto_.fusion_steps_size();
}

// Replays one step and advances the cursor. Fusion steps mutate the module
// (producer fused into consumer); other step kinds only move the cursor.
void FusionProcessDump::Advance() {
  // Copy the step: replay below mutates the module and the name index, so we
  // avoid holding a reference into state that is being updated.
  auto step = CurrentStep();
  if (step.has_fusion()) {
    const auto& fusion_step = step.fusion();
    auto* computation = GetCurrentComputation();
    HloInstruction* producer =
        computation->GetInstructionWithName(fusion_step.producer_name());
    HloInstruction* consumer =
        computation->GetInstructionWithName(fusion_step.consumer_name());
    HloInstruction* fusion =
        Fuse(producer, consumer, computation, fusion_step.fusion_name());
    // Register the freshly created fusion so later steps can look it up.
    instruction_name_to_computation_map_[fusion->name()] = computation;
    last_fusion_ = fusion;
  }
  ++current_step_idx_;
}

}  // namespace gpu
}  // namespace xla
#include "xla/service/gpu/fusion_process_dump.h"

#include <string>

#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/fusion_process_dump.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"

namespace m = ::xla::match;

namespace xla {
namespace gpu {
namespace {

using FusionProcessDumpTest = HloTestBase;

// Appends one fusion step (producer fused into consumer, result named
// `fusion_name`) to the dump proto under construction.
void AddFusion(FusionProcessDumpProto& dump_proto,
               const std::string& fusion_name, const std::string& producer_name,
               const std::string& consumer_name) {
  auto step = dump_proto.add_fusion_steps();
  auto fusion_step = step->mutable_fusion();
  fusion_step->set_fusion_name(fusion_name);
  fusion_step->set_producer_name(producer_name);
  fusion_step->set_consumer_name(consumer_name);
}

// Replays a three-step dump (subtract->abs, fusion.1->multiply, add->fusion.2)
// and verifies the final module has a single fusion whose fused root is the
// full multiply(add, abs(subtract)) expression.
TEST_F(FusionProcessDumpTest, MultipleFusionSteps) {
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule test_module ENTRY main { p0 = f32[] parameter(0) p1 = f32[] parameter(1) add = f32[] add(p0, p1) subtract = f32[] subtract(p0, p1) abs = f32[] abs(subtract) ROOT multiply = f32[] multiply(add, abs) })"));
  FusionProcessDumpProto dump_proto;
  *dump_proto.mutable_gpu_device_info() =
      TestGpuDeviceInfo::RTXA6000DeviceInfo().ToGpuProto();
  // The dump carries the module text as it was before any fusion ran.
  dump_proto.set_hlo_module_before_fusion(
      module->ToString(HloPrintOptions::ShortParsable()));
  AddFusion(dump_proto, "fusion.1", "subtract", "abs");
  AddFusion(dump_proto, "fusion.2", "fusion.1", "multiply");
  AddFusion(dump_proto, "fusion.2", "add", "fusion.2");
  TF_ASSERT_OK_AND_ASSIGN(auto fusion_process_dump,
                          FusionProcessDump::LoadFromProto(dump_proto));
  // Replay all three recorded steps.
  fusion_process_dump.Advance();
  fusion_process_dump.Advance();
  fusion_process_dump.Advance();
  EXPECT_FALSE(fusion_process_dump.HasNext());
  auto root =
      fusion_process_dump.module()->entry_computation()->root_instruction();
  EXPECT_EQ(root->name(), "fusion.2");
  ASSERT_THAT(root, GmockMatch(m::Fusion(m::Parameter(), m::Parameter())));
  EXPECT_THAT(root->fused_expression_root(),
              GmockMatch(m::Multiply(
                  m::Add(m::Parameter(), m::Parameter()),
                  m::Abs(m::Subtract(m::Parameter(), m::Parameter())))));
}

}  // namespace
}  // namespace gpu
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusion_process_dump.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusion_process_dump_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
1559345f-112d-4a23-82b6-d268bfab2164
cpp
tensorflow/tensorflow
kernel_reuse_cache
third_party/xla/xla/service/gpu/kernel_reuse_cache.cc
third_party/xla/xla/service/gpu/kernel_reuse_cache_test.cc
#include "xla/service/gpu/kernel_reuse_cache.h"

#include <functional>
#include <optional>  // Fixed: std::optional was used below without this include (transitive-include reliance).
#include <string>
#include <utility>

#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/executable.pb.h"
#include "xla/service/gpu/kernel_arguments.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"

namespace xla {
namespace gpu {
namespace {

// Builds a compact, comma-joined fingerprint of the kernel arguments.
// Per argument: either "=<i>" (aliases the i-th argument's slice) or the
// alignment followed by optional flags "a" (aliased) and "w" (written).
std::string GetArgumentFingerprint(
    absl::Span<const KernelArgument> kernel_arguments) {
  return absl::StrJoin(
      kernel_arguments, ",", [](std::string* s, const KernelArgument& arg) {
        if (arg.first_with_same_slice().has_value()) {
          absl::StrAppend(s, "=", arg.first_with_same_slice().value());
          return;
        }
        absl::StrAppend(s, arg.alignment());
        if (arg.aliased()) {
          absl::StrAppend(s, "a");
        }
        if (arg.written()) {
          absl::StrAppend(s, "w");
        }
      });
}

}  // namespace

// Returns the cache key for a fused computation: discriminator, argument
// fingerprint, and the computation printed with fingerprint options (operand
// shapes suppressed, non-essential constants included).
std::string GetComputationFingerprint(
    const HloComputation* fused_computation,
    absl::Span<const KernelArgument> kernel_arguments,
    absl::string_view discriminator) {
  auto print_options = HloPrintOptions::Fingerprint()
                           .set_print_only_essential_constants(false)
                           .set_print_operand_shape(false);
  return absl::StrCat(discriminator, "(",
                      GetArgumentFingerprint(kernel_arguments), ")",
                      fused_computation->ToString(print_options));
}

// Repopulates the in-memory cache from a serialized CompilationCacheProto.
// Returns an internal error (via TF_RET_CHECK) if a fingerprint appears twice.
absl::Status KernelReuseCache::Load(const CompilationCacheProto& proto) {
  for (const auto& [name, entry] : proto.entries()) {
    std::optional<se::ClusterDim> cluster_dim;
    if (entry.has_cluster_dim()) {
      cluster_dim =
          se::ClusterDim{entry.cluster_dim().x(), entry.cluster_dim().y(),
                         entry.cluster_dim().z()};
    }
    TF_RET_CHECK(
        cache_
            .insert(
                {entry.fingerprint(),
                 Entry{name,
                       LaunchDimensions{
                           entry.launch_dimensions().num_blocks(),
                           entry.launch_dimensions().num_threads_per_block()},
                       cluster_dim, entry.shmem_bytes(), entry.binary()}})
            .second);
  }
  return absl::OkStatus();
}

// Serializes the cache to a proto, keyed by kernel name. Entries never looked
// up since construction (not in hits_) are skipped to keep the export minimal.
CompilationCacheProto KernelReuseCache::Export() const {
  CompilationCacheProto proto;
  for (const auto& [fingerprint, cache_entry] : cache_) {
    if (!hits_.contains(fingerprint)) {
      VLOG(5) << "Not exporting unused " << cache_entry.kernel_name;
      continue;
    }
    auto [it, inserted] = proto.mutable_entries()->emplace(
        cache_entry.kernel_name, CompilationCacheEntryProto{});
    CHECK(inserted) << cache_entry.kernel_name;
    CompilationCacheEntryProto& proto_entry = it->second;
    proto_entry.set_fingerprint(fingerprint);
    LaunchDimensionsProto launch_dimensions_proto;
    launch_dimensions_proto.set_num_blocks(
        cache_entry.launch_dimensions.num_blocks());
    launch_dimensions_proto.set_num_threads_per_block(
        cache_entry.launch_dimensions.num_threads_per_block());
    *proto_entry.mutable_launch_dimensions() = launch_dimensions_proto;
    if (cache_entry.cluster_dim.has_value()) {
      ClusterDimProto cluster_dim_proto;
      cluster_dim_proto.set_x(cache_entry.cluster_dim->x);
      cluster_dim_proto.set_y(cache_entry.cluster_dim->y);
      cluster_dim_proto.set_z(cache_entry.cluster_dim->z);
      *proto_entry.mutable_cluster_dim() = cluster_dim_proto;
    }
    proto_entry.set_shmem_bytes(cache_entry.shmem_bytes);
    proto_entry.set_binary(cache_entry.binary);
  }
  return proto;
}

// Merges `binaries_to_cache` (with metadata from `current_cache`) into the
// on-disk cache file at `path`. With do_append, the existing file is read and
// extended; entries being added must not already exist on disk. The file is
// only rewritten when at least one kernel was added.
absl::Status UpdateDiskKernelCache(
    absl::string_view path, const bool do_append,
    const CompilationCacheProto& current_cache,
    absl::Span<const KernelReuseCache::NamedBinary> binaries_to_cache) {
  CompilationCacheProto disk_cache;
  if (do_append) {
    std::string serialized;
    TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(),
                                             std::string(path), &serialized));
    if (!disk_cache.ParseFromString(std::string(serialized))) {
      return Internal("Failed to parse serialized CompilationCacheProto.");
    }
  }
  auto entries = disk_cache.mutable_entries();
  int stored_kernel_count = 0;
  for (const auto& [name, binary] : binaries_to_cache) {
    // Metadata (launch dims etc.) must exist in the in-memory cache export.
    auto it_current = current_cache.entries().find(name);
    TF_RET_CHECK(it_current != current_cache.entries().end());
    auto [it_disk, inserted] = entries->insert({name, it_current->second});
    TF_RET_CHECK(inserted);
    TF_RET_CHECK(!binary.empty());
    it_disk->second.set_binary(reinterpret_cast<const char*>(binary.data()),
                               binary.size());
    VLOG(5) << "Cached kernel: " << name << ": " << binary.size();
    ++stored_kernel_count;
  }
  if (stored_kernel_count > 0) {
    TF_RETURN_IF_ERROR(tsl::WriteStringToFile(tsl::Env::Default(),
                                              std::string(path),
                                              disk_cache.SerializeAsString()));
    VLOG(2) << "Stored " << stored_kernel_count << " / "
            << binaries_to_cache.size() << " kernels in the cache file.";
  }
  return absl::OkStatus();
}

// Computes the fingerprint for (computation, arguments, discriminator) and
// delegates to the fingerprint-based overload below.
std::pair<absl::StatusOr<const KernelReuseCache::Entry*>, bool>
KernelReuseCache::GetWithStatus(
    const HloComputation* fused_computation,
    absl::Span<const KernelArgument> kernel_arguments,
    absl::string_view discriminator,
    const std::function<absl::StatusOr<KernelReuseCache::Entry>()>& generator) {
  std::string fingerprint = GetComputationFingerprint(
      fused_computation, kernel_arguments, discriminator);
  VLOG(4) << "Fingerprint: ";
  XLA_VLOG_LINES(4, fingerprint);
  return GetWithStatus(std::move(fingerprint), generator);
}

// Returns {entry, hit}: the cached entry for `fingerprint` with hit=true, or
// a freshly generated entry (hit=false). A failing generator propagates its
// status and caches nothing. The fingerprint is recorded in hits_ either way,
// which controls what Export() emits.
std::pair<absl::StatusOr<const KernelReuseCache::Entry*>, bool>
KernelReuseCache::GetWithStatus(
    std::string fingerprint,
    const std::function<absl::StatusOr<KernelReuseCache::Entry>()>& generator) {
  hits_.insert(fingerprint);
  auto it = cache_.find(fingerprint);
  if (it != cache_.end()) {
    return {&it->second, true};
  }
  absl::StatusOr<Entry> entry = generator();
  if (entry.ok()) {
    it = cache_.insert({std::move(fingerprint), std::move(entry.value())}).first;
    return {&it->second, false};
  }
  return {entry.status(), false};
}

}  // namespace gpu
}  // namespace xla
#include "xla/service/gpu/kernel_reuse_cache.h"

#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "xla/service/gpu/executable.pb.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"

namespace xla {
namespace gpu {
namespace {

using KernelReuseTest = ::testing::Test;

// Round-trip: a generated (hence "hit") entry survives Export -> Clear ->
// Load, leaving the cache non-empty again.
TEST_F(KernelReuseTest, ExportAndLoadWork) {
  KernelReuseCache cache;
  EXPECT_TRUE(cache.IsEmpty());
  auto [result, was_cached] = cache.GetWithStatus(
      "fingerprint", []() { return KernelReuseCache::Entry{}; });
  TF_EXPECT_OK(result);
  EXPECT_NE(result.value(), nullptr);
  EXPECT_FALSE(was_cached);  // first lookup generates rather than hits
  EXPECT_FALSE(cache.IsEmpty());
  const CompilationCacheProto proto = cache.Export();
  cache.Clear();
  EXPECT_TRUE(cache.IsEmpty());
  TF_EXPECT_OK(cache.Load(proto));
  EXPECT_FALSE(cache.IsEmpty());
}

// Writes kernel k1 to a fresh disk cache file, appends k2 in a second pass,
// then verifies the file parses and holds both entries.
TEST_F(KernelReuseTest, UpdatingDiskKernelCacheWorks) {
  std::string cache_file_path;
  CHECK(tsl::Env::Default()->LocalTempFilename(&cache_file_path));
  {
    // Build an export containing exactly one used kernel named k1.
    const CompilationCacheProto proto = [](std::string kernel_name) {
      KernelReuseCache cache;
      auto [result, was_cached] = cache.GetWithStatus("fingerprint", [&]() {
        return KernelReuseCache::Entry{.kernel_name = kernel_name};
      });
      return cache.Export();
    }("k1");
    // do_append=false: start a new cache file.
    TF_EXPECT_OK(UpdateDiskKernelCache(cache_file_path, false, proto,
                                       {{.name = "k1", .binary = {5, 6}}}));
  }
  {
    const CompilationCacheProto proto = [](std::string kernel_name) {
      KernelReuseCache cache;
      auto [result, was_cached] = cache.GetWithStatus("fingerprint", [&]() {
        return KernelReuseCache::Entry{.kernel_name = kernel_name};
      });
      return cache.Export();
    }("k2");
    // do_append=true: extend the existing file with k2.
    TF_EXPECT_OK(UpdateDiskKernelCache(cache_file_path, true, proto,
                                       {{.name = "k2", .binary = {7, 8}}}));
  }
  std::string serialized;
  TF_EXPECT_OK(
      tsl::ReadFileToString(tsl::Env::Default(), cache_file_path, &serialized));
  CompilationCacheProto proto;
  EXPECT_TRUE(proto.ParseFromString(std::string(serialized)));
  EXPECT_EQ(proto.entries_size(), 2);
}

}  // namespace
}  // namespace gpu
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/kernel_reuse_cache.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/kernel_reuse_cache_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
b0dfb30d-f1d3-464d-99a2-1da73dd33128
cpp
tensorflow/tensorflow
gpu_spmd_pipeline
third_party/xla/xla/service/gpu/gpu_spmd_pipeline.cc
third_party/xla/xla/service/gpu/gpu_spmd_pipeline_test.cc
#include "xla/service/gpu/gpu_spmd_pipeline.h"

#include <cstdint>
#include <optional>

#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/pass/hlo_pass_fix.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/hlo/transforms/hlo_constant_splitter.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/conditional_simplifier.h"
#include "xla/service/gather_expander.h"
#include "xla/service/gpu/transforms/algebraic_simplifier.h"
#include "xla/service/hlo_constant_folding.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/reshape_mover.h"
#include "xla/service/scatter_expander.h"
#include "xla/service/sharding_propagation.h"
#include "xla/service/sort_simplifier.h"
#include "xla/service/spmd/collective_permute_motion.h"
#include "xla/service/spmd/shardy/shardy_xla_pass.h"
#include "xla/service/spmd/stateful_rng_spmd_partitioner.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/service/while_loop_constant_sinking.h"
#include "xla/service/while_loop_simplifier.h"
#include "xla/stream_executor/device_description.h"

namespace xla {
namespace gpu {

// Appends the GPU SPMD partitioning passes to `spmd_pipeline`:
// a fixed-point simplification sub-pipeline, then sharding assignment
// (Shardy or HloConstantSplitter + optional auto-sharding +
// ShardingPropagation), then the stateful-RNG-aware SPMD partitioner and
// collective-permute motion. Pass order here is load-bearing.
// `auto_sharding_func`, when provided, is invoked to splice auto-sharding
// passes into the pipeline (only on the non-Shardy path).
void AddSPMDPasses(
    const HloModule* hlo_module,
    const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts,
    const se::GpuComputeCapability& compute_capability,
    HloPassPipeline& spmd_pipeline,
    std::optional<const absl::FunctionRef<void(HloPassPipeline&)>>
        auto_sharding_func) {
  const int64_t num_partitions = hlo_module->config().num_partitions();
  CHECK_GE(num_partitions, 1);

  // Simplification sub-pipeline, run to a fixed point (HloPassFix).
  HloPassPipeline& spmd_simplify =
      spmd_pipeline.AddPass<HloPassFix<HloPassPipeline>>("spmd-simplify");
  spmd_simplify.AddPass<GpuAlgebraicSimplifier>(layout_insensitive_algsimp_opts,
                                                compute_capability);
  spmd_simplify.AddPass<SortSimplifier>();
  spmd_simplify.AddPass<TupleSimplifier>();
  // Only trivial scatters/gathers are expanded before partitioning.
  spmd_simplify.AddPass<ScatterExpander>(
      ScatterExpander::kEliminateSimpleScatters);
  spmd_simplify.AddPass<GatherExpander>(
      GatherExpander::kEliminateSimpleGathers);
  spmd_simplify.AddPass<WhileLoopConstantSinking>();
  spmd_simplify.AddPass<WhileLoopSimplifier>();
  ReshapeMoverOptions reshape_mover_options;
  reshape_mover_options.reshape_of_1d_broadcast_is_cheap = true;
  spmd_simplify.AddPass<ReshapeMover>(reshape_mover_options);
  // Re-run the algebraic simplifier to a fixed point after reshape motion.
  spmd_simplify.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(
      layout_insensitive_algsimp_opts, compute_capability);
  spmd_simplify.AddPass<HloConstantFolding>();
  spmd_simplify.AddPass<ConditionalSimplifier>();

  const HloModuleConfig& config = hlo_module->config();
  if (config.use_shardy_partitioner()) {
    // Shardy handles sharding assignment/propagation by itself.
    spmd_pipeline.AddPass<sdy::ShardyXLA>();
  } else {
    spmd_pipeline.AddPass<HloConstantSplitter>();
    // DCE goes into the fixed-point simplify sub-pipeline, not the outer one.
    spmd_simplify.AddPass<HloDCE>();
    if (auto_sharding_func.has_value()) {
      (*auto_sharding_func)(spmd_pipeline);
    }
    // is_spmd=true, propagate_metadata=false.
    spmd_pipeline.AddPass<ShardingPropagation>(
        true, false, config.allow_spmd_sharding_propagation_to_output());
  }

  // Partitioner thresholds/flags come from the module's debug options; the
  // two trailing `true` arguments are positional partitioner options — see
  // StatefulRngSpmdPartitioner's constructor for their meaning.
  spmd_pipeline.AddPass<spmd::StatefulRngSpmdPartitioner>(
      num_partitions, hlo_module->config().replica_count(),
      hlo_module->config()
          .debug_options()
          .xla_gpu_threshold_for_windowed_einsum_mib(),
      hlo_module->config()
          .debug_options()
          .xla_gpu_multi_streamed_windowed_einsum(),
      true, true);
  spmd_pipeline.AddPass<CollectivePermuteMotion>();
}

}  // namespace gpu
}  // namespace xla
#include "xla/service/gpu/gpu_spmd_pipeline.h"

#include <cstdint>
#include <memory>
#include <optional>
#include <string>

#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "xla/client/executable_build_options.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace gpu {
namespace {

// Parameterized over the partitioner backend: true = Shardy, false = GSPMD.
class GpuSpmdPartitioningTest : public HloTestBase,
                                public ::testing::WithParamInterface<bool> {
 public:
  // Parses `hlo_module`, configures it for `num_devices` partitions with the
  // parameterized partitioner, runs the SPMD pipeline built by AddSPMDPasses
  // (Ampere compute capability, no auto-sharding), and returns the
  // partitioned module.
  absl::StatusOr<std::unique_ptr<HloModule>> PartitionComputation(
      const char* hlo_module, int64_t num_devices) {
    // 1 replica, num_devices partitions.
    HloModuleConfig config = GetModuleConfigForTest(
        1, num_devices);
    config.set_num_partitions(num_devices);
    config.set_use_shardy_partitioner(UseShardy());
    TF_ASSIGN_OR_RETURN(auto module,
                        ParseAndReturnVerifiedModule(hlo_module, config));
    HloPassPipeline spmd_pipeline("spmd-partitioner");
    se::CudaComputeCapability ampere(8, 0);
    AlgebraicSimplifierOptions alg_simplifier_options;
    AddSPMDPasses(module.get(), alg_simplifier_options, ampere, spmd_pipeline,
                  std::nullopt);
    TF_RETURN_IF_ERROR(spmd_pipeline.Run(module.get()).status());
    XLA_VLOG_LINES(10, module->ToString());
    return module;
  }

 protected:
  bool UseShardy() const { return GetParam(); }
  DebugOptions GetDebugOptionsForTest() override {
    DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
    return debug_options;
  }
};

// Partitioning a dot over 8 devices must shrink the sharded parameter
// dimensions in the entry computation layout (16 -> 2 on the contracted dim)
// while preserving each parameter's minor-to-major layout and the unsharded
// result shape.
TEST_P(GpuSpmdPartitioningTest, DotWithEntryComputationLayout) {
  const char* const kHloModule = R"( HloModule module, entry_computation_layout={(f32[8,16]{0,1}, f32[16,24]{1,0}) ->f32[8,24]{1,0}} ENTRY main { %p0 = f32[8,16] parameter(0), sharding={devices=[1,8]<=[8]} %p1 = f32[16,24] parameter(1), sharding={devices=[8,1]<=[8]} ROOT %dot = f32[8,24] dot(%p0, %p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })";
  TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(kHloModule, 8));
  EXPECT_EQ(module->config().entry_computation_layout().parameter_shape(0),
            ShapeUtil::MakeShapeWithDenseLayout(F32, {8, 2}, {0, 1}));
  EXPECT_EQ(module->config().entry_computation_layout().parameter_shape(1),
            ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 24}, {1, 0}));
  EXPECT_EQ(module->config().entry_computation_layout().result_shape(),
            ShapeUtil::MakeShapeWithDenseLayout(F32, {8, 24}, {1, 0}));
}

// Names the test instantiations after the partitioner backend.
std::string TestParamToString(
    const ::testing::TestParamInfo<bool>& param_info) {
  return param_info.param ? "Shardy" : "GSPMD";
}

INSTANTIATE_TEST_SUITE_P(All, GpuSpmdPartitioningTest,
                         ::testing::Values(true, false), TestParamToString);

}  // namespace
}  // namespace gpu
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_spmd_pipeline.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_spmd_pipeline_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
7dde5b77-37ca-4936-8496-28e66a591fb5
cpp
tensorflow/tensorflow
buffer_comparator
third_party/xla/xla/service/gpu/buffer_comparator.cc
third_party/xla/xla/service/gpu/buffer_comparator_test.cc
#include "xla/service/gpu/buffer_comparator.h" #include <algorithm> #include <cmath> #include <cstdint> #include <string_view> #include <type_traits> #include <vector> #include "Eigen/Core" #include "xla/service/gpu/launch_dimensions.h" #include "xla/service/hlo_module_config.h" #include "xla/shape.h" #include "xla/stream_executor/device_description.h" #include "xla/stream_executor/device_memory.h" #include "xla/stream_executor/device_memory_handle.h" #include "xla/stream_executor/kernel.h" #include "xla/stream_executor/stream.h" #include "xla/stream_executor/stream_executor.h" #include "xla/stream_executor/typed_kernel_factory.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/ml_dtypes.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { template <typename ElementT> using ComparisonKernelT = se::TypedKernel<se::DeviceMemory<ElementT>, se::DeviceMemory<ElementT>, float, uint64_t, se::DeviceMemory<uint64_t>>; struct ComparisonParams { double relative_tol = 0.1; bool verbose = true; const Shape* shape = nullptr; se::Stream* stream = nullptr; se::DeviceMemoryBase current{}; se::DeviceMemoryBase expected{}; }; template <typename ElementT> static absl::StatusOr<bool> DeviceCompare(std::string_view kernel_name, void* kernel_symbol, const ComparisonParams& params) { se::StreamExecutor* executor = params.stream->parent(); se::DeviceMemoryHandle out(executor, executor->AllocateScalar<uint64_t>()); TF_RETURN_IF_ERROR( params.stream->MemZero(out.memory_ptr(), sizeof(uint64_t))); if (params.current.size() != params.expected.size()) { return Internal("Mismatched buffer size: %d bytes vs. 
%d bytes", params.current.size(), params.expected.size()); } se::DeviceMemory<ElementT> current_typed(params.current); se::DeviceMemory<ElementT> expected_typed(params.expected); uint64_t buffer_size = current_typed.ElementCount(); TF_ASSIGN_OR_RETURN( ComparisonKernelT<ElementT> comparison_kernel, (se::TypedKernelFactory< se::DeviceMemory<ElementT>, se::DeviceMemory<ElementT>, float, uint64_t, se::DeviceMemory<uint64_t>>::Create(executor, kernel_name, kernel_symbol))); const se::DeviceDescription& gpu_device_info = executor->GetDeviceDescription(); LaunchDimensions dim = CalculateLaunchDimensions(*params.shape, gpu_device_info); se::DeviceMemory<uint64_t> as_uint64(out.memory()); TF_RETURN_IF_ERROR(params.stream->ThenLaunch( dim.thread_counts_per_block(), dim.block_counts(), comparison_kernel, current_typed, expected_typed, static_cast<float>(params.relative_tol), buffer_size, as_uint64)); uint64_t result = -1; CHECK_EQ(out.memory().size(), sizeof(result)); TF_RETURN_IF_ERROR( params.stream->Memcpy(&result, out.memory(), sizeof(result))); TF_RETURN_IF_ERROR(params.stream->BlockHostUntilDone()); return result == 0; } template <typename ElementType, typename ComparisonType> static absl::StatusOr<bool> HostCompare(const ComparisonParams& params) { int64_t n = params.current.size() / sizeof(ElementType); std::vector<ElementType> host_current(n), host_expected(n); TF_RETURN_IF_ERROR(params.stream->Memcpy(host_current.data(), params.current, params.current.size())); TF_RETURN_IF_ERROR(params.stream->Memcpy( host_expected.data(), params.expected, params.expected.size())); TF_RETURN_IF_ERROR(params.stream->BlockHostUntilDone()); const auto canonicalize = [](ComparisonType a) -> ComparisonType { if (std::is_same<ElementType, Eigen::half>::value && a) { constexpr ComparisonType kMaxFp16Value = 65505; if (std::isnan(a)) { return a; } return std::max(-kMaxFp16Value, std::min(a, kMaxFp16Value)); } return a; }; int differences_seen = 0; for (int64_t i = 0; i < n && 
differences_seen < 10; ++i) { auto current_value = static_cast<ComparisonType>(host_current[i]); auto expected_value = static_cast<ComparisonType>(host_expected[i]); ComparisonType current_value_canonical = canonicalize(current_value); ComparisonType expected_value_canonical = canonicalize(expected_value); if (std::isnan(current_value_canonical) && std::isnan(expected_value_canonical)) { continue; } if (std::isinf(current_value_canonical) && std::isinf(expected_value_canonical) && current_value_canonical == expected_value_canonical) { continue; } if (std::isfinite(current_value_canonical) != std::isfinite(expected_value_canonical) || !(std::abs(current_value_canonical - expected_value_canonical) / (std::max(std::abs(current_value_canonical), std::abs(expected_value_canonical)) + 1) < params.relative_tol)) { if (!params.verbose) return false; ++differences_seen; LOG(ERROR) << "Difference at " << i << ": " << current_value << ", expected " << expected_value; } } return differences_seen == 0; } template <typename ElementT, typename ComparisonT> static absl::StatusOr<bool> CompareEqualParameterized( std::string_view kernel_name, void* kernel_symbol, const ComparisonParams& params) { XLA_SCOPED_LOGGING_TIMER("BufferComparator::CompareEqual"); TF_ASSIGN_OR_RETURN( bool result, DeviceCompare<ElementT>(kernel_name, kernel_symbol, params)); if (result) { return true; } TF_ASSIGN_OR_RETURN(bool host_return, (HostCompare<ElementT, ComparisonT>(params))); CHECK_EQ(host_return, result) << "Host comparison succeeded even though GPU comparison failed."; return false; } absl::StatusOr<bool> BufferComparator::CompareEqual( se::Stream* stream, se::DeviceMemoryBase current, se::DeviceMemoryBase expected) const { ComparisonParams params{relative_tol_, verbose_, &shape_, stream, current, expected}; switch (shape_.element_type()) { #if GOOGLE_CUDA case xla::F8E4M3FN: return CompareEqualParameterized<tsl::float8_e4m3fn, float>( "fp8_e4m3fn_comparison", 
buffer_comparator::fp8_e4m3fn_comparison(), params); case xla::F8E5M2: return CompareEqualParameterized<tsl::float8_e5m2, float>( "fp8_e5m2_comparison", buffer_comparator::fp8_e5m2_comparison(), params); #endif #if TENSORFLOW_USE_ROCM && TF_ROCM_VERSION >= 60200 case xla::F8E4M3FNUZ: return CompareEqualParameterized<tsl::float8_e4m3fnuz, float>( "fp8_e4m3fnuz_comparison", buffer_comparator::fp8_e4m3fnuz_comparison(), params); case xla::F8E5M2FNUZ: return CompareEqualParameterized<tsl::float8_e5m2fnuz, float>( "fp8_e5m2fnuz_comparison", buffer_comparator::fp8_e5m2fnuz_comparison(), params); #endif case xla::F16: return CompareEqualParameterized<Eigen::half, float>( "fp16_comparison", buffer_comparator::fp16_comparison(), params); case xla::BF16: return CompareEqualParameterized<Eigen::bfloat16, float>( "bf16_comparison", buffer_comparator::bf16_comparison(), params); case xla::F32: return CompareEqualParameterized<float, float>( "fp32_comparison", buffer_comparator::fp32_comparison(), params); case xla::F64: return CompareEqualParameterized<double, double>( "fp64_comparison", buffer_comparator::fp64_comparison(), params); case xla::S8: return CompareEqualParameterized<int8_t, float>( "int8_comparison", buffer_comparator::int8_comparison(), params); case xla::S32: return CompareEqualParameterized<int32_t, float>( "int32_comparison", buffer_comparator::int32_comparison(), params); default: return Unimplemented("Unimplemented element type"); } } BufferComparator::BufferComparator(const Shape& shape, double tolerance, bool verbose) : shape_(shape), relative_tol_(tolerance), verbose_(verbose) { auto double_dim_size = [&]() { int64_t prev_zero_dim_size = shape_.dimensions(0); shape_.set_dimensions(0, prev_zero_dim_size * 2); }; if (shape_.element_type() == PrimitiveType::C64) { shape_.set_element_type(PrimitiveType::F32); double_dim_size(); } else if (shape_.element_type() == PrimitiveType::C128) { shape_.set_element_type(PrimitiveType::F64); double_dim_size(); } } } }
#include "xla/service/gpu/buffer_comparator.h" #include <cmath> #include <complex> #include <cstdint> #include <limits> #include <vector> #include "xla/primitive_util.h" #include "xla/service/gpu/stream_executor_util.h" #include "xla/service/hlo_module_config.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_memory.h" #include "xla/stream_executor/device_memory_handle.h" #include "xla/stream_executor/platform.h" #include "xla/stream_executor/platform_manager.h" #include "xla/stream_executor/stream.h" #include "xla/types.h" #include "tsl/platform/ml_dtypes.h" #include "tsl/platform/status.h" #include "tsl/platform/test.h" namespace xla { namespace gpu { namespace { constexpr double kDefaultTolerance = 0.1; class BufferComparatorTest : public testing::Test { protected: BufferComparatorTest() #if GOOGLE_CUDA : platform_(se::PlatformManager::PlatformWithName("CUDA").value()), #elif TENSORFLOW_USE_ROCM : platform_(se::PlatformManager::PlatformWithName("ROCM").value()), #endif stream_exec_(platform_->ExecutorForDevice(0).value()) { } template <typename ElementType> bool CompareEqualBuffers(const std::vector<ElementType>& current, const std::vector<ElementType>& expected, double tolerance) { auto stream = stream_exec_->CreateStream().value(); se::DeviceMemoryHandle current_buffer( stream_exec_, stream_exec_->AllocateArray<ElementType>(current.size())); se::DeviceMemoryHandle expected_buffer( stream_exec_, stream_exec_->AllocateArray<ElementType>(expected.size())); TF_CHECK_OK(stream->Memcpy(current_buffer.memory_ptr(), current.data(), current_buffer.memory().size())); TF_CHECK_OK(stream->Memcpy(expected_buffer.memory_ptr(), expected.data(), expected_buffer.memory().size())); TF_CHECK_OK(stream->BlockHostUntilDone()); BufferComparator comparator( ShapeUtil::MakeShape( primitive_util::NativeToPrimitiveType<ElementType>(), {static_cast<int64_t>(current.size())}), tolerance); return comparator .CompareEqual(stream.get(), current_buffer.memory(), 
expected_buffer.memory()) .value(); } template <typename ElementType> bool CompareEqualFloatBuffers(const std::vector<float>& lhs_float, const std::vector<float>& rhs_float, double tolerance = kDefaultTolerance) { std::vector<ElementType> lhs(lhs_float.begin(), lhs_float.end()); std::vector<ElementType> rhs(rhs_float.begin(), rhs_float.end()); return CompareEqualBuffers(lhs, rhs, tolerance); } template <typename ElementType> bool CompareEqualComplex(const std::vector<std::complex<ElementType>>& lhs, const std::vector<std::complex<ElementType>>& rhs) { return CompareEqualBuffers<std::complex<ElementType>>(lhs, rhs, kDefaultTolerance); } se::Platform* platform_; se::StreamExecutor* stream_exec_; }; TEST_F(BufferComparatorTest, TestComplex) { EXPECT_FALSE( CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {6, 7}})); EXPECT_TRUE(CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {2.2, 3.3}})); EXPECT_TRUE( CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {2, 3}})); EXPECT_FALSE( CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {6, 3}})); EXPECT_FALSE( CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {6, 7}})); EXPECT_FALSE( CompareEqualComplex<float>({{0.1, 0.2}, {2, 3}}, {{0.1, 6}, {2, 3}})); EXPECT_TRUE(CompareEqualComplex<double>({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {2.2, 3.3}})); EXPECT_FALSE( CompareEqualComplex<double>({{0.1, 0.2}, {2, 3}}, {{0.1, 0.2}, {2, 7}})); } TEST_F(BufferComparatorTest, TestNaNs) { EXPECT_TRUE( CompareEqualFloatBuffers<Eigen::half>({std::nanf("")}, {std::nanf("")})); EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({std::nanf("")}, {std::nanf("1234")})); EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({std::nanf("")}, {1.})); EXPECT_TRUE( CompareEqualFloatBuffers<float>({std::nanf("")}, {std::nanf("")})); EXPECT_TRUE( CompareEqualFloatBuffers<float>({std::nanf("")}, {std::nanf("1234")})); EXPECT_FALSE(CompareEqualFloatBuffers<float>({std::nanf("")}, {1.})); EXPECT_TRUE( 
CompareEqualFloatBuffers<double>({std::nanf("")}, {std::nanf("")})); EXPECT_TRUE( CompareEqualFloatBuffers<double>({std::nanf("")}, {std::nanf("1234")})); EXPECT_FALSE(CompareEqualFloatBuffers<double>({std::nanf("")}, {1.})); } TEST_F(BufferComparatorTest, TestInfs) { const auto inf = std::numeric_limits<float>::infinity(); EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({inf}, {std::nanf("")})); EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({inf}, {inf})); EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({inf}, {65504})); EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({-inf}, {-65504})); EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({inf}, {-65504})); EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({-inf}, {65504})); EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({inf}, {20})); EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({inf}, {-20})); EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({-inf}, {20})); EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({-inf}, {-20})); EXPECT_FALSE(CompareEqualFloatBuffers<float>({inf}, {std::nanf("")})); EXPECT_TRUE(CompareEqualFloatBuffers<float>({inf}, {inf})); EXPECT_FALSE(CompareEqualFloatBuffers<float>({inf}, {65504})); EXPECT_FALSE(CompareEqualFloatBuffers<float>({-inf}, {-65504})); EXPECT_FALSE(CompareEqualFloatBuffers<float>({inf}, {-65504})); EXPECT_FALSE(CompareEqualFloatBuffers<float>({-inf}, {65504})); EXPECT_FALSE(CompareEqualFloatBuffers<float>({inf}, {20})); EXPECT_FALSE(CompareEqualFloatBuffers<float>({inf}, {-20})); EXPECT_FALSE(CompareEqualFloatBuffers<float>({-inf}, {20})); EXPECT_FALSE(CompareEqualFloatBuffers<float>({-inf}, {-20})); EXPECT_FALSE(CompareEqualFloatBuffers<double>({inf}, {std::nanf("")})); EXPECT_TRUE(CompareEqualFloatBuffers<double>({inf}, {inf})); EXPECT_FALSE(CompareEqualFloatBuffers<double>({inf}, {65504})); EXPECT_FALSE(CompareEqualFloatBuffers<double>({-inf}, {-65504})); EXPECT_FALSE(CompareEqualFloatBuffers<double>({inf}, {-65504})); 
EXPECT_FALSE(CompareEqualFloatBuffers<double>({-inf}, {65504})); EXPECT_FALSE(CompareEqualFloatBuffers<double>({inf}, {20})); EXPECT_FALSE(CompareEqualFloatBuffers<double>({inf}, {-20})); EXPECT_FALSE(CompareEqualFloatBuffers<double>({-inf}, {20})); EXPECT_FALSE(CompareEqualFloatBuffers<double>({-inf}, {-20})); #if GOOGLE_CUDA EXPECT_TRUE( CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {std::nanf("")})); EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {inf})); EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {-inf})); EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {448})); EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {-448})); EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {20})); EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({inf}, {-20})); EXPECT_FALSE( CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {std::nanf("")})); EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {inf})); EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {-inf})); EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {57344})); EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({-inf}, {-57344})); EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {20})); EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({inf}, {-20})); EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({-inf}, {20})); EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({-inf}, {-20})); #endif } TEST_F(BufferComparatorTest, TestNumbers) { EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({20}, {20.1})); EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({20}, {23.0})); EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({20}, {23.0}, 0.2)); EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({20}, {26.0}, 0.2)); EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({0}, {1})); EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({0.9}, {1})); 
EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({9}, {10})); EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({10}, {9})); EXPECT_TRUE(CompareEqualFloatBuffers<float>({20}, {20.1})); EXPECT_FALSE(CompareEqualFloatBuffers<float>({20}, {23.0})); EXPECT_TRUE(CompareEqualFloatBuffers<float>({20}, {23.0}, 0.2)); EXPECT_FALSE(CompareEqualFloatBuffers<float>({20}, {26.0}, 0.2)); EXPECT_FALSE(CompareEqualFloatBuffers<float>({0}, {1})); EXPECT_TRUE(CompareEqualFloatBuffers<float>({0.9}, {1})); EXPECT_TRUE(CompareEqualFloatBuffers<float>({9}, {10})); EXPECT_TRUE(CompareEqualFloatBuffers<float>({10}, {9})); EXPECT_TRUE(CompareEqualFloatBuffers<double>({20}, {20.1})); EXPECT_FALSE(CompareEqualFloatBuffers<double>({20}, {23.0})); EXPECT_TRUE(CompareEqualFloatBuffers<double>({20}, {23.0}, 0.2)); EXPECT_FALSE(CompareEqualFloatBuffers<double>({20}, {26.0}, 0.2)); EXPECT_FALSE(CompareEqualFloatBuffers<double>({0}, {1})); EXPECT_TRUE(CompareEqualFloatBuffers<double>({0.9}, {1})); EXPECT_TRUE(CompareEqualFloatBuffers<double>({9}, {10})); EXPECT_TRUE(CompareEqualFloatBuffers<double>({10}, {9})); EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({100}, {101})); EXPECT_FALSE(CompareEqualFloatBuffers<int8_t>({100}, {120})); EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({100}, {120}, 0.2)); EXPECT_FALSE(CompareEqualFloatBuffers<int8_t>({90}, {120}, 0.2)); EXPECT_FALSE(CompareEqualFloatBuffers<int8_t>({0}, {10})); EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({9}, {10})); EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({90}, {100})); EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({100}, {90})); EXPECT_FALSE(CompareEqualFloatBuffers<int8_t>({-128}, {127})); #if GOOGLE_CUDA EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({20}, {20.1})); EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({20}, {23.0})); EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({20}, {23.0}, 0.2)); EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({20}, {26.0}, 0.2)); 
EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({0}, {1})); EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({0.9}, {1})); EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({9}, {10})); EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>({9}, {10})); EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({20}, {20.1})); EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({20}, {23.0})); EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({20}, {23.0}, 0.2)); EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({20}, {30.0}, 0.2)); EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>({0}, {1})); EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({0.9}, {1})); EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({11}, {12})); EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>({12}, {11})); #endif const double tol = 0.001; EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>({0.9}, {1}, tol)); EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>({0.9}, {0.901}, tol)); EXPECT_FALSE(CompareEqualFloatBuffers<float>({10}, {10.1}, tol)); EXPECT_TRUE(CompareEqualFloatBuffers<float>({10}, {10.01}, tol)); EXPECT_FALSE(CompareEqualFloatBuffers<int8_t>({100}, {101}, tol)); EXPECT_FALSE(CompareEqualFloatBuffers<double>({20}, {20.1}, tol)); EXPECT_TRUE(CompareEqualFloatBuffers<double>({20}, {20.01}, tol)); } TEST_F(BufferComparatorTest, TestMultiple) { { EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>( {20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1})); std::vector<float> lhs(200); std::vector<float> rhs(200); for (int i = 0; i < 200; i++) { EXPECT_TRUE(CompareEqualFloatBuffers<Eigen::half>(lhs, rhs)) << "should be the same at index " << i; lhs[i] = 3; rhs[i] = 5; EXPECT_FALSE(CompareEqualFloatBuffers<Eigen::half>(lhs, rhs)) << "should be the different at index " << i; lhs[i] = 0; rhs[i] = 0; } } { EXPECT_TRUE(CompareEqualFloatBuffers<float>( {20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1})); std::vector<float> lhs(200); 
std::vector<float> rhs(200); for (int i = 0; i < 200; i++) { EXPECT_TRUE(CompareEqualFloatBuffers<float>(lhs, rhs)) << "should be the same at index " << i; lhs[i] = 3; rhs[i] = 5; EXPECT_FALSE(CompareEqualFloatBuffers<float>(lhs, rhs)) << "should be the different at index " << i; lhs[i] = 0; rhs[i] = 0; } } { EXPECT_TRUE(CompareEqualFloatBuffers<double>( {20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1})); std::vector<float> lhs(200); std::vector<float> rhs(200); for (int i = 0; i < 200; i++) { EXPECT_TRUE(CompareEqualFloatBuffers<double>(lhs, rhs)) << "should be the same at index " << i; lhs[i] = 3; rhs[i] = 5; EXPECT_FALSE(CompareEqualFloatBuffers<double>(lhs, rhs)) << "should be the different at index " << i; lhs[i] = 0; rhs[i] = 0; } } { EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>({20, 30, 40, 50, 60}, {21, 31, 41, 51, 61})); std::vector<float> lhs(200); std::vector<float> rhs(200); for (int i = 0; i < 200; i++) { EXPECT_TRUE(CompareEqualFloatBuffers<int8_t>(lhs, rhs)) << "should be the same at index " << i; lhs[i] = 3; rhs[i] = 5; EXPECT_FALSE(CompareEqualFloatBuffers<int8_t>(lhs, rhs)) << "should be the different at index " << i; lhs[i] = 0; rhs[i] = 0; } } #if GOOGLE_CUDA { EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>( {20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1})); std::vector<float> lhs(200); std::vector<float> rhs(200); for (int i = 0; i < 200; i++) { EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>(lhs, rhs)) << "should be the same at index " << i; lhs[i] = 3; rhs[i] = 5; EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e4m3fn>(lhs, rhs)) << "should be the different at index " << i; lhs[i] = 0; rhs[i] = 0; } } { EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>( {20, 30, 40, 50, 60}, {20.1, 30.1, 40.1, 50.1, 60.1})); std::vector<float> lhs(200); std::vector<float> rhs(200); for (int i = 0; i < 200; i++) { EXPECT_TRUE(CompareEqualFloatBuffers<tsl::float8_e5m2>(lhs, rhs)) << "should be the same at index " << i; lhs[i] 
= 3; rhs[i] = 5; EXPECT_FALSE(CompareEqualFloatBuffers<tsl::float8_e5m2>(lhs, rhs)) << "should be the different at index " << i; lhs[i] = 0; rhs[i] = 0; } } #endif } TEST_F(BufferComparatorTest, BF16) { const int element_count = 3123; int64_t rng_state = 0; auto stream = stream_exec_->CreateStream().value(); se::DeviceMemoryHandle lhs( stream_exec_, stream_exec_->AllocateArray<Eigen::bfloat16>(element_count)); InitializeBuffer(stream.get(), BF16, &rng_state, lhs.memory()); se::DeviceMemoryHandle rhs( stream_exec_, stream_exec_->AllocateArray<Eigen::bfloat16>(element_count)); InitializeBuffer(stream.get(), BF16, &rng_state, rhs.memory()); BufferComparator comparator(ShapeUtil::MakeShape(BF16, {element_count})); EXPECT_FALSE(comparator.CompareEqual(stream.get(), lhs.memory(), rhs.memory()) .value()); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/buffer_comparator.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/buffer_comparator_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
5befe34d-a742-4dd8-8eb2-5fbb362c43ee
cpp
tensorflow/tensorflow
gpu_fusible
third_party/xla/xla/service/gpu/gpu_fusible.cc
third_party/xla/xla/service/gpu/gpu_fusible_test.cc
#include "xla/service/gpu/gpu_fusible.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <optional> #include <stack> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/synchronization/mutex.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/permutation_util.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/hlo_fusion_analysis.h" #include "xla/service/gpu/hlo_traversal.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/reduction_utils.h" #include "xla/service/hlo_dataflow_analysis.h" #include "xla/service/instruction_fusion.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/util.h" namespace xla { namespace gpu { namespace { bool HasAnyTiledTransposeRoot(const HloComputation& computation) { return absl::c_any_of(GetFusionRoots(computation), [&](const HloInstruction* instr) { return GetDescriptionForTiledTransposeEmitter( FindNonTrivialHero(*instr)) .has_value(); }); } const Shape& GetElementShape(const HloFusionAnalysis& analysis) { const Shape* shape = &analysis.fusion_root(0).shape(); while (shape->IsTuple()) { shape = &shape->tuple_shapes(0); } return *shape; } int ComputeMaxUnrollFactor(int64_t num_elements) { constexpr int kMaxUnrollFactor = 4; for (int i = kMaxUnrollFactor; i > 1; i /= 2) { if (num_elements % i == 0) { return i; } } return 1; } } bool IfFusedReadsElementsMultipleTimes(const HloInstruction& instr) { CHECK_NE(instr.opcode(), HloOpcode::kFusion) << "`instr` has to be unfused."; if (instr.opcode() == HloOpcode::kGather || instr.opcode() == HloOpcode::kBroadcast) { return ShapeUtil::ElementsIn(instr.shape()) > 
ShapeUtil::ElementsIn(instr.operand(0)->shape()); } if (instr.opcode() == HloOpcode::kReduceWindow) { for (const auto& dim : instr.window().dimensions()) { if (dim.size() > dim.stride()) { return true; } } } return false; } bool IsExpensiveToRepeat(const HloInstruction& instr) { CHECK_NE(instr.opcode(), HloOpcode::kFusion) << "`instr` has to be unfused."; constexpr int kMaxInputsPerOutput = 10; if (instr.opcode() == HloOpcode::kReduce && !IsReductionFromOrToContiguousDimensions(instr)) { int64_t reduction_ratio = ShapeUtil::ElementsIn(instr.operand(0)->shape()) / ShapeUtil::ElementsIn(instr.shape()); if (reduction_ratio > kMaxInputsPerOutput) return true; } if (instr.opcode() == HloOpcode::kReduceWindow) { int64_t reduction_ratio = 1; for (const auto& dim : instr.window().dimensions()) reduction_ratio *= dim.size(); if (reduction_ratio > kMaxInputsPerOutput) return true; } return false; } bool IsPhysicallyTransposing(const HloInstruction& instr) { if (instr.opcode() == HloOpcode::kFusion) { for (const HloInstruction* fused_instr : instr.fused_instructions()) { if (IsPhysicallyTransposing(*fused_instr)) { return true; } } } return instr.opcode() == HloOpcode::kCopy || (instr.opcode() == HloOpcode::kTranspose && !ShapeUtil::TransposeIsBitcast(instr.operand(0)->shape(), instr.shape(), instr.dimensions())); } namespace { std::pair<int64_t, int64_t> MostMinorNonTrivialDimension(const Shape& shape) { int64_t position_of_first_non_trivial_dim = 0; for (int64_t dim : shape.layout().minor_to_major()) { if (shape.dimensions()[dim] > 1) { return {dim, position_of_first_non_trivial_dim}; } ++position_of_first_non_trivial_dim; } return {-1, position_of_first_non_trivial_dim}; } } bool TransposesMinorDimension(const HloInstruction* instr) { switch (instr->opcode()) { case HloOpcode::kFusion: return absl::c_any_of(instr->fused_instructions(), TransposesMinorDimension); case HloOpcode::kCopy: { int64_t first_non_trivial_operand_dim = 
MostMinorNonTrivialDimension(instr->operand(0)->shape()).first; int64_t first_non_trivial_output_dim = MostMinorNonTrivialDimension(instr->shape()).first; return first_non_trivial_operand_dim != first_non_trivial_output_dim; } case HloOpcode::kTranspose: { auto position_in_minor_to_major = InversePermutation( instr->operand(0)->shape().layout().minor_to_major()); int64_t position_of_first_non_trivial_dim = MostMinorNonTrivialDimension(instr->operand(0)->shape()).second; for (int64_t output_dim : instr->shape().layout().minor_to_major()) { if (instr->shape().dimensions()[output_dim] == 1) { continue; } int64_t operand_dim = instr->dimensions().at(output_dim); return position_in_minor_to_major[operand_dim] > position_of_first_non_trivial_dim; } return false; } default: return false; } } bool IsReduceInputFusion(const HloInstruction& instr) { return instr.opcode() == HloOpcode::kFusion && absl::c_any_of(GetFusionRoots(*instr.called_computations()[0]), [](const HloInstruction* root) { return IsRealReductionHero(*root, FindNonTrivialHero(*root)); }); } bool IsInputFusibleReduction(const HloInstruction& instr) { return IsReduceInputFusion(instr) || IsReductionFromOrToContiguousDimensions(instr); } bool IsNestableVariadicReduction(const HloInstruction& instr) { return instr.shape().IsTuple() && ((instr.opcode() == HloOpcode::kReduce && !IsReductionFromOrToContiguousDimensions(instr)) || (instr.opcode() == HloOpcode::kFusion && instr.fusion_kind() == HloInstruction::FusionKind::kLoop && instr.fused_expression_root()->opcode() == HloOpcode::kReduce)); } bool IsInputFusibleTranspose(const HloInstruction& instr) { if (instr.opcode() == HloOpcode::kBitcast || instr.IsCustomFusion()) { return false; } if (instr.opcode() == HloOpcode::kFusion) { return HasAnyTiledTransposeRoot(*instr.fused_instructions_computation()); } return GetDescriptionForTiledTransposeEmitter(instr).has_value(); } const HloInstruction* GetRealHeroForMultiOutputFusion( const HloInstruction& instr) { if 
(instr.opcode() != HloOpcode::kFusion) { return &instr; } auto fused_expression_root = instr.fused_expression_root(); if (!instr.IsMultiOutputFusion()) { const auto& hero = FindNonTrivialHero(*fused_expression_root); if (IsRealReductionHero(*fused_expression_root, hero) || GetDescriptionForTiledTransposeEmitter(hero).has_value()) { return &hero; } return fused_expression_root; } for (auto* inst : fused_expression_root->mutable_operands()) { const auto& hero = FindNonTrivialHero(*inst); if (IsRealReductionHero(*inst, hero) || GetDescriptionForTiledTransposeEmitter(hero).has_value()) { return &hero; } } return fused_expression_root->operands()[0]; } FusionDecision FusionHeroesAreCompatible(const HloInstruction* hero1, const HloInstruction* hero2) { auto hero1_is_unnested_reduce = IsReductionFromOrToContiguousDimensions(*hero1); auto tiled_transpose_hero1 = GetDescriptionForTiledTransposeEmitter(*hero1); bool hero1_is_unnested_transpose = tiled_transpose_hero1.has_value(); bool hero2_is_unnested_reduce = IsReductionFromOrToContiguousDimensions(*hero2); auto tiled_transpose_hero2 = GetDescriptionForTiledTransposeEmitter(*hero2); bool hero2_is_unnested_transpose = tiled_transpose_hero2.has_value(); if (hero1_is_unnested_reduce && hero2_is_unnested_reduce && !AreReductionsMultiOutputFusionCompatible(hero2, hero1)) { return FusionDecision::Forbid("tiled reductions with different shapes"); } else if (hero1_is_unnested_transpose && hero2_is_unnested_transpose && !tiled_transpose_hero1->IsEquivalent(*tiled_transpose_hero2)) { return FusionDecision::Forbid("tiled transposes with different shapes"); } else if ((hero1_is_unnested_transpose && hero2_is_unnested_reduce) || (hero1_is_unnested_reduce && hero2_is_unnested_transpose)) { return FusionDecision::Forbid("MOF-fusion of a transpose and a reduction"); } if (hero1_is_unnested_transpose || hero2_is_unnested_transpose) { auto check_path_of_intermediate_ops = [](HloInstruction* param) { if (param->user_count() != 1) { return 
false; } HloInstruction* hlo = param->users()[0]; while (hlo->user_count() > 0) { if (!IsIntermediate(hlo)) { return false; } hlo = hlo->users()[0]; } return true; }; HloInstruction* fusion1 = hero1->parent()->FusionInstruction(); HloInstruction* fusion2 = hero2->parent()->FusionInstruction(); if (fusion1 != nullptr && fusion2 != nullptr) { if (hero1_is_unnested_transpose && fusion2->IsUserOf(fusion1)) { int64_t operand_idx = fusion2->operand_index(fusion1); auto hlo = fusion2->fused_parameter(operand_idx); if (!check_path_of_intermediate_ops(hlo)) { return FusionDecision::Forbid("tiled transpose would become untiled"); } } else if (hero2_is_unnested_transpose && fusion1->IsUserOf(fusion2)) { int64_t operand_idx = fusion1->operand_index(fusion2); auto hlo = fusion1->fused_parameter(operand_idx); if (!check_path_of_intermediate_ops(hlo)) { return FusionDecision::Forbid("tiled transpose would become untiled"); } } } } return FusionDecision::Allow(); } FusionDecision ShapesCompatibleForMultiOutputFusion( const HloInstruction& instr1, const HloInstruction& instr2) { auto get_loop_shape = [&](const HloInstruction* element_instr) { const auto& hero = element_instr->parent()->IsFusionComputation() ? 
FindNonTrivialHero(*element_instr) : *element_instr;
    if (IsReductionFromOrToContiguousDimensions(*element_instr) ||
        GetDescriptionForTiledTransposeEmitter(hero).has_value()) {
      return hero.operand(0)->shape();
    }
    return element_instr->shape();
  };
  const HloInstruction* hero1 = GetRealHeroForMultiOutputFusion(instr1);
  const HloInstruction* hero2 = GetRealHeroForMultiOutputFusion(instr2);
  if (auto compatible = FusionHeroesAreCompatible(hero1, hero2); !compatible) {
    return compatible;
  }
  const Shape& l1 = get_loop_shape(hero1);
  const Shape& l2 = get_loop_shape(hero2);
  // Unequal shapes are tolerated only for non-tuple shapes related by a
  // reshape/transpose bitcast.
  bool accept_unequal_shape = !l1.IsTuple() && !l2.IsTuple();
  if (!ShapeUtil::EqualIgnoringElementType(l1, l2) &&
      (!accept_unequal_shape ||
       !ShapeUtil::IsReshapeOrTransposeBitcast(l1, l2, true))) {
    return FusionDecision::Forbid("different loop shapes");
  }
  return FusionDecision::Allow();
}

// True for a bare scatter or an input fusion whose root is a scatter.
bool IsInputFusibleScatter(const HloInstruction& instr) {
  if (instr.opcode() == HloOpcode::kScatter ||
      (instr.opcode() == HloOpcode::kFusion &&
       instr.fusion_kind() == HloInstruction::FusionKind::kInput &&
       instr.fused_expression_root()->opcode() == HloOpcode::kScatter)) {
    return true;
  }
  return false;
}

// Input-fusible = fusible and one of: reduction, scatter, transpose.
bool IsInputFusible(const HloInstruction& instr) {
  return instr.IsFusible() &&
         (IsInputFusibleReduction(instr) || IsInputFusibleScatter(instr) ||
          IsInputFusibleTranspose(instr));
}

// Ops that can always participate in a loop fusion (as long as they are
// fusible at all), regardless of producer/consumer position.
bool IsUniversallyLoopFusible(const HloInstruction& instr) {
  // Elementwise ops qualify, except nullary ones and copies (copies are
  // handled below because tiled-transpose copies must be excluded).
  if (instr.IsElementwise() && instr.operand_count() > 0 &&
      instr.opcode() != HloOpcode::kCopy) {
    return true;
  }
  switch (instr.opcode()) {
    case HloOpcode::kCopy:
      // A copy that the tiled-transpose emitter would claim is not loop-fusible.
      return !GetDescriptionForTiledTransposeEmitter(instr).has_value();
    case HloOpcode::kFusion:
      return instr.fusion_kind() == HloInstruction::FusionKind::kLoop;
    case HloOpcode::kBitcast:
    case HloOpcode::kBroadcast:
    case HloOpcode::kConcatenate:
    case HloOpcode::kDynamicSlice:
    case HloOpcode::kDynamicUpdateSlice:
    case HloOpcode::kGather:
    case HloOpcode::kPad:
    case HloOpcode::kReduceWindow:
    case HloOpcode::kReshape:
    case HloOpcode::kReverse:
case HloOpcode::kSlice:
    case HloOpcode::kTranspose:
      return true;
    default:
      return false;
  }
}

// Whether `instr` may be the consumer side of a loop fusion.
bool IsLoopFusibleAsConsumer(const HloInstruction& instr) {
  if (!instr.IsFusible()) return false;
  // Bitcasts carry no computation worth fusing into.
  if (instr.opcode() == HloOpcode::kBitcast) return false;
  // Any reduce (even a non-input-fusible one) is acceptable as a consumer.
  if (instr.opcode() == HloOpcode::kReduce) return true;
  // An input fusion that is not itself input-fusible can still consume.
  if (!IsInputFusible(instr) && instr.opcode() == HloOpcode::kFusion &&
      instr.fusion_kind() == HloInstruction::FusionKind::kInput) {
    return true;
  }
  return IsUniversallyLoopFusible(instr);
}

// Whether `instr` may be the producer side of a loop fusion.
bool IsLoopFusibleAsProducer(const HloInstruction& instr) {
  if (!instr.IsFusible()) return false;
  switch (instr.opcode()) {
    case HloOpcode::kIota:
    case HloOpcode::kConstant:
      return true;
    case HloOpcode::kReduce:
      // Variadic (tuple-shaped) reductions are not fusible as producers.
      return !instr.shape().IsTuple();
    default:
      return IsUniversallyLoopFusible(instr);
  }
}

// Applies `predicate` to `instr`, or to every non-parameter instruction
// inside it when `instr` is a fusion.
static bool AllSatisfy(const HloInstruction& instr,
                       const HloPredicate& predicate) {
  if (instr.opcode() != HloOpcode::kFusion) {
    return predicate(&instr);
  }
  return absl::c_all_of(
      instr.fused_instructions(), [&](const HloInstruction* i) {
        return i->opcode() == HloOpcode::kParameter || predicate(i);
      });
}

// Guards against fusions that would break scatter's in-place update: the
// producer must not be (or feed) the operand scatter updates in place.
FusionDecision CanEmitInputFusedScatter(const HloInstruction& producer,
                                        const HloInstruction& consumer) {
  if (IsInputFusibleScatter(producer)) {
    return FusionDecision::Forbid("do not fuse into the output of scatter");
  }
  if (!IsInputFusibleScatter(consumer)) {
    return FusionDecision::Allow();
  }
  const HloInstruction* inplace_operand;
  if (consumer.opcode() == HloOpcode::kFusion) {
    const HloInstruction* scatter = consumer.fused_expression_root();
    CHECK_EQ(scatter->opcode(), HloOpcode::kScatter);
    CHECK_EQ(scatter->operand(0)->opcode(), HloOpcode::kParameter);
    // Map the scatter's in-place parameter back to the fusion's operand.
    inplace_operand = consumer.operand(scatter->operand(0)->parameter_number());
  } else {
    inplace_operand = consumer.operand(0);
  }
  if (inplace_operand == &producer) {
    return FusionDecision::Forbid(
        "do not fuse into the in-place operand of scatter");
  }
  if (absl::c_linear_search(producer.operands(), inplace_operand)) {
    return
FusionDecision::Forbid(
        "Producer uses the in-place operand of a scatter");
  }
  return FusionDecision::Allow();
}

// Top-level producer/consumer fusibility check: combines loop-fusibility,
// reduction-epilogue, scatter, multi-output and constant-specific rules.
FusionDecision IsProducerConsumerFusible(const HloInstruction& producer,
                                         const HloInstruction& consumer) {
  if (!IsLoopFusibleAsProducer(producer) &&
      !IsInputFusibleTranspose(producer)) {
    return FusionDecision::Forbid("the producer is not loop-fusible");
  }
  if (IsInputFusibleReduction(producer)) {
    // Reduction epilogue fusion is gated behind a debug option.
    if (!producer.GetModule()
             ->config()
             .debug_options()
             .xla_gpu_enable_reduction_epilogue_fusion()) {
      return FusionDecision::Forbid(
          "Reduction epilogue fusion is not enabled.");
    }
    const HloInstruction& reduce_hero =
        producer.opcode() == HloOpcode::kFusion
            ? FindNonTrivialHero(*producer.fused_expression_root())
            : producer;
    if (!ReductionIsRaceFree(
            reduce_hero.GetModule()->config(),
            GetReductionKindAndContiguousComponents(reduce_hero))) {
      return FusionDecision::Forbid(
          "Reduction output fusion only works for race free reductions");
    }
    // The consumer epilogue must consist only of (near-)intermediate ops.
    if (!AllSatisfy(consumer, [](const HloInstruction* hlo) {
          return IsIntermediate(hlo, 1);
        })) {
      return FusionDecision::Forbid(
          "Reductions from/to continuous dims epilogue not fusible");
    }
    if (producer.user_count() > 1) {
      return FusionDecision::Forbid(
          "reduction output fusion only works for single user");
    }
  }
  if (auto can_fuse = CanEmitInputFusedScatter(producer, consumer); !can_fuse) {
    return can_fuse;
  }
  if (!IsInputFusible(consumer) && !IsLoopFusibleAsConsumer(consumer)) {
    return FusionDecision::Forbid(
        "the consumer is not input-fusible and not loop-fusible");
  }
  if (producer.IsMultiOutputFusion()) {
    return FusionDecision::Forbid(
        "the producer is not fusible as it is a multi-output fusion");
  }
  // Only effective-scalar constants feeding an existing fusion are fused.
  if (producer.opcode() == HloOpcode::kConstant &&
      (!ShapeUtil::IsEffectiveScalar(producer.shape()) ||
       consumer.opcode() != HloOpcode::kFusion)) {
    return FusionDecision::Forbid("not fusing constant");
  }
  return InstructionFusion::ShouldFuseInPlaceOp(&producer, &consumer);
}

// Whether `producer` qualifies as the producer of a NEW multi-output fusion.
FusionDecision IsProducerMultiOutputFusible(const HloInstruction&
producer) {
  if (producer.IsMultiOutputFusion()) {
    return FusionDecision::Forbid("Producer is a multi-output fusion");
  }
  // In-place input/output aliasing cannot be represented in a MOF.
  if (!HloDataflowAnalysis::GetInPlaceInputOutputPairs(&producer).empty()) {
    return FusionDecision::Forbid("In-place operations are present");
  }
  if (!IsLoopFusibleAsProducer(producer)) {
    return FusionDecision::Forbid("producer is not loop-fusible");
  }
  if (IsPhysicallyTransposing(producer)) {
    return FusionDecision::Forbid("producer is physically transposing");
  }
  return FusionDecision::Allow();
}

// Estimates the shared-memory bytes `instr` (or everything inside a fusion)
// would need; uncached counterpart of FusionInfoCache::GetSharedMemoryUsage.
static int64_t SharedMemoryUsageNoCache(const HloInstruction& instr) {
  if (instr.opcode() == HloOpcode::kFusion) {
    // Sum over all fused instructions recursively.
    int64_t sum = 0;
    for (const HloInstruction* hlo :
         instr.fused_instructions_computation()->instructions()) {
      sum += SharedMemoryUsageNoCache(*hlo);
    }
    return sum;
  } else if (instr.opcode() == HloOpcode::kReduce &&
             IsReductionFromOrToContiguousDimensions(instr)) {
    ReductionDimensions reduction_info =
        GetReductionKindAndContiguousComponents(instr);
    int64_t primitive_size = ShapeUtil::ByteSizeOfPrimitiveType(
        instr.operand(0)->shape().element_type());
    // Variadic reductions produce a tuple; each output needs its own buffer.
    int num_variadic = instr.shape().IsTuple() ?
instr.shape().tuple_shapes_size() : 1;
    if (reduction_info.is_row_reduction) {
      // Row reductions: one warp-sized scratch slot per output.
      return 32 * primitive_size * num_variadic;
    } else {
      // Column reductions: a 32x33 tile (33 avoids bank conflicts
      // — NOTE(review): presumed intent, confirm against emitter).
      return 4 * 32 * 33 * primitive_size * num_variadic;
    }
  } else if (auto tr = GetDescriptionForTiledTransposeEmitter(instr)) {
    // Tiled transpose: a 32x33 tile, scaled by the minor dimension when the
    // permutation keeps the last dimension in place.
    int64_t primitive_size =
        ShapeUtil::ByteSizeOfPrimitiveType(instr.shape().element_type());
    int64_t bytes_required = 32 * 33 * primitive_size;
    if (tr->permutation.back() == tr->permutation.size() - 1) {
      bytes_required *= tr->dimensions.back();
    }
    return bytes_required;
  }
  return 0;
}

// Cached wrapper; double-lookup under the mutex keeps the (recursive,
// lock-free) computation outside the critical section.
int64_t FusionInfoCache::GetSharedMemoryUsage(const HloInstruction& instr) {
  {
    absl::MutexLock lock(&mutex_);
    auto it = shared_memory_usage_.find(&instr);
    if (it != shared_memory_usage_.end()) {
      return it->second;
    }
  }
  int64_t shared_memory_usage = SharedMemoryUsageNoCache(instr);
  absl::MutexLock lock(&mutex_);
  shared_memory_usage_.emplace(&instr, shared_memory_usage);
  return shared_memory_usage;
}

// Free-function entry point; `cache` may be null.
int64_t SharedMemoryUsage(const HloInstruction& instr, FusionInfoCache* cache) {
  if (!cache) {
    return SharedMemoryUsageNoCache(instr);
  }
  return cache->GetSharedMemoryUsage(instr);
}

// Upper bound on unnested reduction outputs allowed in a single fusion.
constexpr int64_t kMaxUnnestedReductionOutputsPerFusion = 8;

// Counts unnested (contiguous-dims) reductions in `instr`, recursing into
// fusions; uncached counterpart of GetNumUnnestedReductions.
static int64_t NumUnnestedReductionsNoCache(const HloInstruction& instr) {
  if (instr.opcode() == HloOpcode::kReduce &&
      IsReductionFromOrToContiguousDimensions(instr)) {
    return 1;
  }
  if (instr.opcode() == HloOpcode::kFusion) {
    int64_t sum = 0;
    for (const HloInstruction* hlo :
         instr.fused_instructions_computation()->instructions()) {
      sum += NumUnnestedReductionsNoCache(*hlo);
    }
    return sum;
  }
  return 0;
}

// Cached wrapper; same locking pattern as GetSharedMemoryUsage.
int64_t FusionInfoCache::GetNumUnnestedReductions(const HloInstruction& instr) {
  {
    absl::MutexLock lock(&mutex_);
    auto it = num_unnested_reductions_.find(&instr);
    if (it != num_unnested_reductions_.end()) {
      return it->second;
    }
  }
  int64_t num_unnested_reductions = NumUnnestedReductionsNoCache(instr);
  absl::MutexLock lock(&mutex_);
  num_unnested_reductions_.emplace(&instr, num_unnested_reductions);
  return
num_unnested_reductions;
}

// Free-function entry point; `cache` may be null.
static int64_t NumUnnestedReductions(const HloInstruction& instr,
                                     FusionInfoCache* cache) {
  if (!cache) {
    return NumUnnestedReductionsNoCache(instr);
  }
  return cache->GetNumUnnestedReductions(instr);
}

// Checks resource budgets for fusing instr1+instr2: shared memory, number of
// unnested reductions, and operand/output buffer count.
FusionDecision FusionFitsInBudget(const HloInstruction& instr1,
                                  const HloInstruction& instr2,
                                  const se::DeviceDescription& device_info,
                                  bool is_consumer_producer_fusion,
                                  FusionInfoCache* cache) {
  if (SharedMemoryUsage(instr1, cache) + SharedMemoryUsage(instr2, cache) >
      device_info.shared_memory_per_block()) {
    return FusionDecision::Forbid(
               "shared memory usage would be over the budget of ")
           << device_info.shared_memory_per_block() << "B";
  }
  if (NumUnnestedReductions(instr1, cache) +
          NumUnnestedReductions(instr2, cache) >
      kMaxUnnestedReductionOutputsPerFusion) {
    return FusionDecision::Forbid("over ")
           << kMaxUnnestedReductionOutputsPerFusion
           << " unnested reductions in fusion";
  }
  int64_t num_output_buffers = ShapeUtil::SubshapeCount(instr1.shape()) +
                               ShapeUtil::SubshapeCount(instr2.shape());
  // Cheap upper-bound check first (-1: the fused operand disappears); only
  // if it fails do we compute the exact deduplicated operand set below.
  if (instr1.operand_count() + instr2.operand_count() - 1 +
          num_output_buffers <=
      MaxOperandsAndOutputsPerFusion()) {
    return FusionDecision::Allow();
  } else {
    VLOG(5) << "Operand count of " << "(" << instr1.ToString() << " ) = "
            << instr1.operand_count() << " and ( " << instr2.ToString()
            << " ) = " << instr2.operand_count()
            << " and num_output_buffers = " << num_output_buffers
            << " is bigger than the bound of "
            << MaxOperandsAndOutputsPerFusion();
  }
  // Exact count: union of operands minus the instructions themselves.
  absl::flat_hash_set<const HloInstruction*> operands(
      instr1.operands().begin(), instr1.operands().end());
  operands.insert(instr2.operands().begin(), instr2.operands().end());
  operands.erase(&instr1);
  operands.erase(&instr2);
  // A producer/consumer fusion that does not grow the operand set is fine.
  if (is_consumer_producer_fusion &&
      operands.size() <= instr1.operands().size()) {
    return FusionDecision::Allow();
  }
  if (operands.size() + num_output_buffers >
      MaxOperandsAndOutputsPerFusion()) {
    return FusionDecision::Forbid(
        "Number of operands and output buffers is larger than allowed 
budget " "per fusion");
  }
  return FusionDecision::Allow();
}

// True when fusing would duplicate expensive producer work into a consumer
// that reads (some of) its elements multiple times.
bool CreatesHeavyComputation(const HloInstruction& producer,
                             const HloInstruction& consumer) {
  // NOTE(review): the lambda ignores its parameter and captures `producer`
  // directly; it is only ever called with `producer`, so behavior matches.
  auto producer_is_heavy = [&](const HloInstruction& instr) {
    if (producer.opcode() != HloOpcode::kFusion) {
      return IsExpensiveToRepeat(producer);
    }
    for (const auto& instr : producer.fused_instructions()) {
      if (IsExpensiveToRepeat(*instr)) {
        return true;
      }
    }
    return false;
  };
  if (!producer_is_heavy(producer)) {
    return false;
  }
  if (consumer.opcode() != HloOpcode::kFusion) {
    return IfFusedReadsElementsMultipleTimes(consumer);
  }
  // For a fusion consumer: DFS from the parameter fed by `producer` over its
  // transitive users inside the fusion, looking for multi-read ops.
  for (const HloInstruction* operand : consumer.operands()) {
    if (operand != &producer) {
      continue;
    }
    const HloInstruction* root =
        consumer.fused_instructions_computation()->parameter_instruction(
            consumer.operand_index(operand));
    std::stack<const HloInstruction*> dfs;
    dfs.push(root);
    absl::flat_hash_set<const HloInstruction*> visited;
    while (!dfs.empty()) {
      const HloInstruction* cur = dfs.top();
      dfs.pop();
      if (!visited.insert(cur).second) {
        continue;
      }
      if (IfFusedReadsElementsMultipleTimes(*cur)) {
        return true;
      }
      for (const auto& user : cur->users()) {
        if (visited.contains(user)) {
          continue;
        }
        dfs.push(user);
      }
    }
  }
  return false;
}

// Whether `instr` may serve as one root of a multi-output fusion.
bool IsFusibleAsMultiOutputFusionRoot(const HloInstruction& instr) {
  return instr.IsFusible() && !instr.IsCustomFusion() &&
         (IsInputFusibleReduction(instr) || IsInputFusibleTranspose(instr) ||
          instr.IsLoopFusion() || instr.IsElementwise());
}

// kInput wins if either side is input-fusible, else kLoop.
HloInstruction::FusionKind ChooseFusionKind(const HloInstruction& producer,
                                            const HloInstruction& consumer) {
  return (IsInputFusible(consumer) || IsInputFusible(producer)) ?
HloInstruction::FusionKind::kInput : HloInstruction::FusionKind::kLoop;
}

// True iff every user of `instr` is `consumer`, the computation root, or a
// get-tuple-element that itself satisfies this recursively.
bool IsConsumerTheOnlyNonRootUser(const HloInstruction& instr,
                                  const HloInstruction& consumer) {
  return absl::c_all_of(instr.users(), [&](const HloInstruction* user) {
    if (user->opcode() == HloOpcode::kGetTupleElement) {
      // Look through GTEs to their users.
      return IsConsumerTheOnlyNonRootUser(*user, consumer);
    }
    return user == &consumer || user == user->parent()->root_instruction();
  });
}

// Instruction count: fused instruction count for fusions, 1 otherwise.
size_t GetInstrCountOfFusible(const HloInstruction& instr) {
  return instr.opcode() == HloOpcode::kFusion ? instr.fused_instruction_count()
                                              : 1;
}

// Output-defining instructions: the instruction itself, the fusion root, or
// the root tuple's operands for multi-output fusions.
absl::InlinedVector<const HloInstruction*, 2> GetOutputsOfFusible(
    const HloInstruction& instr) {
  if (instr.opcode() != HloOpcode::kFusion) {
    return {&instr};
  }
  HloInstruction* root = instr.fused_expression_root();
  if (root->opcode() != HloOpcode::kTuple) {
    return {root};
  } else {
    auto v = root->operands();
    return absl::InlinedVector<const HloInstruction*, 2>(v.begin(), v.end());
  }
}

// Number of outputs: tuple element count for MOFs, 1 otherwise.
size_t GetOutputSizeOfFusible(const HloInstruction& instr) {
  if (!instr.IsMultiOutputFusion()) {
    return 1;
  }
  const HloInstruction* root = instr.fused_expression_root();
  return ShapeUtil::TupleElementCount(root->shape());
}

// Collects the real root instructions, looking through tuples and
// GTE-of-tuple indirections.
static void GetFusionRootsRec(const HloInstruction* root,
                              std::vector<const HloInstruction*>& out) {
  if (root->opcode() == HloOpcode::kGetTupleElement &&
      root->operand(0)->opcode() == HloOpcode::kTuple) {
    return GetFusionRootsRec(root->operand(0)->operand(root->tuple_index()),
                             out);
  } else if (root->opcode() == HloOpcode::kGetTupleElement) {
    out.push_back(root->operand(0));
  } else if (root->opcode() == HloOpcode::kTuple) {
    for (int i = 0; i < root->operand_count(); i++) {
      GetFusionRootsRec(root->operand(i), out);
    }
  } else {
    out.push_back(root);
  }
}

std::vector<const HloInstruction*> GetFusionRoots(
    const HloComputation& computation) {
  std::vector<const HloInstruction*> out;
  GetFusionRootsRec(computation.root_instruction(), out);
  return out;
}

// A custom-kind fusion whose backend config names the Triton fusion kind.
bool IsGenericTritonFusion(const HloInstruction& instr) {
return instr.opcode() == HloOpcode::kFusion &&
         instr.fusion_kind() == HloInstruction::FusionKind::kCustom &&
         instr.backend_config<GpuBackendConfig>().ok() &&
         instr.backend_config<GpuBackendConfig>()
                 ->fusion_backend_config()
                 .kind() == kTritonFusionKind;
}

// True if any op in the fusion blocks vectorized/unrolled emission:
// transcendentals, sort/dot/reduce-window, wide concats, variadic reduces.
bool MayPreventVectorization(const HloFusionAdaptor& fusion) {
  // Concats wider than this are not unrolled.
  static constexpr int kMaxConcatArgumentsForUnrolling = 10;
  return HloAnyOf(fusion, [&](auto node) {
    switch (node.opcode()) {
      case HloOpcode::kReduceWindow:
      case HloOpcode::kSort:
      case HloOpcode::kDot:
      case HloOpcode::kSin:
      case HloOpcode::kCos:
      case HloOpcode::kTan:
      case HloOpcode::kPower:
      case HloOpcode::kAtan2:
        return true;
      case HloOpcode::kConcatenate:
        return node.instruction().operand_count() >
               kMaxConcatArgumentsForUnrolling;
      case HloOpcode::kReduce:
        // Variadic reductions (tuple output) block vectorization.
        return node.instruction().shape().tuple_shapes_size() > 1;
      default:
        return false;
    }
  });
}

// Post-order computations minus fusion bodies and computations called by
// opaque callers (anything but while/conditional/fusion).
std::vector<HloComputation*> GetFusibleComputations(
    const HloModule& module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  auto result = module.MakeComputationPostOrder(execution_threads);
  absl::flat_hash_set<const HloComputation*> computations_not_to_fuse;
  for (const auto* computation : result) {
    for (const auto* instr : computation->instructions()) {
      if (HloInstruction::MightHaveCalledComputations(instr->opcode()) &&
          instr->opcode() != HloOpcode::kWhile &&
          instr->opcode() != HloOpcode::kConditional &&
          instr->opcode() != HloOpcode::kFusion) {
        for (auto* called : instr->called_computations()) {
          computations_not_to_fuse.insert(called);
        }
      }
    }
  }
  result.erase(
      std::remove_if(result.begin(), result.end(),
                     [&](HloComputation* computation) {
                       return computation->IsFusionComputation() ||
                              computations_not_to_fuse.contains(computation);
                     }),
      result.end());
  return result;
}

// Convenience overload: derive the element shape from the analysis.
LaunchDimensionsConfig ComputeLoopFusionConfig(
    const HloFusionAnalysis& analysis) {
  return ComputeLoopFusionConfig(analysis, GetElementShape(analysis));
}

// Chooses the unroll factor for a loop fusion over `element_shape`.
LaunchDimensionsConfig ComputeLoopFusionConfig(
    const HloFusionAnalysis&
analysis, const Shape& element_shape) {
  int unroll_factor = 1;
  int64_t num_elements = ShapeUtil::ElementsIn(element_shape);
  int64_t n_threads_max = analysis.device_info().threads_per_core_limit() *
                          analysis.device_info().core_count();
  // Unroll only when there is more work than hardware threads and nothing in
  // the fusion blocks vectorization.
  if (num_elements >= n_threads_max &&
      !MayPreventVectorization(analysis.fusion())) {
    unroll_factor = ComputeMaxUnrollFactor(num_elements);
  }
  // Unroll factors are kept at powers of two.
  CHECK(absl::has_single_bit(static_cast<uint64_t>(unroll_factor)));
  // Ensure at least one byte per thread for narrow output dtypes.
  unroll_factor = std::max(
      unroll_factor,
      CeilOfRatio(8, analysis.input_output_info().smallest_output_dtype_bits));
  CHECK(absl::has_single_bit(static_cast<uint64_t>(unroll_factor)));
  VLOG(2) << "Unroll factor: " << unroll_factor;
  LaunchDimensionsConfig launch_config{unroll_factor};
  return launch_config;
}

}  // closes inner namespace
}  // closes outer namespace
// Unit tests for the gpu_fusible helpers above.
#include "xla/service/gpu/gpu_fusible.h"

#include <memory>
#include <vector>

#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace gpu {

using ::testing::ElementsAre;
using GpuFusibleTest = HloTestBase;

// Shared module header providing the scalar_add reducer used by the tests.
const char kModulePrefix[] = R"( HloModule test_module scalar_add { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) })";

// An elementwise producer (exp) does not physically transpose its data.
TEST_F(GpuFusibleTest, IsPhysicallyTransposing_ElementwiseProducer) {
  auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( ENTRY entry { p0 = f32[2,2,2]{2,1,0} parameter(0) c0 = f32[] constant(0) exp = f32[2,2,2]{2,1,0} exponential(p0) ROOT reduce = f32[2,2]{1,0} reduce(exp, c0), dimensions={2}, to_apply=scalar_add })"))
                    .value();
  SCOPED_TRACE(module->ToString());
  const HloInstruction* exp =
      module->entry_computation()->root_instruction()->operand(0);
  ASSERT_EQ(exp->opcode(), HloOpcode::kExp);
  EXPECT_FALSE(IsPhysicallyTransposing(*exp));
}

// A loop fusion containing a layout-changing copy IS physically transposing.
TEST_F(GpuFusibleTest, IsPhysicallyTransposing_MixedLayoutProducer) {
  auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( mixed_input_layouts_computation { p0.1 = f16[128,1024,32,32]{1,3,2,0} parameter(0) p1.1 = f16[128,1024,32,32]{3,2,1,0} parameter(1) copy = f16[128,1024,32,32]{1,3,2,0} copy(p1.1) c0 = f16[] constant(0) broadcast = f16[128,1024,32,32]{1,3,2,0} broadcast(c0), dimensions={} greater-than = pred[128,1024,32,32]{1,3,2,0} compare(copy, broadcast), direction=GT ROOT root = f16[128,1024,32,32]{1,3,2,0} select(greater-than, p0.1, broadcast) } fused_reduce { p0.2 = f16[128,1024,32,32]{1,3,2,0} parameter(0) convert = f32[128,1024,32,32]{1,3,2,0} convert(p0.2) c0.2 = f32[] constant(0) ROOT reduce = f32[1024]{0} reduce(convert, c0.2), dimensions={0,2,3}, to_apply=scalar_add } ENTRY entry { p0 = 
f16[128,1024,32,32]{1,3,2,0} parameter(0) p1 = f16[128,1024,32,32]{3,2,1,0} parameter(1) loop_fusion = f16[128,1024,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=mixed_input_layouts_computation reduce_fusion = f32[1024]{0} fusion(loop_fusion), kind=kInput, calls=fused_reduce ROOT root = (f32[1024]{0}, f16[128,1024,32,32]{1,3,2,0}) tuple(reduce_fusion, loop_fusion) })"))
                    .value();
  SCOPED_TRACE(module->ToString());
  const HloInstruction* loop_fusion =
      module->entry_computation()->root_instruction()->operand(1);
  ASSERT_EQ(loop_fusion->fused_expression_root()->opcode(), HloOpcode::kSelect);
  EXPECT_TRUE(IsPhysicallyTransposing(*loop_fusion));
}

// Same layout mismatch, but realized by a bitcast over trivial (size-1)
// dimensions — not a physical transpose.
TEST_F(GpuFusibleTest,
       IsPhysicallyTransposing_MixedLayoutProducerWithTrivialDim) {
  auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( mixed_input_layouts_computation { p0.1 = f16[128,1,32,32]{1,3,2,0} parameter(0) p1.1 = f16[128,1,32,32]{3,2,1,0} parameter(1) bitcast = f16[128,1,32,32]{1,3,2,0} bitcast(p1.1) c0 = f16[] constant(0) broadcast = f16[128,1,32,32]{1,3,2,0} broadcast(c0), dimensions={} greater-than = pred[128,1,32,32]{1,3,2,0} compare(bitcast, broadcast), direction=GT ROOT root = f16[128,1,32,32]{1,3,2,0} select(greater-than, p0.1, broadcast) } fused_reduce { p0.2 = f16[128,1,32,32]{1,3,2,0} parameter(0) convert = f32[128,1,32,32]{1,3,2,0} convert(p0.2) c0.2 = f32[] constant(0) ROOT reduce = f32[1]{0} reduce(convert, c0.2), dimensions={0,2,3}, to_apply=scalar_add } ENTRY entry { p0 = f16[128,1,32,32]{1,3,2,0} parameter(0) p1 = f16[128,1,32,32]{3,2,1,0} parameter(1) loop_fusion = f16[128,1,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=mixed_input_layouts_computation reduce_fusion = f32[1]{0} fusion(loop_fusion), kind=kInput, calls=fused_reduce ROOT root = (f32[1]{0}, f16[128,1,32,32]{1,3,2,0}) tuple(reduce_fusion, loop_fusion) })"))
                    .value();
  SCOPED_TRACE(module->ToString());
  const HloInstruction* loop_fusion =
      module->entry_computation()->root_instruction()->operand(1);
ASSERT_EQ(loop_fusion->fused_expression_root()->opcode(), HloOpcode::kSelect);
  EXPECT_FALSE(IsPhysicallyTransposing(*loop_fusion));
}

// A bare layout-changing copy counts as physically transposing.
TEST_F(GpuFusibleTest, IsPhysicallyTransposing_CopyProducer) {
  auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_reduce { p0.1 = f32[128,1024,32,32]{1,3,2,0} parameter(0) c0.1 = f32[] constant(0) ROOT reduce = f32[1024]{0} reduce(p0.1, c0.1), dimensions={0,2,3}, to_apply=scalar_add } ENTRY entry { p0 = f16[128,1024,32,32]{3,2,1,0} parameter(0) copy = f32[128,1024,32,32]{1,3,2,0} copy(p0) ROOT reduce_fusion = f32[1024]{0} fusion(copy), kind=kInput, calls=fused_reduce })"))
                    .value();
  SCOPED_TRACE(module->ToString());
  const HloInstruction* copy =
      module->entry_computation()->root_instruction()->operand(0);
  ASSERT_EQ(copy->opcode(), HloOpcode::kCopy);
  EXPECT_TRUE(IsPhysicallyTransposing(*copy));
}

// A logical transpose op likewise counts as physically transposing.
TEST_F(GpuFusibleTest, IsPhysicallyTransposing_PhysicalTranspose) {
  auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_reduce { p0.1 = f32[1024,128,32,32]{3,2,1,0} parameter(0) c0.1 = f32[] constant(0) ROOT reduce = f32[1024]{0} reduce(p0.1, c0.1), dimensions={1,2,3}, to_apply=scalar_add } ENTRY entry { p0 = f16[128,1024,32,32]{3,2,1,0} parameter(0) copy = f32[1024,128,32,32]{3,2,1,0} transpose(p0), dimensions={1,0,2,3} ROOT reduce_fusion = f32[1024]{0} fusion(copy), kind=kInput, calls=fused_reduce })"))
                    .value();
  SCOPED_TRACE(module->ToString());
  const HloInstruction* transpose =
      module->entry_computation()->root_instruction()->operand(0);
  ASSERT_EQ(transpose->opcode(), HloOpcode::kTranspose);
  EXPECT_TRUE(IsPhysicallyTransposing(*transpose));
}

// A fusion whose root copy changes layout is physically transposing.
TEST_F(GpuFusibleTest, IsPhysicallyTransposing_LayoutChangingFusionProducer) {
  auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( layout_changing_computation { p0.1 = f16[128,1024,32,32]{3,2,1,0} parameter(0) p1.1 = f16[128,1024,32,32]{3,2,1,0} parameter(1) c0 = f16[] constant(0) broadcast = f16[128,1024,32,32]{3,2,1,0} 
broadcast(c0), dimensions={} greater-than = pred[128,1024,32,32]{3,2,1,0} compare(p1.1, broadcast), direction=GT select = f16[128,1024,32,32]{3,2,1,0} select(greater-than, p0.1, broadcast) ROOT root = f16[128,1024,32,32]{1,3,2,0} copy(select) } fused_reduce { p0.2 = f16[128,1024,32,32]{1,3,2,0} parameter(0) convert = f32[128,1024,32,32]{1,3,2,0} convert(p0.2) c0.2 = f32[] constant(0) ROOT reduce = f32[1024]{0} reduce(convert, c0.2), dimensions={0,2,3}, to_apply=scalar_add } ENTRY entry { p0 = f16[128,1024,32,32]{3,2,1,0} parameter(0) p1 = f16[128,1024,32,32]{3,2,1,0} parameter(1) loop_fusion = f16[128,1024,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=layout_changing_computation ROOT reduce_fusion = f32[1024]{0} fusion(loop_fusion), kind=kInput, calls=fused_reduce })"))
                    .value();
  SCOPED_TRACE(module->ToString());
  const HloInstruction* loop_fusion =
      module->entry_computation()->root_instruction()->operand(0);
  ASSERT_EQ(loop_fusion->fused_expression_root()->opcode(), HloOpcode::kCopy);
  EXPECT_TRUE(IsPhysicallyTransposing(*loop_fusion));
}

// A low-rank parameter reshaped+broadcast inside the fusion must not make the
// fusion look physically transposing; only max-true-rank params count.
TEST_F(GpuFusibleTest,
       IsPhysicallyTransposing_ConsiderMaximumTrueRanksParamsOnly) {
  auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( broadcasting_computation { p0.1 = f32[128,1024,32,32]{1,3,2,0} parameter(0) p1.1 = f32[1,128,1,1]{3,2,1,0} parameter(1) reshape = f32[128]{0} reshape(p1.1) broadcast = f32[128,1024,32,32]{1,3,2,0} broadcast(reshape), dimensions={0} ROOT add = f32[128,1024,32,32]{1,3,2,0} add(p0.1, broadcast) } ENTRY entry { p0 = f32[128,1024,32,32]{1,3,2,0} parameter(0) p1 = f32[1,128,1,1]{3,2,1,0} parameter(1) loop_fusion = f32[128,1024,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=broadcasting_computation c0.2 = f32[] constant(0) ROOT reduce = f32[1024]{0} reduce(loop_fusion, c0.2), dimensions={0,2,3}, to_apply=scalar_add })"))
                    .value();
  SCOPED_TRACE(module->ToString());
  const HloInstruction* loop_fusion =
      module->entry_computation()->root_instruction()->operand(0);
ASSERT_EQ(loop_fusion->fused_expression_root()->opcode(), HloOpcode::kAdd);
  EXPECT_FALSE(IsPhysicallyTransposing(*loop_fusion));
}

// Transposes touching the physically-minor dimension are detected for both
// default and non-default layouts.
TEST_F(GpuFusibleTest, TransposesMinorDimension) {
  auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( ENTRY entry { default_layout = f32[10,20,30,40]{3,2,1,0} parameter(0) non_default_layout = f32[10,20,30,40]{1,2,3,0} parameter(1) transpose_minor_default = f32[10,20,40,30]{3,2,1,0} transpose(default_layout), dimensions={0,1,3,2} no_transpose_minor_default = f32[10,20,40,30]{2,3,1,0} transpose(default_layout), dimensions={0,1,3,2} transpose_major_default = f32[10,30,20,40]{3,2,1,0} transpose(default_layout), dimensions={0,2,1,3} transpose_minor_non_default = f32[10,30,20,40]{1,2,3,0} transpose(non_default_layout), dimensions={0,2,1,3} no_transpose_minor_non_default = f32[10,20,40,30]{1,2,0,3} transpose(non_default_layout), dimensions={0,1,3,2} transpose_major_non_default = f32[10,20,40,30]{1,2,3,0} transpose(non_default_layout), dimensions={0,1,3,2} ROOT r = tuple(transpose_minor_default, no_transpose_minor_default, transpose_major_default, transpose_minor_non_default, no_transpose_minor_non_default, transpose_major_non_default) })"));
  auto* tuple = (*module)->entry_computation()->root_instruction();
  EXPECT_TRUE(TransposesMinorDimension(tuple->operand(0)));
  EXPECT_FALSE(TransposesMinorDimension(tuple->operand(1)));
  EXPECT_FALSE(TransposesMinorDimension(tuple->operand(2)));
  EXPECT_TRUE(TransposesMinorDimension(tuple->operand(3)));
  EXPECT_FALSE(TransposesMinorDimension(tuple->operand(4)));
  EXPECT_FALSE(TransposesMinorDimension(tuple->operand(5)));
}

// Size-1 dimensions are ignored when deciding minor-dimension transposes.
TEST_F(GpuFusibleTest, TransposesMinorDimensionSkipTrivialDimensions) {
  auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( ENTRY entry { default_layout = f32[10,20,1,1]{3,2,1,0} parameter(0) non_default_layout = f32[10,20,1,1]{1,2,3,0} parameter(1) transpose_minor_default = f32[10,20,1,1]{3,2,1,0} transpose(default_layout), dimensions={0,1,3,2} 
transpose_nontrivial_minor_default = f32[10,1,20,1]{3,2,1,0} transpose(default_layout), dimensions={0,2,1,3} no_transpose_minor_default = f32[10,20,1,1]{2,3,1,0} transpose(default_layout), dimensions={0,1,3,2} transpose_one_major_default = f32[1,20,10,1]{3,2,1,0} transpose(default_layout), dimensions={2,1,0,3} transpose_two_major_default = f32[20,10,1,1]{3,2,1,0} transpose(default_layout), dimensions={1,0,2,3} transpose_minor_non_default = f32[10,1,20,1]{1,2,3,0} transpose(non_default_layout), dimensions={0,2,1,3} no_transpose_minor_non_default = f32[10,20,1,1]{1,2,0,3} transpose(non_default_layout), dimensions={0,1,3,2} transpose_major_non_default = f32[10,20,1,1]{1,2,3,0} transpose(non_default_layout), dimensions={0,1,3,2} ROOT r = tuple(transpose_minor_default, transpose_nontrivial_minor_default, no_transpose_minor_default, transpose_one_major_default, transpose_two_major_default, transpose_minor_non_default, no_transpose_minor_non_default, transpose_major_non_default) })"));
  auto* tuple = (*module)->entry_computation()->root_instruction();
  EXPECT_FALSE(TransposesMinorDimension(tuple->operand(0)));
  EXPECT_FALSE(TransposesMinorDimension(tuple->operand(1)));
  EXPECT_FALSE(TransposesMinorDimension(tuple->operand(2)));
  EXPECT_TRUE(TransposesMinorDimension(tuple->operand(3)));
  EXPECT_TRUE(TransposesMinorDimension(tuple->operand(4)));
  EXPECT_FALSE(TransposesMinorDimension(tuple->operand(5)));
  EXPECT_FALSE(TransposesMinorDimension(tuple->operand(6)));
  EXPECT_FALSE(TransposesMinorDimension(tuple->operand(7)));
}

// Layout-changing copies that swap the minor dimension are detected.
TEST_F(GpuFusibleTest, CopyTransposesMinorDimension) {
  auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( ENTRY entry { default_layout = f32[10,20,30,40]{3,2,1,0} parameter(0) non_default_layout = f32[10,20,30,40]{1,2,3,0} parameter(1) copy_transpose_minor_default = f32[10,20,30,40]{2,3,1,0} copy(default_layout) copy_no_transpose_minor_default = f32[10,20,30,40]{3,2,1,0} copy(default_layout) copy_transpose_minor_non_default = 
f32[10,20,30,40]{2,1,3,0} copy(non_default_layout) copy_no_transpose_minor_non_default = f32[10,20,30,40]{1,2,3,0} copy(non_default_layout) ROOT r = tuple(copy_transpose_minor_default, copy_no_transpose_minor_default, copy_transpose_minor_non_default, copy_no_transpose_minor_non_default) })"));
  auto* tuple = (*module)->entry_computation()->root_instruction();
  EXPECT_TRUE(TransposesMinorDimension(tuple->operand(0)));
  EXPECT_FALSE(TransposesMinorDimension(tuple->operand(1)));
  EXPECT_TRUE(TransposesMinorDimension(tuple->operand(2)));
  EXPECT_FALSE(TransposesMinorDimension(tuple->operand(3)));
}

// Trivial (size-1) dimensions are skipped for copy-based transposes too.
TEST_F(GpuFusibleTest, CopyTransposesMinorDimensionSkipTrivialDimensions) {
  auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( ENTRY entry { default_layout = f32[10,20,1,1]{3,2,1,0} parameter(0) non_default_layout = f32[10,20,1,1]{1,2,3,0} parameter(1) copy_transpose_minor_default = f32[10,20,1,1]{2,3,1,0} copy(default_layout) copy_no_transpose_minor_default = f32[10,20,1,1]{3,2,1,0} copy(default_layout) copy_transpose_minor_non_default = f32[10,20,1,1]{2,0,3,1} copy(non_default_layout) copy_no_transpose_minor_non_default = f32[10,20,1,1]{1,2,3,0} copy(non_default_layout) ROOT r = tuple(copy_transpose_minor_default, copy_no_transpose_minor_default, copy_transpose_minor_non_default, copy_no_transpose_minor_non_default) })"));
  auto* tuple = (*module)->entry_computation()->root_instruction();
  EXPECT_FALSE(TransposesMinorDimension(tuple->operand(0)));
  EXPECT_FALSE(TransposesMinorDimension(tuple->operand(1)));
  EXPECT_TRUE(TransposesMinorDimension(tuple->operand(2)));
  EXPECT_FALSE(TransposesMinorDimension(tuple->operand(3)));
}

// A bare reduce-to-vector is input-fusible but is not itself a reduce fusion.
TEST_F(GpuFusibleTest, IsReduceInputFusion_ReductionToVector) {
  auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( ENTRY entry { c0 = f32[] parameter(0) p1 = f32[128,512,28,28]{3,2,1,0} parameter(1) ROOT reduce = f32[512]{0} reduce(p1, c0), dimensions={0,2,3}, to_apply=scalar_add })"))
                    .value();
SCOPED_TRACE(module->ToString());
  const HloInstruction* reduce =
      module->entry_computation()->root_instruction();
  ASSERT_EQ(reduce->opcode(), HloOpcode::kReduce);
  EXPECT_FALSE(IsReduceInputFusion(*reduce));
  EXPECT_TRUE(IsInputFusibleReduction(*reduce));
}

// An elemental (non-contiguous-dims) reduction is neither a reduce input
// fusion nor input-fusible.
TEST_F(GpuFusibleTest, IsReduceInputFusion_ElementalReduction) {
  auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( ENTRY entry { c0 = f32[] parameter(0) p1 = f32[8,512,5,16,1,1]{5,4,3,2,1,0} parameter(1) ROOT reduce = f32[512,5,1,1]{3,2,1,0} reduce(p1, c0), dimensions={3,0}, to_apply=scalar_add })"))
                    .value();
  SCOPED_TRACE(module->ToString());
  const HloInstruction* reduce =
      module->entry_computation()->root_instruction();
  ASSERT_EQ(reduce->opcode(), HloOpcode::kReduce);
  EXPECT_FALSE(IsReduceInputFusion(*reduce));
  EXPECT_FALSE(IsInputFusibleReduction(*reduce));
}

// A kInput fusion rooted at a contiguous-dims reduce is a reduce input fusion.
TEST_F(GpuFusibleTest, IsReduceInputFusion_SingleOutputInputReduceFusion) {
  auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_reduction { c0 = f32[] constant(0) p1 = f32[128,512,28,28]{3,2,1,0} parameter(0) ROOT reduce = f32[128,512]{1,0} reduce(p1, c0), dimensions={2,3}, to_apply=scalar_add } ENTRY entry { p0 = f32[128,512,28,28]{3,2,1,0} parameter(0) ROOT fusion = f32[128,512]{1,0} fusion(p0), kind=kInput, calls=fused_reduction })"))
                    .value();
  const HloInstruction* reduce =
      module->entry_computation()->root_instruction();
  ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);
  EXPECT_TRUE(IsReduceInputFusion(*reduce));
  EXPECT_TRUE(IsInputFusibleReduction(*reduce));
}

// A kLoop fusion rooted at a non-contiguous reduce is not a reduce fusion.
TEST_F(GpuFusibleTest, IsReduceInputFusion_SingleOutputLoopReduceFusion) {
  auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_reduction { c0 = f32[] constant(0) p1 = f32[8,512,5,16,1,1]{5,4,3,2,1,0} parameter(0) ROOT reduce = f32[8,5,1,1]{3,2,1,0} reduce(p1, c0), dimensions={1,3}, to_apply=scalar_add } ENTRY entry { p0 = f32[8,512,5,16,1,1]{5,4,3,2,1,0} parameter(0) ROOT fusion = f32[8,5,1,1]{3,2,1,0} fusion(p0), 
kind=kLoop, calls=fused_reduction })")) .value(); const HloInstruction* reduce = module->entry_computation()->root_instruction(); ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion); EXPECT_FALSE(IsReduceInputFusion(*reduce)); EXPECT_FALSE(IsInputFusibleReduction(*reduce)); } TEST_F(GpuFusibleTest, IsReduceInputFusion_MultiOutputInputReduceFusion) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_reduction { c0 = f32[] constant(0) p1 = f32[128,512,28,28]{3,2,1,0} parameter(0) reduce.0 = f32[128,512]{1,0} reduce(p1, c0), dimensions={2,3}, to_apply=scalar_add reduce.1 = f32[128,512]{1,0} reduce(p1, c0), dimensions={2,3}, to_apply=scalar_add ROOT root = (f32[128,512]{1,0}, f32[128,512]{1,0}) tuple(reduce.0, reduce.1) } ENTRY entry { p0 = f32[128,512,28,28]{3,2,1,0} parameter(0) ROOT fusion = (f32[128,512]{1,0}, f32[128,512]{1,0}) fusion(p0), kind=kInput, calls=fused_reduction })")) .value(); const HloInstruction* reduce = module->entry_computation()->root_instruction(); ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion); EXPECT_TRUE(IsReduceInputFusion(*reduce)); EXPECT_TRUE(IsInputFusibleReduction(*reduce)); } TEST_F(GpuFusibleTest, IsReduceInputFusion_MultiOutputInputReduceFusionWithExtraOutputs) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_reduction { c0 = f32[] constant(0) p1 = f32[128,512,28,28]{3,2,1,0} parameter(0) reduce = f32[128,512]{1,0} reduce(p1, c0), dimensions={2,3}, to_apply=scalar_add mul = f32[128,512,28,28]{3,2,1,0} multiply(p1, p1) ROOT root = (f32[128,512]{1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(reduce, mul) } ENTRY entry { p0 = f32[128,512,28,28]{3,2,1,0} parameter(0) ROOT fusion = (f32[128,512]{1,0}, f32[128,512,28,28]{3,2,1,0}) fusion(p0), kind=kInput, calls=fused_reduction })")) .value(); const HloInstruction* reduce = module->entry_computation()->root_instruction(); ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion); EXPECT_TRUE(IsReduceInputFusion(*reduce)); 
EXPECT_TRUE(IsInputFusibleReduction(*reduce)); } TEST_F(GpuFusibleTest, IsReduceInputFusion_MultiOutputLoopReduceFusion) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_reduction { c0 = f32[] constant(0) p1 = f32[128,512,28,28]{3,2,1,0} parameter(0) reduce.0 = f32[512,28]{1,0} reduce(p1, c0), dimensions={0,2}, to_apply=scalar_add reduce.1 = f32[512,28]{1,0} reduce(p1, c0), dimensions={0,2}, to_apply=scalar_add ROOT root = (f32[512,28]{1,0}, f32[512,28]{1,0}) tuple(reduce.0, reduce.1) } ENTRY entry { p0 = f32[128,512,28,28]{3,2,1,0} parameter(0) ROOT fusion = (f32[512,28]{1,0}, f32[512,28]{1,0}) fusion(p0), kind=kLoop, calls=fused_reduction })")) .value(); const HloInstruction* reduce = module->entry_computation()->root_instruction(); ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion); EXPECT_FALSE(IsReduceInputFusion(*reduce)); EXPECT_FALSE(IsInputFusibleReduction(*reduce)); } TEST_F(GpuFusibleTest, IsReduceInputFusion_MultiOutputLoopFusionReduceAndElementwiseOp) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_reduction { c0 = f32[] constant(0) p1 = f32[128,512,28,28]{3,2,1,0} parameter(0) reduce = f32[512,28]{1,0} reduce(p1, c0), dimensions={0,2}, to_apply=scalar_add mul = f32[128,512,28,28]{3,2,1,0} multiply(p1, p1) ROOT root = (f32[512,28]{1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(reduce, mul) } ENTRY entry { p0 = f32[128,512,28,28]{3,2,1,0} parameter(0) ROOT fusion = (f32[512,28]{1,0}, f32[128,512,28,28]{3,2,1,0}) fusion(p0), kind=kLoop, calls=fused_reduction })")) .value(); const HloInstruction* reduce = module->entry_computation()->root_instruction(); ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion); EXPECT_FALSE(IsReduceInputFusion(*reduce)); EXPECT_FALSE(IsInputFusibleReduction(*reduce)); } TEST_F(GpuFusibleTest, CustomFusionIsNotFusibleAsConsumer) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( triton_fusion { p = s32[20,3] parameter(0) 
ROOT neg = s32[20,3] negate(p) } ENTRY e { p = s32[20,3] parameter(0) ROOT r = s32[20,3] fusion(p), kind=kCustom, calls=triton_fusion })")); const HloInstruction* root = module->entry_computation()->root_instruction(); EXPECT_FALSE(IsFusibleAsMultiOutputFusionRoot(*root)); } TEST_F(GpuFusibleTest, FusionHeroesAreCompatible_TransposeFusionCompatible) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation_1 { p0.1 = f32[64,32]{1,0} parameter(0) neg = f32[64,32]{1,0} negate(p0.1) ROOT transpose = f32[32,64]{1,0} transpose(neg), dimensions={1,0} } fused_computation_2 { p0.2 = f32[32,64]{1,0} parameter(0) neg = f32[32,64]{1,0} negate(p0.2) ROOT add = f32[32,64]{1,0} add(neg, neg) } ENTRY entry { p0 = f32[64,32]{1,0} parameter(0) fusion.1 = f32[32,64]{1,0} fusion(p0), kind=kLoop, calls=fused_computation_1 ROOT fusion.2 = f32[32,64]{1,0} fusion(fusion.1), kind=kLoop, calls=fused_computation_2 })")) .value(); const HloInstruction* fusion_1 = module->entry_computation()->root_instruction(); const HloInstruction* fusion_2 = fusion_1->operand(0); EXPECT_TRUE(FusionHeroesAreCompatible(fusion_1->fused_expression_root(), fusion_2->fused_expression_root())); EXPECT_TRUE(FusionHeroesAreCompatible(fusion_2->fused_expression_root(), fusion_1->fused_expression_root())); } TEST_F(GpuFusibleTest, FusionHeroesAreCompatible_TransposeFusionNotCompatible) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation_1 { p0.1 = f32[64,32]{1,0} parameter(0) neg = f32[64,32]{1,0} negate(p0.1) bc = f32[1,64,32]{2,1,0} bitcast(neg) transpose = f32[1,32,64]{2,1,0} transpose(bc), dimensions={0,2,1} ROOT bc2 = f32[32,64]{1,0} bitcast(transpose) } fused_computation_2 { p0.2 = f32[32,64]{1,0} parameter(0) broadcast = f32[32,64,4]{2,1,0} broadcast(p0.2), dimensions={0,1} ROOT add = f32[32,64,4]{2,1,0} add(broadcast, broadcast) } ENTRY entry { p0 = f32[64,32]{1,0} parameter(0) fusion.1 = f32[32,64]{1,0} fusion(p0), kind=kLoop, 
calls=fused_computation_1 ROOT fusion.2 = f32[32,64,4]{2,1,0} fusion(fusion.1), kind=kLoop, calls=fused_computation_2 })")) .value(); const HloInstruction* fusion_1 = module->entry_computation()->root_instruction(); const HloInstruction* fusion_2 = fusion_1->operand(0); EXPECT_FALSE( FusionHeroesAreCompatible(fusion_1->fused_expression_root(), fusion_2->fused_expression_root()->operand(0))); EXPECT_FALSE( FusionHeroesAreCompatible(fusion_2->fused_expression_root()->operand(0), fusion_1->fused_expression_root())); } TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_LoopFusions) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation_1 { p0.1 = f32[6400]{0} parameter(0) ROOT mul = f32[6400]{0} multiply(p0.1, p0.1) } fused_computation_2 { p0.2 = f32[6400]{0} parameter(0) const.2 = f32[] constant(1) broadcast = f32[6400]{0} broadcast(const.2), dimensions={} ROOT div = f32[6400]{0} divide(p0.2, broadcast) } ENTRY entry { p0 = f32[6400]{0} parameter(0) fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1 fusion.2 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_2 ROOT root = (f32[6400]{0}, f32[6400]{0}) tuple(fusion.1, fusion.2) })")) .value(); const HloInstruction* fusion_1 = module->entry_computation()->root_instruction()->operand(0); const HloInstruction* fusion_2 = module->entry_computation()->root_instruction()->operand(1); EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2)); } TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_IgnoreFpPrecision) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation_1 { p0.1 = f32[6400]{0} parameter(0) ROOT mul = f32[6400]{0} multiply(p0.1, p0.1) } fused_computation_2 { p0.2 = f32[6400]{0} parameter(0) ROOT convert = f16[6400]{0} convert(p0.2) } ENTRY entry { p0 = f32[6400]{0} parameter(0) fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1 fusion.2 = 
f16[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_2 ROOT root = (f32[6400]{0}, f16[6400]{0}) tuple(fusion.1, fusion.2) })")) .value(); const HloInstruction* fusion_1 = module->entry_computation()->root_instruction()->operand(0); const HloInstruction* fusion_2 = module->entry_computation()->root_instruction()->operand(1); EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2)); } TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_BitcastCompatible) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation_1 { p0.1 = f32[6400]{0} parameter(0) ROOT mul = f32[6400]{0} multiply(p0.1, p0.1) } fused_computation_2 { p0.2 = f32[6400]{0} parameter(0) bitcast = f32[1,6400]{1,0} bitcast(p0.2) ROOT convert = f16[1,6400]{1,0} convert(bitcast) } ENTRY entry { p0 = f32[6400]{0} parameter(0) fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1 fusion.2 = f16[1,6400]{1,0} fusion(p0), kind=kLoop, calls=fused_computation_2 ROOT root = (f32[6400]{0}, f16[1,6400]{1,0}) tuple(fusion.1, fusion.2) })")) .value(); const HloInstruction* fusion_1 = module->entry_computation()->root_instruction()->operand(0); const HloInstruction* fusion_2 = module->entry_computation()->root_instruction()->operand(1); EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2)); } TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_Reduce) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation_1 { p0.1 = f32[6400]{0} parameter(0) ROOT mul = f32[6400]{0} multiply(p0.1, p0.1) } ENTRY entry { p0 = f32[6400]{0} parameter(0) fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1 const.2 = f32[] constant(0) reduce = f32[] reduce(p0, const.2), dimensions={0}, to_apply=scalar_add ROOT root = (f32[6400]{0}, f32[]) tuple(fusion.1, reduce) })")) .value(); const HloInstruction* fusion = module->entry_computation()->root_instruction()->operand(0); const 
HloInstruction* reduce = module->entry_computation()->root_instruction()->operand(1); EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion, *reduce)); } TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_Elementwise) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation_1 { p0.1 = f32[6400]{0} parameter(0) ROOT mul = f32[6400]{0} multiply(p0.1, p0.1) } ENTRY entry { p0 = f32[6400]{0} parameter(0) fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1 const.2 = f32[] constant(1) broadcast = f32[6400]{0} broadcast(const.2), dimensions={} div = f32[6400]{0} divide(p0, broadcast) ROOT root = (f32[6400]{0}, f32[6400]{0}) tuple(fusion.1, div) })")) .value(); const HloInstruction* fusion = module->entry_computation()->root_instruction()->operand(0); const HloInstruction* div = module->entry_computation()->root_instruction()->operand(1); EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion, *div)); } TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_MultiOutputLoopFusion) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation_1 { p0.1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0) mul = f32[8,1,5,16,1,1]{5,4,3,2,1,0} multiply(p0.1, p0.1) exp = f32[8,1,5,16,1,1]{5,4,3,2,1,0} exponential(p0.1) ROOT tuple = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}) tuple(mul, exp) } fused_computation_2 { p0.2 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0) const.2 = f32[] constant(0) broadcast = f32[8,1,5,16,1,1]{5,4,3,2,1,0} broadcast(const.2), dimensions={} ROOT add = f32[8,1,5,16,1,1]{5,4,3,2,1,0} add(p0.2, broadcast) } ENTRY entry { p0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0) fusion.1 = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}) fusion(p0), kind=kLoop, calls=fused_computation_1 fusion.2 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} fusion(p0), kind=kLoop, calls=fused_computation_2 gte0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} 
get-tuple-element(fusion.1), index=0 gte1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=1 ROOT root = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}) tuple(gte0, gte1, fusion.2) })")) .value(); const HloInstruction* fusion_1 = module->entry_computation()->root_instruction()->operand(0)->operand(0); const HloInstruction* fusion_2 = module->entry_computation()->root_instruction()->operand(2); EXPECT_NE(fusion_1, fusion_2); EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2)); } TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_DifferentElementType) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation_1 { p0.1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0) mul = f32[8,1,5,16,1,1]{5,4,3,2,1,0} multiply(p0.1, p0.1) exp = f32[8,1,5,16,1,1]{5,4,3,2,1,0} exponential(p0.1) ROOT tuple = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}) tuple(mul, exp) } fused_computation_2 { p0.2 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0) const.2 = f32[] constant(0) broadcast = f32[8,1,5,16,1,1]{5,4,3,2,1,0} broadcast(const.2), dimensions={} add = f32[8,1,5,16,1,1]{5,4,3,2,1,0} add(p0.2, broadcast) ROOT convert = s32[8,1,5,16,1,1]{5,4,3,2,1,0} convert(add) } ENTRY entry { p0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0) fusion.1 = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}) fusion(p0), kind=kLoop, calls=fused_computation_1 fusion.2 = s32[8,1,5,16,1,1]{5,4,3,2,1,0} fusion(p0), kind=kLoop, calls=fused_computation_2 gte0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=0 gte1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=1 ROOT root = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}, s32[8,1,5,16,1,1]{5,4,3,2,1,0}) tuple(gte0, gte1, fusion.2) })")) .value(); const HloInstruction* fusion_1 = module->entry_computation()->root_instruction()->operand(0)->operand(0); 
const HloInstruction* fusion_2 = module->entry_computation()->root_instruction()->operand(2); EXPECT_NE(fusion_1, fusion_2); EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2)); } TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_UnfusedOps) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( ENTRY reduce { p0 = f32[32,32,32]{2,1,0} parameter(0) c0 = f32[] constant(0) exp = f32[32,32,32]{2,1,0} exponential(p0) reduce = f32[32,32]{1,0} reduce(exp, c0), dimensions={2}, to_apply=scalar_add ROOT root = (f32[32,32]{1,0}, f32[32,32,32]{2,1,0}) tuple(reduce, exp) })")) .value(); const HloInstruction* reduce = module->entry_computation()->root_instruction()->operand(0); const HloInstruction* exp = module->entry_computation()->root_instruction()->operand(1); EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*reduce, *exp)); } TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_DifferentLayouts) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( ENTRY reduce { p0 = f32[2,2,2]{2,1,0} parameter(0) p1 = f32[2,2,2]{0,1,2} parameter(1) c0 = f32[] constant(0) exp = f32[2,2,2]{2,1,0} exponential(p0) reduce = f32[2,2]{0,1} reduce(p1, c0), dimensions={2}, to_apply=scalar_add ROOT root = (f32[2,2]{0,1}, f32[2,2,2]{2,1,0}) tuple(reduce, exp) })")) .value(); const HloInstruction* reduce = module->entry_computation()->root_instruction()->operand(0); const HloInstruction* exp = module->entry_computation()->root_instruction()->operand(1); EXPECT_FALSE(ShapesCompatibleForMultiOutputFusion(*reduce, *exp)); } TEST_F( GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_SiblingTransposeFusionsNotCompatible) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_021_transpose { param_0 = f32[20,20,20]{2,1,0} parameter(0) transpose = f32[20,20,20]{2,1,0} transpose(param_0), dimensions={0,2,1} ROOT bitcast = f32[8000]{0} bitcast(transpose) } fused_220_transpose { param_0 = 
f32[20,20,20]{2,1,0} parameter(0) transpose = f32[20,20,20]{2,1,0} transpose(param_0), dimensions={2,1,0} ROOT bitcast = f32[8000]{0} bitcast(transpose) } ENTRY reduce { p0 = f32[20,20,20]{2,1,0} parameter(0) fusion = f32[8000]{0} fusion(p0), kind=kInput, calls=fused_021_transpose fusion.1 = f32[8000]{0} fusion(p0), kind=kInput, calls=fused_220_transpose ROOT root = (f32[8000]{0}, f32[8000]{0}) tuple(fusion, fusion.1) })")) .value(); const HloInstruction* fusion_1 = module->entry_computation()->root_instruction()->operand(0); const HloInstruction* fusion_2 = module->entry_computation()->root_instruction()->operand(1); EXPECT_FALSE( FusionHeroesAreCompatible(fusion_1->fused_expression_root()->operand(0), fusion_2->fused_expression_root()->operand(0))); EXPECT_FALSE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2)); } TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_SiblingTransposeFusionsCompatible) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_1230_transpose { param_0 = f32[1,20,20]{2,1,0} parameter(0) bitcast.1 = f32[20,2,2,5]{3,2,1,0} bitcast(param_0) transpose = f32[2,2,5,20]{3,2,1,0} transpose(bitcast.1), dimensions={1,2,3,0} ROOT bitcast.2 = f32[400]{0} bitcast(transpose) } fused_021_transpose { param_0 = f32[1,20,20]{2,1,0} parameter(0) transpose = f32[1,20,20]{2,1,0} transpose(param_0), dimensions={0,2,1} ROOT bitcast = f32[400]{0} bitcast(transpose) } ENTRY reduce { p0 = f32[1,20,20]{2,1,0} parameter(0) fusion = f32[400]{0} fusion(p0), kind=kInput, calls=fused_1230_transpose fusion.1 = f32[400]{0} fusion(p0), kind=kInput, calls=fused_021_transpose ROOT root = (f32[400]{0}, f32[400]{0}) tuple(fusion, fusion.1) })")) .value(); const HloInstruction* fusion_1 = module->entry_computation()->root_instruction()->operand(0); const HloInstruction* fusion_2 = module->entry_computation()->root_instruction()->operand(1); EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2)); } 
// A kLoop select producer feeding a kInput two-reduce fusion: the two GTE
// users resolve to the same fusion, and it is shape-compatible with itself.
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_MultiOutputReduceFusion) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_select { p1.1 = f32[2,2,2]{2,1,0} parameter(1) c0 = f32[] constant(0) broadcast = f32[2,2,2]{2,1,0} broadcast(f32[] c0), dimensions={} greater-than = pred[2,2,2]{2,1,0} compare(f32[2,2,2]{2,1,0} p1.1, f32[2,2,2]{2,1,0} broadcast), direction=GT p0.1 = f32[2,2,2]{2,1,0} parameter(0) ROOT select = f32[2,2,2]{2,1,0} select(pred[2,2,2]{2,1,0} greater-than, f32[2,2,2]{2,1,0} p0.1, f32[2,2,2]{2,1,0} broadcast) } fused_reduce { p0.2 = f32[2,2,2]{2,1,0} parameter(0) c1 = f32[] constant(0) r1 = f32[2,2]{1,0} reduce(p0.2, c1), dimensions={2}, to_apply=scalar_add mul = f32[2,2,2]{2,1,0} multiply(p0.2, p0.2) r2 = f32[2,2]{1,0} reduce(mul, c1), dimensions={2}, to_apply=scalar_add ROOT tuple = (f32[2,2]{1,0}, f32[2,2]{1,0}) tuple(r1, r2) } ENTRY reduce { p0 = f32[2,2,2]{2,1,0} parameter(0) p1 = f32[2,2,2]{2,1,0} parameter(1) select = f32[2,2,2]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_select fusion = (f32[2,2]{1,0}, f32[2,2]{1,0}) fusion(select), kind=kInput, calls=fused_reduce gte0 = f32[2,2]{1,0} get-tuple-element(fusion), index=0 gte1 = f32[2,2]{1,0} get-tuple-element(fusion), index=1 ROOT root = (f32[2,2]{1,0}, f32[2,2]{1,0}, f32[2,2,2]{2,1,0}) tuple(gte1, gte1, select) })")) .value(); const HloInstruction* fusion_1 = module->entry_computation()->root_instruction()->operand(0)->operand(0); const HloInstruction* fusion_2 = module->entry_computation()->root_instruction()->operand(1)->operand(0); EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2)); }
// Two sibling reduce fusions over the same dimensions: shape-compatible.
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_ReduceFusions) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_reduce_1 { p0.1 = f32[2,2,2]{2,1,0} parameter(0) c0 = f32[] constant(0) ROOT reduce = f32[2,2]{1,0} reduce(f32[2,2,2]{2,1,0} p0.1, f32[] c0), dimensions={0}, to_apply=scalar_add }
fused_reduce_2 { p0.2 = f32[2,2,2]{2,1,0} parameter(0) mul = f32[2,2,2]{2,1,0} multiply(f32[2,2,2]{2,1,0} p0.2, f32[2,2,2]{2,1,0} p0.2) c1 = f32[] constant(0) ROOT reduce = f32[2,2]{1,0} reduce(f32[2,2,2]{2,1,0} mul, f32[] c1), dimensions={0}, to_apply=scalar_add } ENTRY reduce { p0 = f32[2,2,2]{2,1,0} parameter(0) p1 = f32[2,2,2]{2,1,0} parameter(1) reduce_1 = f32[2,2]{1,0} fusion(p0), kind=kLoop, calls=fused_reduce_1 reduce_2 = f32[2,2]{1,0} fusion(p1), kind=kLoop, calls=fused_reduce_2 ROOT root = (f32[2,2]{1,0}, f32[2,2]{1,0}) tuple(reduce_1, reduce_2) })")) .value(); const HloInstruction* fusion_1 = module->entry_computation()->root_instruction()->operand(0); const HloInstruction* fusion_2 = module->entry_computation()->root_instruction()->operand(1); EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2)); }
// Sibling reduce fusions reducing different dimensions ({0} vs {2}): not
// shape-compatible.
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_DifferentReduceDimensions) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_reduce_1 { p0.1 = f32[32,32,32]{2,1,0} parameter(0) c0 = f32[] constant(0) ROOT reduce = f32[32,32]{1,0} reduce(f32[32,32,32]{2,1,0} p0.1, f32[] c0), dimensions={0}, to_apply=scalar_add } fused_reduce_2 { p0.2 = f32[32,32,32]{2,1,0} parameter(0) mul = f32[32,32,32]{2,1,0} multiply(f32[32,32,32]{2,1,0} p0.2, f32[32,32,32]{2,1,0} p0.2) c1 = f32[] constant(0) ROOT reduce = f32[32,32]{1,0} reduce(f32[32,32,32]{2,1,0} mul, f32[] c1), dimensions={2}, to_apply=scalar_add } ENTRY reduce { p0 = f32[32,32,32]{2,1,0} parameter(0) p1 = f32[32,32,32]{2,1,0} parameter(1) reduce_1 = f32[32,32]{1,0} fusion(p0), kind=kLoop, calls=fused_reduce_1 reduce_2 = f32[32,32]{1,0} fusion(p1), kind=kLoop, calls=fused_reduce_2 ROOT root = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(reduce_1, reduce_2) })")) .value(); const HloInstruction* fusion_1 = module->entry_computation()->root_instruction()->operand(0); const HloInstruction* fusion_2 = module->entry_computation()->root_instruction()->operand(1);
EXPECT_FALSE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2)); }
// An elementwise fusion vs. a fusion reducing a broadcast of a different
// shape: not shape-compatible.
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_NoReductionToVector) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_element_wise { p0.1 = f32[32,32,32]{2,1,0} parameter(0) p1.1 = f32[32,32,32]{2,1,0} parameter(1) ROOT add = f32[32,32,32]{2,1,0} add(p0.1, p1.1) } fused_reduce { p0.2 = f32[32,32,32]{2,1,0} parameter(0) mul = f32[32,32,32]{2,1,0} multiply(f32[32,32,32]{2,1,0} p0.2, f32[32,32,32]{2,1,0} p0.2) broadcast = f32[32,32,32,32]{3,2,1,0} broadcast(mul), dimensions={3,2,1} c1 = f32[] constant(0) ROOT reduce = f32[32,32]{1,0} reduce(f32[32,32,32,32]{3,2,1,0} broadcast, f32[] c1), dimensions={1,3}, to_apply=scalar_add } ENTRY reduce { p0 = f32[32,32,32]{2,1,0} parameter(0) p1 = f32[32,32,32]{2,1,0} parameter(1) element_wise = f32[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_element_wise fusion = f32[32,32]{1,0} fusion(element_wise), kind=kLoop, calls=fused_reduce ROOT root = (f32[32,32]{1,0}, f32[32,32,32]{2,1,0}) tuple(fusion, element_wise) })")) .value(); const HloInstruction* fusion_1 = module->entry_computation()->root_instruction()->operand(0); const HloInstruction* fusion_2 = module->entry_computation()->root_instruction()->operand(1); EXPECT_FALSE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2)); }
// A plain elementwise add can be a multi-output fusion root.
TEST_F(GpuFusibleTest, IsFusibleAsMultiOutputFusionRoot) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module ENTRY add { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) })") .value(); const HloInstruction* root = module->entry_computation()->root_instruction(); EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*root)); }
// A scatter cannot be a multi-output fusion root.
TEST_F(GpuFusibleTest, ScatterIsNotFusibleAsMultiOutputFusionRoot) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module add { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY Scatter { p0 =
s32[3,3] parameter(0) operand = s32[3,3] add(p0, p0) p1 = s32[2] parameter(1) indices = s32[2] add(p1, p1) p2 = s32[2,3] parameter(2) updates = s32[2,3] add(p2, p2) ROOT scatter = s32[3,3] scatter(operand, indices, updates), to_apply=add, update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1 })") .value(); const HloInstruction* scatter_inst = module->entry_computation()->root_instruction(); EXPECT_FALSE(IsFusibleAsMultiOutputFusionRoot(*scatter_inst)); }
// exp producer + reduce consumer: producer is MOF-fusible, consumer can be a
// MOF root, and shapes are compatible.
TEST_F(GpuFusibleTest, ProducerConsumerFusionElementwiseAndReduce) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( ENTRY reduce { p0 = f32[32,32,32]{2,1,0} parameter(0) c0 = f32[] constant(0) exp = f32[32,32,32]{2,1,0} exponential(p0) reduce = f32[32,32]{1,0} reduce(exp, c0), dimensions={2}, to_apply=scalar_add ROOT root = (f32[32,32]{1,0}, f32[32,32,32]{2,1,0}) tuple(reduce, exp) })")) .value(); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* consumer = root->operand(0); const HloInstruction* producer = root->operand(1); EXPECT_TRUE(IsProducerMultiOutputFusible(*producer)); EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*consumer)); EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*producer, *consumer)); }
// A transpose producer feeding a kLoop fusion consumer is producer-consumer
// fusible.
TEST_F(GpuFusibleTest, ProducerConsumerFusionTransposeAndLoopFusion) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_add { p0.1 = f32[32,31,30]{2,1,0} parameter(0) p1.1 = f32[32,31,30]{2,1,0} parameter(1) neg = f32[32,31,30]{2,1,0} negate(p0.1) ROOT add = f32[32,31,30]{2,1,0} add(neg, p1.1) } ENTRY reduce { p0 = f32[32,31,30]{2,1,0} parameter(0) p1 = f32[32,30,31]{2,1,0} parameter(1) transpose = f32[32,31,30]{2,1,0} transpose(p1), dimensions={0,2,1} ROOT add = f32[32,31,30]{2,1,0} fusion(p0, transpose), kind=kLoop, calls=fused_add })")) .value(); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction*
consumer = root; const HloInstruction* producer = root->operand(1); EXPECT_TRUE(IsProducerConsumerFusible(*producer, *consumer)); }
// A reduce producer feeding a kLoop fusion consumer is producer-consumer
// fusible.
TEST_F(GpuFusibleTest, ProducerConsumerFusionReduceAndLoopFusion) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_add { p0.1 = f32[32,31,30]{2,1,0} parameter(0) p1.1 = f32[32,31,30]{2,1,0} parameter(1) neg = f32[32,31,30]{2,1,0} negate(p0.1) ROOT add = f32[32,31,30]{2,1,0} add(neg, p1.1) } ENTRY reduce { p0 = f32[32,31,30]{2,1,0} parameter(0) p1 = f32[32,31,30,29]{3,2,1,0} parameter(1) c0 = f32[] constant(0.0) reduce = f32[32,31,30]{2,1,0} reduce(p1, c0), dimensions={3}, to_apply=scalar_add ROOT add = f32[32,31,30]{2,1,0} fusion(p0, reduce), kind=kLoop, calls=fused_add })")) .value(); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* consumer = root; const HloInstruction* producer = root->operand(1); EXPECT_TRUE(IsProducerConsumerFusible(*producer, *consumer)); }
// A kLoop fusion producer and an unfused reduce consumer: all three
// multi-output-fusion predicates hold.
TEST_F(GpuFusibleTest, ProducerConsumerFusionLoopFusionAndReduce) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_add { p0.1 = f32[32,32,32]{2,1,0} parameter(0) p1.1 = f32[32,32,32]{2,1,0} parameter(1) ROOT add = f32[32,32,32]{2,1,0} add(p0.1, p1.1) } ENTRY reduce { p0 = f32[32,32,32]{2,1,0} parameter(0) p1 = f32[32,32,32]{2,1,0} parameter(1) c0 = f32[] constant(0) add = f32[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_add reduce = f32[32,32]{1,0} reduce(add, c0), dimensions={2}, to_apply=scalar_add ROOT root = (f32[32,32]{1,0}, f32[32,32,32]{2,1,0}) tuple(reduce, add) })")) .value(); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* consumer = root->operand(0); const HloInstruction* producer = root->operand(1); EXPECT_TRUE(IsProducerMultiOutputFusible(*producer)); EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*consumer)); EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*producer, *consumer)); }
// kLoop select producer + kInput two-reduce fusion consumer: all three
// multi-output-fusion predicates hold.
TEST_F(GpuFusibleTest, ProducerConsumerFusionLoopFusionAndReduceFusion) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_select { p1.1 = f32[32,32,32]{2,1,0} parameter(1) c0 = f32[] constant(0) broadcast = f32[32,32,32]{2,1,0} broadcast(f32[] c0), dimensions={} greater-than = pred[32,32,32]{2,1,0} compare(f32[32,32,32]{2,1,0} p1.1, f32[32,32,32]{2,1,0} broadcast), direction=GT p0.1 = f32[32,32,32]{2,1,0} parameter(0) ROOT select = f32[32,32,32]{2,1,0} select(pred[32,32,32]{2,1,0} greater-than, f32[32,32,32]{2,1,0} p0.1, f32[32,32,32]{2,1,0} broadcast) } fused_reduce { p0.2 = f32[32,32,32]{2,1,0} parameter(0) c1 = f32[] constant(0) r1 = f32[32,32]{1,0} reduce(p0.2, c1), dimensions={2}, to_apply=scalar_add mul = f32[32,32,32]{2,1,0} multiply(p0.2, p0.2) r2 = f32[32,32]{1,0} reduce(mul, c1), dimensions={2}, to_apply=scalar_add ROOT tuple = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(r1, r2) } ENTRY reduce { p0 = f32[32,32,32]{2,1,0} parameter(0) p1 = f32[32,32,32]{2,1,0} parameter(1) select = f32[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_select fusion = (f32[32,32]{1,0}, f32[32,32]{1,0}) fusion(select), kind=kInput, calls=fused_reduce ROOT root = ((f32[32,32]{1,0}, f32[32,32]{1,0}), f32[32,32,32]{2,1,0}) tuple(fusion, select) })")) .value(); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* consumer = root->operand(0); const HloInstruction* producer = root->operand(1); EXPECT_TRUE(IsProducerMultiOutputFusible(*producer)); EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*consumer)); EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*producer, *consumer)); }
// Elementwise producer + kLoop fusion reducing a broadcast of a different
// shape: producer and consumer are individually fusible, but shapes are not
// compatible for multi-output fusion.
TEST_F(GpuFusibleTest, ProducerConsumerFusionDoNotFuseLoopReduceFusion) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_element_wise { p0.1 = f32[2,2,2]{2,1,0} parameter(0) p1.1 = f32[2,2,2]{2,1,0} parameter(1) ROOT root = f32[2,2,2]{2,1,0} add(p0.1, p1.1) } fused_reduce { p0.2 =
f32[2,2,2]{2,1,0} parameter(0) mul = f32[2,2,2]{2,1,0} multiply(f32[2,2,2]{2,1,0} p0.2, f32[2,2,2]{2,1,0} p0.2) broadcast = f32[2,2,2,2]{3,2,1,0} broadcast(mul), dimensions={3,2,1} c1 = f32[] constant(0) ROOT reduce = f32[2,2]{1,0} reduce(f32[2,2,2,2]{3,2,1,0} broadcast, f32[] c1), dimensions={1,3}, to_apply=scalar_add } ENTRY reduce { p0 = f32[2,2,2]{2,1,0} parameter(0) p1 = f32[2,2,2]{2,1,0} parameter(1) element_wise = f32[2,2,2]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_element_wise fusion = f32[2,2]{1,0} fusion(element_wise), kind=kLoop, calls=fused_reduce ROOT root = (f32[2,2]{1,0}, f32[2,2,2]{2,1,0}) tuple(fusion, element_wise) })")) .value(); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* consumer = root->operand(0); const HloInstruction* producer = root->operand(1); EXPECT_TRUE(IsProducerMultiOutputFusible(*producer)); EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*consumer)); EXPECT_FALSE(ShapesCompatibleForMultiOutputFusion(*producer, *consumer)); }
// A producer fusion with mixed input layouts (copy + slice) is not
// multi-output fusible, although the consumer and shapes otherwise qualify.
TEST_F(GpuFusibleTest, ProducerConsumerFusionReduceUnfriendlyLoopFusion) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( mixed_input_layouts_computation { p0.1 = f16[128,1024,32,32]{1,3,2,0} parameter(0) p1.1 = f16[128,1024,33,33]{3,2,1,0} parameter(1) copy = f16[128,1024,33,33]{1,3,2,0} copy(p1.1) slice = f16[128,1024,32,32]{1,3,2,0} slice(copy), slice={[0:128],[0:1024],[0:32],[0:32]} c0 = f16[] constant(0) broadcast = f16[128,1024,32,32]{1,3,2,0} broadcast(c0), dimensions={} greater-than = pred[128,1024,32,32]{1,3,2,0} compare(slice, broadcast), direction=GT ROOT root = f16[128,1024,32,32]{1,3,2,0} select(greater-than, p0.1, broadcast) } fused_reduce { p0.2 = f16[128,1024,32,32]{1,3,2,0} parameter(0) convert = f32[128,1024,32,32]{1,3,2,0} convert(p0.2) c0.2 = f32[] constant(0) ROOT reduce = f32[1024]{0} reduce(convert, c0.2), dimensions={0,2,3}, to_apply=scalar_add } ENTRY reduce { p0 = f16[128,1024,32,32]{1,3,2,0}
parameter(0) p1 = f16[128,1024,33,33]{3,2,1,0} parameter(1) loop_fusion = f16[128,1024,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=mixed_input_layouts_computation reduce_fusion = f32[1024]{0} fusion(loop_fusion), kind=kInput, calls=fused_reduce ROOT root = (f32[1024]{0}, f16[128,1024,32,32]{1,3,2,0}) tuple(reduce_fusion, loop_fusion) })")) .value(); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* consumer = root->operand(0); const HloInstruction* producer = root->operand(1); EXPECT_FALSE(IsProducerMultiOutputFusible(*producer)); EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*consumer)); EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*producer, *consumer)); }
// A dynamic-update-slice (in-place op) producer is not multi-output fusible,
// even though the transpose-fusion consumer and shapes otherwise qualify.
TEST_F(GpuFusibleTest, ProducerConsumerFusionInPlaceOperation) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( %fusion { %param_0 = s32[4,4]{1,0} parameter(0) %copy = s32[4,4]{0,1} copy(%param_0) ROOT %transpose = s32[4,4]{1,0} transpose(%copy), dimensions={1,0} } ENTRY %main { %param_0 = s32[4,4]{1,0} parameter(0) %constant_0 = s32[] constant(0) %constant_1 = s32[] constant(1) %constant_1x1_1 = s32[1,1] constant({ {1} }) %updated = s32[4,4]{1,0} dynamic-update-slice(%param_0, %constant_1x1_1, %constant_1, %constant_0) %transpose = s32[4,4]{0,1} fusion(%updated), kind=kLoop, calls=fusion ROOT %tuple = tuple(%updated, %transpose) })")) .value(); const HloInstruction* tuple = module->entry_computation()->root_instruction(); EXPECT_EQ(tuple->opcode(), HloOpcode::kTuple); const HloInstruction* dus = tuple->operand(0); EXPECT_EQ(dus->opcode(), HloOpcode::kDynamicUpdateSlice); const HloInstruction* transpose = tuple->operand(1); EXPECT_EQ(transpose->opcode(), HloOpcode::kFusion); EXPECT_FALSE(IsProducerMultiOutputFusible(*dus)); EXPECT_TRUE(IsFusibleAsMultiOutputFusionRoot(*transpose)); EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*dus, *transpose)); }
// Non-scalar constants are not fused into their consumers (checked against
// both a reduce and a broadcast consumer).
TEST_F(GpuFusibleTest, NonscalarConstantsNotFused) { auto module =
ParseAndReturnVerifiedModule(R"( HloModule test_module add { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } ENTRY BroadcastIntoReduce { constant = f32[16] constant({0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}) broadcast = f32[16,16,16,16]{3,2,1,0} broadcast(constant), dimensions={0} constant.1 = f32[] constant(0) reduce = f32[] reduce(broadcast, constant.1), dimensions={0,1,2,3}, to_apply=add ROOT root = (f32[], f32[], f32[16,16,16,16], f32[16]) tuple(reduce, constant.1, broadcast, constant) })") .value(); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* consumer = root->operand(0); const HloInstruction* producer = root->operand(1); const HloInstruction* consumer2 = root->operand(2); const HloInstruction* producer2 = root->operand(3); EXPECT_FALSE( static_cast<bool>(IsProducerConsumerFusible(*producer, *consumer))); EXPECT_FALSE( static_cast<bool>(IsProducerConsumerFusible(*producer2, *consumer2))); }
// A layout-changing copy producer can be fused with its elementwise consumer.
TEST_F(GpuFusibleTest, FuseLayoutChangingOpWithElementwise) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module ENTRY entry { p0 = f32[16,16,16,16]{3,2,1,0} parameter(0) copy = f32[16,16,16,16]{0,1,2,3} copy(p0) ROOT add = f32[16,16,16,16]{0,1,2,3} add(copy, copy) })") .value(); const HloInstruction* consumer = module->entry_computation()->root_instruction(); const HloInstruction* producer = consumer->operand(0); EXPECT_TRUE( static_cast<bool>(IsProducerConsumerFusible(*producer, *consumer))); }
// A small (2048-element) reduce can be fused with its unary elementwise
// consumer.
TEST_F(GpuFusibleTest, FuseReduceWithUnaryElementwise) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( ENTRY main.12 { Arg_0.1 = f32[2048]{0} parameter(0) constant.4 = f32[] constant(0.0) reduce.10 = f32[] reduce(Arg_0.1, constant.4), dimensions={0}, to_apply=scalar_add ROOT exp = f32[] exponential(reduce.10) })")) .value(); const HloInstruction* consumer = module->entry_computation()->root_instruction(); const HloInstruction* producer =
consumer->operand(0); EXPECT_TRUE( static_cast<bool>(IsProducerConsumerFusible(*producer, *consumer))); }
// Same pattern with a much larger input (196608 elements): fusion is
// rejected.
TEST_F(GpuFusibleTest, DoNotFuseReduceWithRacesWithUnaryElementwise) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( ENTRY main.12 { Arg_0.1 = f32[196608]{0} parameter(0) constant.4 = f32[] constant(0.0) reduce.10 = f32[] reduce(Arg_0.1, constant.4), dimensions={0}, to_apply=scalar_add ROOT exp = f32[] exponential(reduce.10) })")) .value(); const HloInstruction* consumer = module->entry_computation()->root_instruction(); const HloInstruction* producer = consumer->operand(0); EXPECT_FALSE( static_cast<bool>(IsProducerConsumerFusible(*producer, *consumer))); }
// Two unfused reduce-window ops sharing an operand: fusing them is
// classified as creating heavy computation.
TEST_F(GpuFusibleTest, CreatesHeavyComputation_NonfusionInstr) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( ENTRY entry { p_0 = f32[20,50] parameter(0) constant_1 = f32[] constant(1) reduce-window_1 = f32[21,41] reduce-window(p_0, constant_1), window={size=20x10 pad=0_20x0_0}, to_apply=scalar_add constant_2 = f32[] constant(2) reduce-window_2 = f32[21,41] reduce-window(p_0, constant_2), window={size=20x10 pad=0_20x0_0}, to_apply=scalar_add ROOT root = (f32[21,41], f32[21,41]) tuple(reduce-window_1, reduce-window_2) })")) .value(); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* producer = root->operand(0); const HloInstruction* consumer = root->operand(1); EXPECT_TRUE(CreatesHeavyComputation(*producer, *consumer)); }
// NOTE(review): this test continues past the end of this chunk; the HLO
// string and assertions conclude below.
TEST_F(GpuFusibleTest, DoesNotCreateHeavyComputation_NonfusionInstr) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( ENTRY entry { p_0 = f32[3,5] parameter(0) constant = f32[] constant(1) broadcast = f32[3, 5] broadcast(f32[] constant), dimensions={} scaled_p_0 = f32[3,5] multiply(f32[3, 5] broadcast, f32[3,5]{1, 0} p_0) p_1 = f32[2,5] parameter(1) reduce-window = f32[3,5] reduce-window(p_1, constant), window={size=2x1 pad=0_2x0_0}, to_apply=scalar_add
ROOT root = (f32[3,5], f32[3,5]) tuple(reduce-window, scaled_p_0) })")) .value(); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* producer = root->operand(0); const HloInstruction* consumer = root->operand(1); EXPECT_FALSE(CreatesHeavyComputation(*producer, *consumer)); } TEST_F(GpuFusibleTest, DoesNotCreateHeavyComputation_NonoverlappingReduceWindows) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( ENTRY entry { p_0 = f32[2,5] parameter(0) constant_1 = f32[] constant(1) reduce-window_1 = f32[3,5] reduce-window(p_0, constant_1), window={size=2x1 pad=0_2x0_0}, to_apply=scalar_add constant_2 = f32[] constant(2) reduce-window_2 = f32[2,3] reduce-window(p_0, constant_2), window={size=2x1 pad=0_2x0_0 stride=2x2}, to_apply=scalar_add ROOT root = (f32[3,5], f32[2,3]) tuple(reduce-window_1, reduce-window_2) })")) .value(); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* producer = root->operand(0); const HloInstruction* consumer = root->operand(1); EXPECT_FALSE(CreatesHeavyComputation(*producer, *consumer)); } TEST_F(GpuFusibleTest, CreatesHeavyComputation_ReduceWindowGather) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( ENTRY entry { p0 = s32[512,512,2] parameter(0) p1 = f32[1,1,512,512] parameter(1) constant_1 = f32[] constant(0) reduce-window.1 = reduce-window(p1, constant_1), window={size=1x1x16x16 stride=1x1x16x16}, to_apply=scalar_add ROOT ret = gather(reduce-window.1, p0), offset_dims={0,1,2,3}, collapsed_slice_dims={}, start_index_map={1,2}, index_vector_dim=2, slice_sizes={1,1,1,1} })")) .value(); auto gather = module->entry_computation()->root_instruction(); auto reduce_window = gather->operand(0); EXPECT_EQ(gather->opcode(), HloOpcode::kGather); EXPECT_EQ(reduce_window->opcode(), HloOpcode::kReduceWindow); EXPECT_FALSE(IfFusedReadsElementsMultipleTimes(*reduce_window)); 
EXPECT_TRUE(IsExpensiveToRepeat(*reduce_window)); EXPECT_TRUE(IfFusedReadsElementsMultipleTimes(*gather)); EXPECT_TRUE(CreatesHeavyComputation(*reduce_window, *gather)); } TEST_F(GpuFusibleTest, CreatesHeavyComputation_FusionInstr) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_producer { operand = f32[20,20] parameter(0) constant = f32[] constant(1) ROOT reduce-window = f32[11,11] reduce-window(operand, constant), window={size=20x20 pad=0_10x0_10}, to_apply=scalar_add } fused_consumer { operand_0 = f32[11,11] parameter(0) operand_1 = f32[11,11] parameter(1) constant = f32[] constant(1) reduce-window = f32[11,11] reduce-window(operand_1, constant), window={size=2x2 pad=0_1x0_1}, to_apply=scalar_add ROOT scaled_operand_1 = f32[11,11] multiply(f32[11,11] operand_0, f32[11,11] reduce-window) } ENTRY entry { p0 = f32[20,20] parameter(0) p1 = f32[11,11] parameter(1) producer = f32[11,11] fusion(p0), kind=kLoop, calls=fused_producer consumer = f32[11,11] fusion(p1, producer), kind=kLoop, calls=fused_consumer ROOT root = (f32[11,11], f32[11,11]) tuple(producer, consumer) })")) .value(); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* producer = root->operand(0); const HloInstruction* consumer = root->operand(1); EXPECT_TRUE(CreatesHeavyComputation(*producer, *consumer)); } TEST_F(GpuFusibleTest, DoesNotCreateHeavyComputation_FusionInstr) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_producer { p_0 = f32[2,2] parameter(0) constant = f32[] constant(1) ROOT reduce-window = f32[2,2] reduce-window(p_0, constant), window={size=2x2 pad=0_1x0_1}, to_apply=scalar_add } fused_consumer { p_0 = f32[2,2] parameter(0) p_1 = f32[2,2] parameter(1) constant = f32[] constant(1) reduce-window = f32[2,2] reduce-window(p_1, constant), window={size=2x2 pad=0_1x0_1}, to_apply=scalar_add ROOT scaled_p_1 = f32[2,2] multiply(f32[2, 2] p_0, f32[2,2] reduce-window) } ENTRY 
entry { p_0 = f32[2,2] parameter(0) producer = f32[2,2] fusion(p_0), kind=kLoop, calls=fused_producer consumer = f32[2,2] fusion(producer, p_0), kind=kLoop, calls=fused_consumer ROOT root = (f32[2,2], f32[2,2]) tuple(producer, consumer) })")) .value(); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* producer = root->operand(0); const HloInstruction* consumer = root->operand(1); EXPECT_FALSE(CreatesHeavyComputation(*producer, *consumer)); } TEST_F(GpuFusibleTest, ChooseFusionKind) { auto module = ParseAndReturnVerifiedModule(R"( HloModule module ENTRY computation { p = f32[1,5000,6000]{2,1,0} parameter(0) c = f32[1,6000,5000]{2,1,0} transpose(p), dimensions={0,2,1} ROOT r = f32[300,20,5000]{2,1,0} reshape(c) } )") .value(); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* producer = root->operand(0); EXPECT_EQ(ChooseFusionKind(*producer, *root), HloInstruction::FusionKind::kInput); } TEST_F(GpuFusibleTest, GetFusionRoots1) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module fusion { p0 = s32[] parameter(0) custom-call = (bf16[], s32[]) custom-call(p0), custom_call_target="my_custom_call" get-tuple-element.0 = bf16[] get-tuple-element(custom-call), index=0 get-tuple-element.1 = s32[] get-tuple-element(custom-call), index=1 ROOT tuple = (bf16[], s32[], s32[]) tuple(get-tuple-element.0, get-tuple-element.1, p0) } ENTRY entry{ p0 = s32[] parameter(0) ROOT fusion = (bf16[], s32[], s32[]) fusion(p0), kind=kCustom, calls=fusion } )") .value(); auto called_computations = module->entry_computation()->root_instruction()->called_computations(); ASSERT_EQ(called_computations.size(), 1); auto fusion = called_computations.front(); auto roots = GetFusionRoots(*fusion); auto custom_call = fusion->root_instruction()->operand(0)->operand(0); auto parameter = fusion->root_instruction()->operand(2); std::vector<const HloInstruction*> expected_roots{custom_call, 
custom_call, parameter}; EXPECT_EQ(roots, expected_roots); } TEST_F(GpuFusibleTest, GetFusionRoots2) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module fusion { p0 = s32[] parameter(0) custom-call.1 = bf16[] custom-call(p0), custom_call_target="my_custom_call1" custom-call.2 = bf16[] custom-call(p0), custom_call_target="my_custom_call2" ROOT tuple = (bf16[], bf16[], s32[]) tuple(custom-call.1, custom-call.2, p0) } ENTRY entry{ p0 = s32[] parameter(0) ROOT fusion = (bf16[], bf16[], s32[]) fusion(p0), kind=kCustom, calls=fusion } )") .value(); auto called_computations = module->entry_computation()->root_instruction()->called_computations(); ASSERT_EQ(called_computations.size(), 1); auto fusion = called_computations.front(); auto roots = GetFusionRoots(*fusion); auto custom_call1 = fusion->root_instruction()->operand(0); auto custom_call2 = fusion->root_instruction()->operand(1); auto parameter = fusion->root_instruction()->operand(2); std::vector<const HloInstruction*> expected_roots{custom_call1, custom_call2, parameter}; EXPECT_EQ(roots, expected_roots); } TEST_F(GpuFusibleTest, GetFusionRoots3) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module fusion { p0 = s32[] parameter(0) custom-call = (bf16[], s32[]) custom-call(p0), custom_call_target="my_custom_call" get-tuple-element.0 = bf16[] get-tuple-element(custom-call), index=0 custom-call.2 = bf16[] custom-call(p0), custom_call_target="my_custom_call2" get-tuple-element.1 = s32[] get-tuple-element(custom-call), index=1 ROOT tuple = (bf16[], bf16[], s32[], s32[]) tuple(get-tuple-element.0, custom-call.2, get-tuple-element.1, p0) } ENTRY entry{ p0 = s32[] parameter(0) ROOT fusion = (bf16[], bf16[], s32[], s32[]) fusion(p0), kind=kCustom, calls=fusion } )") .value(); auto called_computations = module->entry_computation()->root_instruction()->called_computations(); ASSERT_EQ(called_computations.size(), 1); auto fusion = called_computations.front(); auto roots = 
GetFusionRoots(*fusion); auto custom_call1 = fusion->root_instruction()->operand(0)->operand(0); auto custom_call2 = fusion->root_instruction()->operand(1); auto parameter = fusion->root_instruction()->operand(3); std::vector<const HloInstruction*> expected_roots{custom_call1, custom_call2, custom_call1, parameter}; EXPECT_EQ(roots, expected_roots); } TEST_F(GpuFusibleTest, GetFusionRootsWithGTEMakeTupleSequence) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module fusion { p0 = s32[] parameter(0) p1 = s32[32] parameter(1) custom-call = (bf16[], s32[], u32[]) custom-call(p1), custom_call_target="my_custom_call" get-tuple-element.0 = bf16[] get-tuple-element(custom-call), index=0 get-tuple-element.1 = s32[] get-tuple-element(custom-call), index=1 bitcast = s32[1] bitcast(get-tuple-element.1) dynamic-update-slice = s32[32] dynamic-update-slice(p1, bitcast, p0) get-tuple-element.2 = u32[] get-tuple-element(custom-call), index=2 ROOT tuple = (bf16[], s32[32], u32[]) tuple(get-tuple-element.0, dynamic-update-slice, get-tuple-element.2) } ENTRY entry{ p0 = s32[] parameter(0) bitcast = s32[32] bitcast(p0) ROOT fusion = (bf16[], s32[32], u32[]) fusion(p0, bitcast), kind=kCustom, calls=fusion } )") .value(); auto called_computations = module->entry_computation()->root_instruction()->called_computations(); ASSERT_EQ(called_computations.size(), 1); auto fusion = called_computations.front(); auto roots = GetFusionRoots(*fusion); auto custom_call = fusion->root_instruction()->operand(0)->operand(0); auto dus = fusion->root_instruction()->operand(1); std::vector<const HloInstruction*> expected_result{custom_call, dus, custom_call}; EXPECT_EQ(roots, expected_result); } TEST_F(GpuFusibleTest, GetFusionRootsWithMakeTupleGTESequence) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module fusion { p0 = s32[] parameter(0) p1 = s32[32] parameter(1) custom-call = (bf16[], s32[], u32[]) custom-call(p1), custom_call_target="my_custom_call" 
get-tuple-element.0 = bf16[] get-tuple-element(custom-call), index=0 get-tuple-element.1 = s32[] get-tuple-element(custom-call), index=1 bitcast = s32[1] bitcast(get-tuple-element.1) dynamic-update-slice = s32[32] dynamic-update-slice(p1, bitcast, p0) get-tuple-element.2 = u32[] get-tuple-element(custom-call), index=2 tuple = (bf16[], s32[32], u32[]) tuple(get-tuple-element.0, dynamic-update-slice, get-tuple-element.2) get-tuple-element.3 = bf16[] get-tuple-element(tuple), index=0 get-tuple-element.4 = u32[] get-tuple-element(tuple), index=2 ROOT tuple2 = (bf16[], s32[32], u32[]) tuple(get-tuple-element.3, dynamic-update-slice, get-tuple-element.4) } ENTRY entry{ p0 = s32[] parameter(0) bitcast = s32[32] bitcast(p0) ROOT fusion = (bf16[], s32[32], u32[]) fusion(p0, bitcast), kind=kCustom, calls=fusion } )") .value(); auto called_computations = module->entry_computation()->root_instruction()->called_computations(); ASSERT_EQ(called_computations.size(), 1); auto fusion = called_computations.front(); auto roots = GetFusionRoots(*fusion); auto tuple_inst = fusion->root_instruction()->operand(0)->operand(0); auto custom_call = tuple_inst->operand(0)->operand(0); auto dus = fusion->root_instruction()->operand(1); std::vector<const HloInstruction*> expected_result{custom_call, dus, custom_call}; EXPECT_EQ(roots, expected_result); } TEST_F(GpuFusibleTest, GetFusionRootsWithTupleMultipleSameOperands) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module fusion { p1 = s32[32] parameter(0) add0 = s32[32] add(p1, p1) ROOT _ = (s32[32], s32[32]) tuple(add0, add0) } ENTRY entry { p0 = s32[32] parameter(0) ROOT fusion = (s32[32], s32[32]) fusion(p0), kind=kCustom, calls=fusion } )") .value(); auto called_computations = module->entry_computation()->root_instruction()->called_computations(); ASSERT_EQ(called_computations.size(), 1); auto fusion = called_computations.front(); auto roots = GetFusionRoots(*fusion); auto add0 = fusion->root_instruction()->operand(0); 
EXPECT_THAT(GetFusionRoots(*fusion), ElementsAre(add0, add0)); } TEST_F(GpuFusibleTest, GetFusibleComputations) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_reduce { p0 = f32[128,1024] parameter(0) c0 = f32[] constant(0) ROOT reduce = f32[128]{0} reduce(p0, c0), dimensions={1}, to_apply=scalar_add } body_a { p0 = f32[128,1024] parameter(0) ROOT reduce_fusion = f32[128] fusion(p0), kind=kInput, calls=fused_reduce } body_b { p0 = f32[128,1024] parameter(0) c0 = f32[] constant(0) ROOT bc = f32[128] broadcast(c0), dimensions={} } ENTRY main { p0 = s32[] parameter(0) p1 = f32[128,1024] parameter(1) ROOT conditional = f32[128] conditional(p0, p1, p1), branch_computations={body_a, body_b} })")) .value(); auto fusible = GetFusibleComputations(*module, {}); EXPECT_THAT(fusible, ElementsAre(module->GetComputationWithName("body_a"), module->GetComputationWithName("body_b"), module->entry_computation())); } TEST_F(GpuFusibleTest, GetSharedMemoryUsage) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( wrapped_transpose { p0 = f32[128,1024,2]{2,1,0} parameter(0) ROOT transpose = f32[1024,128,2]{2,1,0} transpose(p0), dimensions={1,0,2} } ENTRY main { p = f32[128,1024,2] parameter(0) ROOT res = f32[1024,128,2]{2,1,0} fusion(p), kind=kInput, calls=wrapped_transpose })")) .value(); auto& debug_options = module->mutable_config().mutable_debug_options(); debug_options.set_xla_gpu_mlir_emitter_level(3); FusionInfoCache cache; auto fusion = module->entry_computation()->root_instruction(); EXPECT_EQ(cache.GetSharedMemoryUsage(*fusion), 32 * 33 * 2 * 4); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_fusible.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_fusible_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
c987cf9e-49f9-4548-8911-f4a481f0a4b8
cpp
tensorflow/tensorflow
buffer_allocations
third_party/xla/xla/service/gpu/buffer_allocations.cc
third_party/xla/xla/backends/cpu/runtime/buffer_allocations_test.cc
#include "xla/service/gpu/buffer_allocations.h" #include <cstdint> #include <set> #include "absl/status/status.h" #include "absl/types/span.h" #include "xla/service/buffer_assignment.h" #include "xla/stream_executor/device_memory.h" #include "tsl/platform/logging.h" namespace xla { namespace gpu { absl::Status BufferAllocations::TearDown( const std::set<se::DeviceMemoryBase>& live_addresses, absl::Span<const BufferAllocation> allocations) { absl::Status status; const int64_t num_buffers = allocations.size(); for (BufferAllocation::Index i = 0; i < num_buffers; ++i) { const BufferAllocation& allocation = allocations[i]; se::DeviceMemoryBase buffer_address = GetDeviceAddress(allocation.index()); if ((allocation.maybe_live_out() && !live_addresses.count(buffer_address)) || allocation.IsPreallocatedTempBuffer()) { auto dealloc_result = memory_allocator_->Deallocate(device_ordinal_, buffer_address); if (!dealloc_result.ok() && status.ok()) { status = dealloc_result; } } } return status; } se::DeviceMemoryBase BufferAllocations::GetDeviceAddress( BufferAllocation::Index buffer_index) const { CHECK_GE(buffer_index, 0); CHECK_LT(buffer_index, buffers_.size()); return buffers_[buffer_index]; } se::DeviceMemoryBase& BufferAllocations::GetMutableDeviceAddress( BufferAllocation::Index buffer_index) { CHECK_GE(buffer_index, 0); CHECK_LT(buffer_index, buffers_.size()); return buffers_[buffer_index]; } se::DeviceMemoryBase BufferAllocations::GetDeviceAddress( const BufferAllocation::Slice& buffer_slice) const { int64_t index = buffer_slice.index(); se::DeviceMemoryBase base = GetDeviceAddress(index); int64_t offset = buffer_slice.offset(); CHECK_LE(buffer_slice.offset(), base.size()) << "slice offset " << offset << " must be smaller than buffer #" << index << " size " << base.size(); int64_t extent = offset + buffer_slice.size(); CHECK_LE(extent, base.size()) << "slice extent " << extent << " must be smaller than buffer #" << index << " size " << base.size(); return 
base.GetByteSlice(buffer_slice.offset(), buffer_slice.size()); } } }
#include "xla/backends/cpu/runtime/buffer_allocations.h" #include <cstddef> #include <vector> #include "xla/service/buffer_assignment.h" #include "xla/service/maybe_owning_device_memory.h" #include "xla/stream_executor/device_memory.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" namespace xla::cpu { namespace { TEST(BufferAllocationsTest, GetDeviceAddress) { std::vector<MaybeOwningDeviceMemory> buffers; std::vector<float> data = {1.0, 2.0, 3.0, 4.0}; size_t size_in_bytes = data.size() * sizeof(float); buffers.emplace_back(se::DeviceMemoryBase(data.data(), size_in_bytes)); BufferAllocations allocations(buffers); BufferAllocation alloc(0, size_in_bytes, 0); BufferAllocation::Slice slice(&alloc, 2 * sizeof(float), sizeof(float)); TF_ASSERT_OK_AND_ASSIGN(se::DeviceMemoryBase alloc_mem, allocations.GetDeviceAddress(0)); EXPECT_EQ(alloc_mem.opaque(), &data[0]); TF_ASSERT_OK_AND_ASSIGN(se::DeviceMemoryBase slice_mem, allocations.GetDeviceAddress(slice)); EXPECT_EQ(slice_mem.opaque(), &data[2]); } TEST(BufferAllocationsTest, GetDeviceAddressUnchecked) { std::vector<MaybeOwningDeviceMemory> buffers; std::vector<float> data = {1.0, 2.0, 3.0, 4.0}; size_t size_in_bytes = data.size() * sizeof(float); buffers.emplace_back(se::DeviceMemoryBase(data.data(), size_in_bytes)); BufferAllocations allocations(buffers); BufferAllocation alloc(0, size_in_bytes, 0); BufferAllocation::Slice slice(&alloc, 2 * sizeof(float), sizeof(float)); se::DeviceMemoryBase alloc_mem = allocations.GetDeviceAddressUnchecked(0); EXPECT_EQ(alloc_mem.opaque(), &data[0]); se::DeviceMemoryBase slice_mem = allocations.GetDeviceAddressUnchecked(slice); EXPECT_EQ(slice_mem.opaque(), &data[2]); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/buffer_allocations.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/buffer_allocations_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
9a2dfbbc-53b8-4988-9ab6-73c85b8aa912
cpp
tensorflow/tensorflow
cudnn_support_utils
third_party/xla/xla/service/gpu/cudnn_support_utils.cc
third_party/xla/xla/service/gpu/cudnn_support_utils_test.cc
#include "xla/service/gpu/cudnn_support_utils.h" #include <cstdint> #include <vector> #include "xla/hlo/ir/hlo_instructions.h" #include "xla/primitive_util.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/util.h" #include "xla/window_util.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { absl::StatusOr<bool> CudnnSupportsOptimizedIntegerConvolution( const se::CudaComputeCapability& compute_capability, HloCustomCallInstruction& conv, int vector_size) { TF_ASSIGN_OR_RETURN(auto kind, GetCudnnConvKind(&conv)); const Shape& input_shape = conv.operand(0)->shape(); const Shape& kernel_shape = conv.operand(1)->shape(); const Shape& result_shape = conv.shape().tuple_shapes(0); const auto& dnums = conv.convolution_dimension_numbers(); if (vector_size != 4 && vector_size != 32) { VLOG(3) << "Unsupported vector size for integer convolution: " << vector_size; return false; } if ((vector_size == 32 && !compute_capability.IsAtLeast(7, 5)) || !compute_capability.IsAtLeast(6, 1)) { VLOG(3) << "Compute capability " << compute_capability.ToString() << " is not sufficent for int8x" << vector_size << " vectorization."; return false; } if (kind != CudnnConvKind::kForward && kind != CudnnConvKind::kForwardActivation) { VLOG(3) << "Convolution kind is not forward or foward-activation: " << conv.ToString(); return false; } if (!primitive_util::IsIntegralType(input_shape.element_type()) || !primitive_util::IsIntegralType(kernel_shape.element_type())) { VLOG(3) << "Convolution does not accept integer inputs/weights: " << conv.ToString(); return false; } if (dnums.input_spatial_dimensions().size() != 2 || dnums.kernel_spatial_dimensions().size() != 2 || dnums.output_spatial_dimensions().size() != 2) { VLOG(3) << "Convolution is not 2D: " << conv.ToString(); return false; } if (vector_size == 32 && 
!primitive_util::IsIntegralType(result_shape.element_type())) { VLOG(3) << "int8x32 convolutions only support integer output: " << conv.ToString(); return false; } if (vector_size == 32) { int64_t W = input_shape.dimensions(dnums.input_spatial_dimensions()[0]); int64_t H = input_shape.dimensions(dnums.input_spatial_dimensions()[1]); int64_t R = kernel_shape.dimensions(dnums.kernel_spatial_dimensions()[0]); int64_t S = kernel_shape.dimensions(dnums.kernel_spatial_dimensions()[1]); const int64_t dilationW = conv.window().dimensions()[0].base_dilation(); const int64_t dilationH = conv.window().dimensions()[1].base_dilation(); if ((W <= (R - 1) * dilationW) || (H <= (S - 1) * dilationH)) { VLOG(3) << "Conv spatial filter/input dimensions are too small for " "vecotrized int8x32 convolution: " << conv.ToString(); return false; } } if (window_util::HasDilation(conv.window())) { VLOG(3) << "Vectorized integer convolutions do not support dilation: " << conv.ToString(); return false; } return true; } absl::StatusOr<CudnnReorderTransposeConfig> CudnnInferTransposeForFilterReordering( const Shape& shape, const ConvolutionDimensionNumbers& dimension_numbers) { if (shape.rank() != 4 && shape.rank() != 5) { return Internal("Filter shape has unexpected rank."); } const int64_t dO = dimension_numbers.kernel_output_feature_dimension(); const int64_t dI = dimension_numbers.kernel_input_feature_dimension(); const int64_t dH = dimension_numbers.kernel_spatial_dimensions().at(0); const int64_t dW = dimension_numbers.kernel_spatial_dimensions().at(1); bool revectorize = shape.rank() == 5; const int64_t dZ = revectorize ? 10 - dO - dI - dH - dW : -1; const int64_t vsize = revectorize ? 
shape.dimensions(dZ) : 1; if (shape.dimensions(dO) % 32 != 0 || shape.dimensions(dI) % (32 / vsize) != 0 || (revectorize && vsize != 4 && vsize != 32)) { return Internal("Filter shape is not vectorizable."); } std::vector<int64_t> output = { shape.dimensions(dO), shape.dimensions(dI) / (32 / vsize), shape.dimensions(dH), shape.dimensions(dW), 32}; Shape output_shape = ShapeUtil::MakeShape(shape.element_type(), output); auto calc_index = [&](int dim) { bool split_v = vsize == 32; return (revectorize ? (dI < dim ? 2 - split_v : 0) + (dZ < dim ? 1 + split_v : 0) : (dI < dim ? 3 : 0)) + (dO < dim ? 3 : 0) + (dH < dim) + (dW < dim); }; int idx_O = calc_index(dO); int idx_I = calc_index(dI); int idx_H = calc_index(dH); int idx_W = calc_index(dW); int idx_Y = vsize == 32 ? calc_index(dZ) : idx_I + 1; int idx_Z = vsize == 4 ? calc_index(dZ) : vsize == 32 ? idx_Y + 1 : idx_I + 2; std::vector<int64_t> dims(8); dims[idx_O] = shape.dimensions(dO) / 8; dims[idx_O + 1] = 4; dims[idx_O + 2] = 2; dims[idx_I] = shape.dimensions(dI) / (32 / vsize); dims[idx_Y] = 8; dims[idx_Z] = 4; dims[idx_H] = shape.dimensions(dH); dims[idx_W] = shape.dimensions(dW); Shape split_shape = ShapeUtil::MakeShape(shape.element_type(), dims); std::vector<int64_t> permutation = {idx_I, idx_H, idx_W, idx_O, idx_O + 2, idx_Y, idx_O + 1, idx_Z}; return CudnnReorderTransposeConfig{split_shape, output_shape, permutation}; } absl::StatusOr<CudnnReorderTransposeConfig> CudnnInferTransposeForBiasReordering(const Shape& shape) { if (shape.rank() != 1) { return Internal("Bias shape has unexpected rank."); } if (shape.dimensions(0) % 32 != 0) { return Internal("Bias shape is not vectorizable."); } std::vector<int64_t> dims = {shape.dimensions(0) / 32, 4, 2, 4}; Shape split_shape = ShapeUtil::MakeShape(shape.element_type(), dims); std::vector<int64_t> permutation = {0, 2, 1, 3}; return CudnnReorderTransposeConfig{split_shape, shape, permutation}; } bool IsWorkspaceAllocationRoot(const HloInstruction& root) { return 
root.IsRoot() && root.opcode() == HloOpcode::kTuple && root.operand_count() == 2 && root.operand(1)->IsCustomCall(kWorkspaceAllocationCustomCallTarget) && root.operand(1)->operand_count() == 0; } } }
#include "xla/service/gpu/cudnn_support_utils.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <memory> #include <string> #include <tuple> #include <vector> #include <gtest/gtest.h> #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/service/hlo_parser.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/test.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/verified_hlo_module.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { using ::tsl::testing::IsOkAndHolds; class CudnnSupportUtilsTest : public HloTestBase { public: absl::StatusOr<HloCustomCallInstruction*> GetCustomCall( xla::VerifiedHloModule* module, absl::string_view target) { HloCustomCallInstruction* call = nullptr; for (HloComputation* comp : module->MakeNonfusionComputations()) { for (HloInstruction* inst : comp->instructions()) { if (inst->IsCustomCall(target)) { VLOG(1) << inst->ToString(); if (call != nullptr) { return tsl::errors::FailedPrecondition( "Found more than one custom call."); } call = Cast<HloCustomCallInstruction>(inst); } } } if (call == nullptr) { return tsl::errors::FailedPrecondition( "Did not find any matching custom call."); } return call; } }; TEST_F(CudnnSupportUtilsTest, CudnnSupportsOptimizedIntegerConvolutionCheckVectorSize) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[8,10,10,128] parameter(0) filter = s8[2,2,128,128] parameter(1) ROOT result = (s8[8,10,10,128], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); 
HloCustomCallInstruction* conv; TF_ASSERT_OK_AND_ASSIGN(conv, GetCustomCall(module.get(), "__cudnn$convForward")); EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4), IsOkAndHolds(true)); EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32), IsOkAndHolds(true)); EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 7), IsOkAndHolds(false)); EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 1), IsOkAndHolds(false)); } TEST_F(CudnnSupportUtilsTest, CudnnSupportsOptimizedIntegerConvolutionCheckComputeCapability) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[8,10,10,128] parameter(0) filter = s8[2,2,128,128] parameter(1) ROOT result = (s8[8,10,10,128], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); HloCustomCallInstruction* conv; TF_ASSERT_OK_AND_ASSIGN(conv, GetCustomCall(module.get(), "__cudnn$convForward")); EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({6, 0}, *conv, 4), IsOkAndHolds(false)); EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({6, 1}, *conv, 4), IsOkAndHolds(true)); EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 4}, *conv, 32), IsOkAndHolds(false)); EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32), IsOkAndHolds(true)); } TEST_F(CudnnSupportUtilsTest, CudnnSupportsOptimizedIntegerConvolutionCheckKind) { auto moduleFwd = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[32,10,10,64] parameter(0) filter = s8[2,2,64,128] parameter(1) ROOT result = (s8[32,10,10,128], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); HloCustomCallInstruction* conv; TF_ASSERT_OK_AND_ASSIGN( conv, GetCustomCall(moduleFwd.get(), "__cudnn$convForward")); 
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32), IsOkAndHolds(true)); auto moduleBwdFilter = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = f16[10,20,30,41] parameter(0) output = f16[10,20,30,40] parameter(1) result = (f16[2,2,41,40], u8[0]) custom-call(input, output), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convBackwardFilter" ROOT gte = f16[2,2,41,40] get-tuple-element(result), index=0 })") .value(); TF_ASSERT_OK_AND_ASSIGN( conv, GetCustomCall(moduleBwdFilter.get(), "__cudnn$convBackwardFilter")); EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32), IsOkAndHolds(false)); auto moduleBwdInput = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { output = f16[10,20,30,40] parameter(0) filter = f16[2,2,41,40] parameter(1) result = (f16[10,20,30,41], u8[0]) custom-call(output, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convBackwardInput" ROOT gte = f16[10,20,30,41] get-tuple-element(result), index=0 })") .value(); TF_ASSERT_OK_AND_ASSIGN( conv, GetCustomCall(moduleBwdInput.get(), "__cudnn$convBackwardInput")); EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32), IsOkAndHolds(false)); } TEST_F(CudnnSupportUtilsTest, CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckTypes) { auto moduleS8InOut = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[32,10,10,64] parameter(0) filter = s8[2,2,64,128] parameter(1) ROOT result = (s8[32,10,10,128], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); HloCustomCallInstruction* conv; TF_ASSERT_OK_AND_ASSIGN( conv, GetCustomCall(moduleS8InOut.get(), "__cudnn$convForward")); EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4), IsOkAndHolds(true)); 
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32), IsOkAndHolds(true)); auto moduleS8InF32Out = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[32,10,10,64] parameter(0) filter = s8[2,2,64,128] parameter(1) ROOT result = (f32[32,10,10,128], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN( conv, GetCustomCall(moduleS8InF32Out.get(), "__cudnn$convForward")); EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4), IsOkAndHolds(true)); EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32), IsOkAndHolds(false)); auto moduleF32InF32Out = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = f32[32,10,10,64] parameter(0) filter = f32[2,2,64,128] parameter(1) ROOT result = (f32[32,10,10,128], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN( conv, GetCustomCall(moduleF32InF32Out.get(), "__cudnn$convForward")); EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4), IsOkAndHolds(false)); EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32), IsOkAndHolds(false)); } TEST_F(CudnnSupportUtilsTest, CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckDims) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[32,10,10,10,64] parameter(0) filter = s8[2,2,2,64,128] parameter(1) ROOT result = (s8[32,10,10,10,128], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b012f_012io->b012f, custom_call_target="__cudnn$convForward" })") .value(); HloCustomCallInstruction* conv; TF_ASSERT_OK_AND_ASSIGN(conv, GetCustomCall(module.get(), "__cudnn$convForward")); EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4), 
IsOkAndHolds(false)); EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32), IsOkAndHolds(false)); } TEST_F(CudnnSupportUtilsTest, CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckDilation) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[32,10,10,64] parameter(0) filter = s8[2,2,64,128] parameter(1) ROOT result = (s8[32,20,20,128], u8[0]) custom-call(input, filter), window={size=2x2 rhs_dilate=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); HloCustomCallInstruction* conv; TF_ASSERT_OK_AND_ASSIGN(conv, GetCustomCall(module.get(), "__cudnn$convForward")); EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4), IsOkAndHolds(false)); EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32), IsOkAndHolds(false)); } TEST_F(CudnnSupportUtilsTest, CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckAlgo1Dims) { auto moduleFilterCoversInput = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[32,2,2,64] parameter(0) filter = s8[3,3,64,128] parameter(1) ROOT result = (s8[32,2,2,128], u8[0]) custom-call(input, filter), window={size=3x3}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); HloCustomCallInstruction* conv; TF_ASSERT_OK_AND_ASSIGN(conv, GetCustomCall(moduleFilterCoversInput.get(), "__cudnn$convForward")); EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4), IsOkAndHolds(true)); EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32), IsOkAndHolds(false)); auto moduleFilterAlmostCoversInput = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[32,3,3,64] parameter(0) filter = s8[3,3,64,128] parameter(1) ROOT result = (s8[32,3,3,128], u8[0]) custom-call(input, filter), window={size=3x3}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); 
TF_ASSERT_OK_AND_ASSIGN(conv, GetCustomCall(moduleFilterAlmostCoversInput.get(), "__cudnn$convForward")); EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4), IsOkAndHolds(true)); EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32), IsOkAndHolds(true)); } class ReorderFilterRank4Test : public ::testing::TestWithParam<std::string> {}; TEST_P(ReorderFilterRank4Test, InferTransposeRank4) { auto input_dims = GetParam(); size_t dI = input_dims.find('i'); size_t dO = input_dims.find('o'); size_t dH = input_dims.find('0'); size_t dW = input_dims.find('1'); ConvolutionDimensionNumbers dnums; dnums.set_kernel_input_feature_dimension(dI); dnums.set_kernel_output_feature_dimension(dO); dnums.add_kernel_spatial_dimensions(dH); dnums.add_kernel_spatial_dimensions(dW); int64_t shape_dims[4] = {0, 0, 0, 0}; shape_dims[dI] = 224; shape_dims[dO] = 96; shape_dims[dH] = 5; shape_dims[dW] = 3; Shape shape = ShapeUtil::MakeShape(U8, absl::MakeSpan(shape_dims)); auto input = HloInstruction::CreateParameter(0, shape, "input"); auto filter = HloInstruction::CreateParameter(1, shape, "filter"); TF_ASSERT_OK_AND_ASSIGN(CudnnReorderTransposeConfig inferred_config, CudnnInferTransposeForFilterReordering(shape, dnums)); EXPECT_THAT(inferred_config.result_shape.dimensions(), ::testing::ElementsAre(96, 7, 5, 3, 32)); Shape reshaped = ShapeUtil::PermuteDimensions( inferred_config.permutation, inferred_config.transpose_shape); EXPECT_THAT(reshaped.dimensions(), ::testing::ElementsAre(7, 5, 3, 12, 2, 8, 4, 4)); EXPECT_EQ(inferred_config.permutation[6], inferred_config.permutation[4] - 1); EXPECT_EQ(inferred_config.permutation[7], inferred_config.permutation[5] + 1); } std::vector<std::string> GeneratePermutations(std::string input_dims) { std::sort(input_dims.begin(), input_dims.end()); std::vector<std::string> permutations; do { permutations.push_back(input_dims); } while (std::next_permutation(input_dims.begin(), input_dims.end())); return permutations; } 
INSTANTIATE_TEST_SUITE_P(ReorderTestSuite, ReorderFilterRank4Test, ::testing::ValuesIn(GeneratePermutations("01io"))); class ReorderFilterRank5Test : public ::testing::TestWithParam<std::tuple<std::string, int>> {}; TEST_P(ReorderFilterRank5Test, InferTransposeRank5) { auto [input_dims, vsize] = GetParam(); size_t dI = input_dims.find('i'); size_t dO = input_dims.find('o'); size_t dH = input_dims.find('0'); size_t dW = input_dims.find('1'); ConvolutionDimensionNumbers dnums; dnums.set_kernel_input_feature_dimension(dI); dnums.set_kernel_output_feature_dimension(dO); dnums.add_kernel_spatial_dimensions(dH); dnums.add_kernel_spatial_dimensions(dW); int64_t shape_dims[5] = {vsize, vsize, vsize, vsize, vsize}; shape_dims[dI] = 224 / vsize; shape_dims[dO] = 96; shape_dims[dH] = 5; shape_dims[dW] = 3; Shape shape = ShapeUtil::MakeShape(U8, absl::MakeSpan(shape_dims)); auto input = HloInstruction::CreateParameter(0, shape, "input"); auto filter = HloInstruction::CreateParameter(1, shape, "filter"); TF_ASSERT_OK_AND_ASSIGN(CudnnReorderTransposeConfig inferred_config, CudnnInferTransposeForFilterReordering(shape, dnums)); EXPECT_THAT(inferred_config.result_shape.dimensions(), ::testing::ElementsAre(96, 7, 5, 3, 32)); Shape reshaped = ShapeUtil::PermuteDimensions( inferred_config.permutation, inferred_config.transpose_shape); EXPECT_THAT(reshaped.dimensions(), ::testing::ElementsAre(7, 5, 3, 12, 2, 8, 4, 4)); EXPECT_EQ(inferred_config.permutation[6], inferred_config.permutation[4] - 1); } INSTANTIATE_TEST_SUITE_P( ReorderTestSuite, ReorderFilterRank5Test, ::testing::Combine(::testing::ValuesIn(GeneratePermutations("01?io")), ::testing::Values(4, 32))); class ReorderBiasTest : public ::testing::Test {}; TEST_F(ReorderBiasTest, InferTranspose) { Shape shape = ShapeUtil::MakeShape(U8, {96}); auto bias = HloInstruction::CreateParameter(2, shape, "bias"); Shape unused = ShapeUtil::MakeNil(); auto input = HloInstruction::CreateParameter(0, unused, "input"); auto filter = 
HloInstruction::CreateParameter(1, unused, "filter"); TF_ASSERT_OK_AND_ASSIGN(CudnnReorderTransposeConfig inferred_config, CudnnInferTransposeForBiasReordering(shape)); Shape reshaped = ShapeUtil::PermuteDimensions( inferred_config.permutation, inferred_config.transpose_shape); EXPECT_THAT(reshaped.dimensions(), ::testing::ElementsAre(3, 2, 4, 4)); EXPECT_EQ(inferred_config.permutation[2], 1); EXPECT_EQ(inferred_config.permutation[3], 3); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/cudnn_support_utils.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/cudnn_support_utils_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
351b5a0b-c4a1-46de-8a0c-dd47539046d9
cpp
tensorflow/tensorflow
split_k_gemm_rewriter
third_party/xla/xla/service/gpu/split_k_gemm_rewriter.cc
third_party/xla/xla/service/gpu/split_k_gemm_rewriter_test.cc
#include "xla/service/gpu/split_k_gemm_rewriter.h" #include <cmath> #include <cstdint> #include <iterator> #include <stack> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/cord.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/autotuning.pb.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/ir/hlo_schedule.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/layout.h" #include "xla/literal_util.h" #include "xla/service/gpu/fusions/triton/triton_support_legacy.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/matmul_utils.h" #include "xla/service/gpu/triton_fusion_analysis.h" #include "xla/service/gpu/triton_tiling_propagation.h" #include "xla/service/hlo_creation_utils.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { bool HasDivisibleSuffixAllowingSplit(const absl::Span<int64_t const> span, const int64_t divisor) { CHECK_GE(divisor, 1); int64_t product = 1; for (auto it = span.crbegin(); it != span.crend(); ++it) { product *= *it; if (product % divisor == 0) { return true; } if (divisor % product != 0) { return false; } } return false; } namespace { void CopyIncrementingAboveThreshold( const tsl::protobuf::RepeatedField<int64_t>& source, tsl::protobuf::RepeatedField<int64_t>& destination, const int threshold) { destination.Reserve(source.size()); for (int64_t x : source) { if (x >= threshold) { ++x; } destination.Add(x); } } void CopyIncrementingAboveThreshold(absl::Span<const int64_t> source, 
DimensionVector& destination, const int threshold) { destination.reserve(source.size()); for (int64_t x : source) { if (x >= threshold) { ++x; } destination.push_back(x); } } absl::Status UncompilableMatmul(absl::string_view explanation) { absl::Status s = absl::CancelledError(explanation); s.SetPayload(kUncompilableFusion, absl::Cord(explanation)); return s; } absl::StatusOr<HloInstruction*> MakeSparseMetaOperand( HloDotInstruction& dot, const TritonGemmConfig& config) { CHECK_EQ(dot.sparse_operands(), 1); CHECK_EQ(dot.sparsity().front().index(), 0); HloInstruction* meta = dot.mutable_operand(2); const Shape& shape = meta->shape(); if (shape.dimensions().back() % config.split_k != 0) { return UncompilableMatmul("Sparsity metadata has incorrect shape."); } std::vector<int64_t> dimensions(shape.dimensions().begin(), shape.dimensions().end() - 1); dimensions.push_back(config.split_k); dimensions.push_back(shape.dimensions().back() / config.split_k); Shape new_shape = ShapeUtil::MakeShapeWithDescendingLayout( shape.element_type(), dimensions); return MakeBitcastHlo(meta, new_shape); } } absl::StatusOr<HloInstruction*> MakeSplitKOperand( HloInstruction& dot, const TritonFusionAnalysis& analysis, const TritonGemmConfig& config, const int64_t contracting_dim_idx, const int operand_number) { HloInstruction* operand = dot.mutable_operand(operand_number); const int64_t k = operand->shape().dimensions(contracting_dim_idx); const bool need_padding = k % config.split_k != 0; TritonFusionAnalysis::Scope scope = (operand_number == 0) ? 
TritonFusionAnalysis::Scope::LHS : TritonFusionAnalysis::Scope::RHS; auto check_if_supported = [&](const HloInstruction& hlo, bool check_divisibility) { const TensorIterationSpec::DimIterationSpec* spec = analysis.IterSpec(scope, &hlo, contracting_dim_idx); if (spec == nullptr) { return absl::OkStatus(); } if (spec->size() != 1) { return UncompilableMatmul("Unsupported case."); } const TensorIterationSpec::IterationSpecFragment& fragment = spec->at(0); if (fragment.is_sliced()) { return UncompilableMatmul( "Sliced contracting dimension is not supported yet."); } if (check_divisibility && !HasDivisibleSuffixAllowingSplit( fragment.subfragments, config.split_k)) { return UncompilableMatmul("Contracting dimension is too fragmented."); } if (config.split_k > ceil(1.0 * fragment.count / config.block_k)) { return UncompilableMatmul( "Too small divisible part of the contracting dimension."); } return absl::OkStatus(); }; TF_RETURN_IF_ERROR( check_if_supported(*operand, !need_padding)); for (const HloInstruction* param : analysis.ScopeParameters(scope)) { TF_RETURN_IF_ERROR( check_if_supported(*param, !need_padding)); } if (need_padding) { HloInstruction* const zero = dot.parent()->AddInstruction(HloInstruction::CreateConstant( LiteralUtil::Zero(operand->shape().element_type()))); PaddingConfig padding_config = MakeNoPaddingConfig(operand->shape().rank()); padding_config.mutable_dimensions(contracting_dim_idx) ->set_edge_padding_high(config.split_k - k % config.split_k); TF_ASSIGN_OR_RETURN(HloInstruction * pad, MakePadHlo(operand, zero, padding_config)); *pad->mutable_shape()->mutable_layout() = operand->shape().layout(); operand = pad; } CHECK_GE(operand->shape().dimensions(contracting_dim_idx), config.split_k); const Shape& shape = operand->shape(); Shape new_shape(shape.element_type(), {}, {}, {}); for (int i = 0; i < shape.rank(); ++i) { const int64_t dimension_size = shape.dimensions(i); if (i == contracting_dim_idx) { new_shape.add_dimensions(config.split_k); 
new_shape.add_dimensions(dimension_size / config.split_k); } else { new_shape.add_dimensions(dimension_size); } } Layout* new_layout = new_shape.mutable_layout(); for (int64_t logical_dim_idx : shape.layout().minor_to_major()) { if (logical_dim_idx >= contracting_dim_idx) { new_layout->add_minor_to_major(logical_dim_idx + 1); } if (logical_dim_idx <= contracting_dim_idx) { new_layout->add_minor_to_major(logical_dim_idx); } } return MakeBitcastHlo(operand, new_shape); } absl::Status MakeDotComputationSplitKBatch( HloComputation* computation, const TritonGemmConfig& config, bool disable_reduced_precision_reduction) { HloDotInstruction* dot = Cast<HloDotInstruction>( hlo_query::GetFirstInstructionWithOpcode(*computation, HloOpcode::kDot)); TF_ASSIGN_OR_RETURN(const auto analysis, TritonFusionAnalysis::Execute(*computation)); const DotDimensionNumbers& old_dim_numbers = dot->dot_dimension_numbers(); DotDimensionNumbers new_dim_numbers; TF_ASSIGN_OR_RETURN(const int64_t lhs_contracting_idx, ContractingDimensionIndex(*dot, 0)); CopyIncrementingAboveThreshold( old_dim_numbers.lhs_contracting_dimensions(), *new_dim_numbers.mutable_lhs_contracting_dimensions(), lhs_contracting_idx); new_dim_numbers.mutable_lhs_batch_dimensions()->Add(lhs_contracting_idx); CopyIncrementingAboveThreshold( old_dim_numbers.lhs_batch_dimensions(), *new_dim_numbers.mutable_lhs_batch_dimensions(), lhs_contracting_idx); TF_ASSIGN_OR_RETURN(const int64_t rhs_contracting_idx, ContractingDimensionIndex(*dot, 1)); CopyIncrementingAboveThreshold( old_dim_numbers.rhs_contracting_dimensions(), *new_dim_numbers.mutable_rhs_contracting_dimensions(), rhs_contracting_idx); new_dim_numbers.mutable_rhs_batch_dimensions()->Add(rhs_contracting_idx); CopyIncrementingAboveThreshold( old_dim_numbers.rhs_batch_dimensions(), *new_dim_numbers.mutable_rhs_batch_dimensions(), rhs_contracting_idx); if (dot->sparse_operands()) { if (dot->sparsity().size() != 1 || dot->sparsity().front().index() != 0) { return 
UncompilableMatmul("Sparsity is only supported on left operand."); } } std::stack<HloInstruction*> to_process; absl::flat_hash_set<HloInstruction*> to_process_set; HloInstruction* current = dot; do { to_process.push(current); CHECK(to_process_set.insert(current).second); if (current->users().empty()) { break; } CHECK_EQ(current->user_count(), 1); current = current->users()[0]; if (!legacy_triton::IsDistributiveOverAddition(*current)) { return Cancelled("Operation non-distributive over addition after dot."); } } while (true); bool did_pad = false; while (!to_process.empty()) { HloInstruction* current = to_process.top(); to_process.pop(); HloInstruction* expanded; if (current == dot) { TF_ASSIGN_OR_RETURN( HloInstruction * lhs, MakeSplitKOperand(*dot, analysis, config, lhs_contracting_idx, 0)); TF_ASSIGN_OR_RETURN( HloInstruction * rhs, MakeSplitKOperand(*dot, analysis, config, rhs_contracting_idx, 1)); if (lhs->operand(0)->opcode() == HloOpcode::kPad) { CHECK_EQ(rhs->operand(0)->opcode(), HloOpcode::kPad); did_pad = true; } std::vector<SparsityDescriptor> sparsity(dot->sparsity().begin(), dot->sparsity().end()); std::vector<HloInstruction*> sparse_meta(sparsity.size()); for (int i = 0; i < sparsity.size(); ++i) { sparsity[i].set_dimension(sparsity[i].dimension() + 1); TF_ASSIGN_OR_RETURN(sparse_meta[i], MakeSparseMetaOperand(*dot, config)); } expanded = MakeDotHlo(lhs, rhs, new_dim_numbers, dot->precision_config(), dot->shape().element_type(), sparsity, sparse_meta) .value(); expanded->mutable_shape()->mutable_layout()->clear_minor_to_major(); CopyIncrementingAboveThreshold(dot->shape().layout().minor_to_major(), *expanded->mutable_shape() ->mutable_layout() ->mutable_minor_to_major(), 0); expanded->mutable_shape()->mutable_layout()->add_minor_to_major(0); dot->SetupDerivedInstruction(expanded); } else { expanded = computation->AddInstruction(current->CloneWithNewShape( ShapeUtil::PrependMajorDimension(config.split_k, current->shape()))); if (expanded->opcode() == 
HloOpcode::kTranspose) { const auto* old_transpose = Cast<HloTransposeInstruction>(current); auto* new_transpose = Cast<HloTransposeInstruction>(expanded); new_transpose->mutable_dimensions()->clear(); new_transpose->mutable_dimensions()->reserve( new_transpose->shape().rank()); new_transpose->mutable_dimensions()->push_back(0); for (const int64_t dim : old_transpose->dimensions()) { new_transpose->mutable_dimensions()->push_back(dim + 1); } } } TF_RETURN_IF_ERROR(current->ReplaceAllUsesWithDifferentShape(expanded)); TF_RETURN_IF_ERROR(computation->RemoveInstruction(current)); if (current == dot) { continue; } for (int i = 0; i < expanded->operands().size(); ++i) { HloInstruction* operand = expanded->mutable_operand(i); if (!to_process_set.contains(operand)) { std::vector<int64_t> broadcast_dimensions(operand->shape().rank()); absl::c_iota(broadcast_dimensions, 1); TF_RETURN_IF_ERROR(expanded->ReplaceOperandWithDifferentShape( i, MakeBroadcastHlo(operand, broadcast_dimensions, ShapeUtil::PrependMajorDimension( config.split_k, operand->shape())))); } } } if (disable_reduced_precision_reduction) { PrimitiveType output_type = computation->root_instruction()->shape().element_type(); PrimitiveType accumulator_type = output_type == PrimitiveType::F64 ? 
PrimitiveType::F64 : PrimitiveType::F32; computation->root_instruction()->mutable_shape()->set_element_type( accumulator_type); } if (did_pad) { TF_RETURN_IF_ERROR( TritonFusionAnalysis::Execute(*computation, config.split_k).status()); } return absl::OkStatus(); } absl::Status MakeDotSplitKBatch(HloInstruction* dot_fusion, const TritonGemmConfig& config) { CHECK_EQ(dot_fusion->opcode(), HloOpcode::kFusion); if (dot_fusion->shape().IsTuple()) { return Unimplemented("Tuple output is not supported with split-K yet."); } const bool disable_reduced_precision_reduction = dot_fusion->GetModule() ->config() .debug_options() .xla_gpu_triton_gemm_disable_reduced_precision_reduction(); const PrimitiveType output_type = dot_fusion->shape().element_type(); const Layout output_layout = dot_fusion->shape().layout(); TF_RETURN_IF_ERROR(MakeDotComputationSplitKBatch( dot_fusion->fused_instructions_computation(), config, disable_reduced_precision_reduction)); const HloInstruction* root = dot_fusion->fused_expression_root(); *dot_fusion->mutable_shape() = root->shape(); HloInstruction* zero = dot_fusion->parent()->AddInstruction(HloInstruction::CreateConstant( LiteralUtil::Zero(root->shape().element_type()))); TF_ASSIGN_OR_RETURN(HloInstruction * reduce, MakeReduceHlo(dot_fusion, zero, {0}, HloOpcode::kAdd, &dot_fusion->metadata())); *reduce->mutable_shape()->mutable_layout() = output_layout; if (dot_fusion->IsRoot()) { dot_fusion->parent()->set_root_instruction(reduce, true); } else { TF_RETURN_IF_ERROR(dot_fusion->ReplaceAllUsesWithDifferentShape(reduce)); } if (disable_reduced_precision_reduction) { HloInstruction* convert = MakeConvertToHlo(reduce, output_type); if (reduce->IsRoot()) { reduce->parent()->set_root_instruction(convert, true); } else { TF_RETURN_IF_ERROR(reduce->ReplaceAllUsesWithDifferentShape(convert)); } } return absl::OkStatus(); } } }
// Tests for the split-K GEMM rewriter. MakeDotSplitKBatch is expected to turn
// a Triton GEMM fusion into a split-K fusion followed by a reduce (plus a
// convert when reduced-precision reduction is disabled), or to return a
// Cancelled/Unimplemented status for unsupported shapes.
#include "xla/service/gpu/split_k_gemm_rewriter.h"

#include <memory>
#include <string>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/triton_fusion_analysis.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/layout_assignment.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace gpu {
namespace {

using ::testing::ElementsAre;
using ::testing::FieldsAre;

namespace m = ::xla::match;

// Exercises the helper that checks whether some suffix of the dimension sizes
// has a product divisible by the requested split factor.
TEST(HasDivisibleSuffixAllowingSplitTest, AllTests) {
  EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({1}, 1));
  EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({2}, 2));
  EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({2, 2}, 2));
  EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({3, 2}, 6));
  EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({2, 3, 2}, 6));
  EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({15, 2}, 6));
  EXPECT_TRUE(HasDivisibleSuffixAllowingSplit({3, 15, 2}, 6));
  EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({}, 1));
  EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({1}, 2));
  EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({3}, 2));
  EXPECT_FALSE(HasDivisibleSuffixAllowingSplit({2, 3}, 2));
}

using SplitKTest = HloTestBase;

// Basic rewrite: the fusion root becomes a reduce and instruction metadata is
// preserved on it.
TEST_F(SplitKTest, MakeSplitK) {
  const std::string hlo_text = R"(
HloModule t

triton_gemm_dot {
  parameter_0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
  bitcast.1 = s8[3,5,32,128]{2,1,3,0} bitcast(parameter_0)
  copy.1 = s8[3,5,32,128]{3,2,1,0} copy(bitcast.1)
  reshape.5 = s8[480,128]{1,0} reshape(copy.1)
  convert.8 = bf16[480,128]{1,0} convert(reshape.5)
  parameter_1 = bf16[16,128]{1,0} parameter(1)
  ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1),
    lhs_contracting_dims={1}, rhs_contracting_dims={1}
}

ENTRY e {
  p0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
  p1 = bf16[16,128]{1,0} parameter(1)
  ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
    kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm",
    metadata={op_name="foo"}
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  TritonGemmConfig config(16, 16, 16, 4, 1, 4);
  TF_EXPECT_OK(MakeDotSplitKBatch(
      module->entry_computation()->root_instruction(), config));
  const HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_EQ(root->opcode(), HloOpcode::kReduce);
  EXPECT_EQ(root->metadata().op_name(), "foo");
}

// Split-K also applies when the dot result is post-processed (distributive
// ops only) inside the fusion.
TEST_F(SplitKTest, MakeSplitKWithOutputFusion) {
  const std::string hlo_text = R"(
HloModule t

triton_gemm_dot {
  p0 = f16[480,128]{1,0} parameter(0)
  p1 = f16[16,128]{1,0} parameter(1)
  d = f16[480,16]{1,0} dot(p0, p1),
    lhs_contracting_dims={1}, rhs_contracting_dims={1}
  c = bf16[] constant(123)
  n = bf16[] negate(c)
  bc = bf16[480,16]{1,0} broadcast(n)
  cv = bf16[480,16]{1,0} convert(d)
  ROOT a = bf16[480,16]{1,0} multiply(bc, cv)
}

ENTRY e {
  p0 = f16[480,128]{1,0} parameter(0)
  p1 = f16[16,128]{1,0} parameter(1)
  ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
    kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  TritonGemmConfig config(16, 16, 16, 4, 1, 4);
  TF_EXPECT_OK(MakeDotSplitKBatch(
      module->entry_computation()->root_instruction(), config));
  EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
            HloOpcode::kReduce);
}

// A non-distributive op (tanh) after the dot must cancel the rewrite, since
// f(a + b) != f(a) + f(b).
TEST_F(SplitKTest, PreventSplitKWithNonDistributiveOperations) {
  const std::string hlo_text = R"(
HloModule t

triton_gemm_dot {
  p0 = f16[480,128]{1,0} parameter(0)
  p1 = f16[16,128]{1,0} parameter(1)
  d = f16[480,16]{1,0} dot(p0, p1),
    lhs_contracting_dims={1}, rhs_contracting_dims={1}
  c = f32[480,16]{1,0} convert(d)
  ROOT s = f32[480,16]{1,0} tanh(c)
}

ENTRY e {
  p0 = f16[480,128]{1,0} parameter(0)
  p1 = f16[16,128]{1,0} parameter(1)
  ROOT fusion = f32[480,16]{1,0} fusion(p0, p1),
    kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  TritonGemmConfig config(16, 16, 16, 4, 1, 4);
  EXPECT_THAT(
      MakeDotSplitKBatch(module->entry_computation()->root_instruction(),
                         config),
      tsl::testing::StatusIs(
          tsl::error::CANCELLED,
          absl::StrFormat(
              "Operation non-distributive over addition after dot.")));
}

// Contracting dimension of 31 is not divisible by split_k=2 but can be padded.
TEST_F(SplitKTest, MakeSplitKWithNonDivisibleDimensionSize) {
  constexpr absl::string_view kHloText = R"(
t {
  c1 = s32[] constant(1)
  bc1 = s32[31]{0} broadcast(c1), dimensions={}
  p0 = s32[31]{0} parameter(0)
  cmp = pred[31]{0} compare(bc1, p0), direction=EQ
  cvt = f32[31]{0} convert(cmp)
  bc2 = f32[17,31]{1,0} broadcast(cvt), dimensions={1}
  c0 = f32[] constant(0)
  bc0 = f32[17,16]{1,0} broadcast(c0), dimensions={}
  ROOT dot = f32[31,16]{1,0} dot(bc2, bc0),
    lhs_contracting_dims={0}, rhs_contracting_dims={0}
}

ENTRY e {
  p0 = s32[31]{0} parameter(0)
  ROOT r = f32[31,16]{1,0} fusion(p0),
    kind=kCustom, calls=t, backend_config="__triton_gemm"
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloText));
  TritonGemmConfig config(16, 16, 16, 2, 1, 2);
  TF_EXPECT_OK(MakeDotSplitKBatch(
      module->entry_computation()->root_instruction(), config));
}

// A slice feeding the contracting dimension is not supported by the rewrite.
TEST_F(SplitKTest, AvoidSplitKWithSlicedContractingDimension) {
  const std::string hlo_text = R"(
t {
  p0 = f16[32,1234] parameter(0)
  s0 = f16[32,256] slice(p0), slice={[0:32], [41:297]}
  p1 = f16[256,768] parameter(1)
  ROOT d = f16[32,768] dot(s0, p1),
    lhs_contracting_dims={1}, rhs_contracting_dims={0}
}

ENTRY e {
  p0 = f16[32,1234] parameter(0)
  p1 = f16[256,768] parameter(1)
  ROOT r = f16[32,768] fusion(p0, p1),
    kind=kCustom, calls=t, backend_config="__triton_gemm"
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  TritonGemmConfig config(16, 16, 16, 2, 1, 2);
  EXPECT_THAT(
      MakeDotSplitKBatch(module->entry_computation()->root_instruction(),
                         config),
      tsl::testing::StatusIs(
          tsl::error::CANCELLED,
          absl::StrFormat(
              "Sliced contracting dimension is not supported yet.")));
}

// The non-default {0,1} output layout must survive on the reduce.
TEST_F(SplitKTest, MakeSplitKWithNonStandardOutputLayout) {
  const std::string kHloText = R"(
HloModule t

triton_gemm_dot {
  parameter_0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
  bitcast.1 = s8[3,5,32,128]{2,1,3,0} bitcast(parameter_0)
  copy.1 = s8[3,5,32,128]{3,2,1,0} copy(bitcast.1)
  reshape.5 = s8[480,128]{1,0} reshape(copy.1)
  convert.8 = bf16[480,128]{1,0} convert(reshape.5)
  parameter_1 = bf16[16,128]{1,0} parameter(1)
  ROOT dot.0 = bf16[480,16]{0,1} dot(convert.8, parameter_1),
    lhs_contracting_dims={1}, rhs_contracting_dims={1}
}

ENTRY e {
  p0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
  p1 = bf16[16,128]{1,0} parameter(1)
  ROOT fusion = bf16[480,16]{0,1} fusion(p0, p1),
    kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloText));
  TritonGemmConfig config(16, 16, 16, 4, 1, 4);
  TF_EXPECT_OK(MakeDotSplitKBatch(
      module->entry_computation()->root_instruction(), config));
  EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
            HloOpcode::kReduce);
  EXPECT_EQ(module->entry_computation()->root_instruction()->shape().layout(),
            Layout({0, 1}));
}

// A dot that already has a batch dimension gets a second (split-K) one.
TEST_F(SplitKTest, MakeSplitKWithExistingBatchDim) {
  const std::string hlo_text = R"(
HloModule m

triton_gemm_dot.24 {
  parameter_1 = bf16[1,1,800,5,128]{4,3,2,1,0} parameter(1)
  bitcast.3 = bf16[800,5,128]{2,1,0} bitcast(parameter_1)
  convert.3 = f32[800,5,128]{2,1,0} convert(bitcast.3)
  parameter_0 = f32[1,5,700,800]{3,2,1,0} parameter(0)
  bitcast.2 = f32[5,700,800]{2,1,0} bitcast(parameter_0)
  ROOT dot.26 = f32[5,128,700]{2,1,0} dot(convert.3, bitcast.2),
    lhs_batch_dims={1}, lhs_contracting_dims={0},
    rhs_batch_dims={0}, rhs_contracting_dims={2}
}

ENTRY e {
  tmp_3 = f32[1,5,700,800]{3,2,1,0} parameter(0)
  tmp_0 = bf16[1,1,800,5,128]{4,3,2,1,0} parameter(1)
  ROOT triton_gemm_dot.24 = f32[5,128,700]{2,1,0} fusion(tmp_3, tmp_0),
    kind=kCustom, calls=triton_gemm_dot.24, backend_config="__triton_gemm"
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  TritonGemmConfig config(32, 64, 64, 8, 1, 4);
  TF_EXPECT_OK(MakeDotSplitKBatch(
      module->entry_computation()->root_instruction(), config));
  EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
            HloOpcode::kReduce);
}

// K=129 is not divisible by split_k=4; the rewrite pads and still succeeds.
TEST_F(SplitKTest, SupportsIndivisible) {
  constexpr absl::string_view kHloText = R"(
HloModule t

triton_gemm_dot {
  parameter_0 = s8[3,129,5,32]{3,2,1,0} parameter(0)
  bitcast.1 = s8[3,5,32,129]{2,1,3,0} bitcast(parameter_0)
  copy.1 = s8[3,5,32,129]{3,2,1,0} copy(bitcast.1)
  reshape.5 = s8[480,129]{1,0} reshape(copy.1)
  convert.8 = bf16[480,129]{1,0} convert(reshape.5)
  parameter_1 = bf16[16,129]{1,0} parameter(1)
  ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1),
    lhs_contracting_dims={1}, rhs_contracting_dims={1}
}

ENTRY e {
  p0 = s8[3,129,5,32]{3,2,1,0} parameter(0)
  p1 = bf16[16,129]{1,0} parameter(1)
  ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
    kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloText));
  TritonGemmConfig config(16, 16, 16, 4, 1, 4);
  TF_EXPECT_OK(MakeDotSplitKBatch(
      module->entry_computation()->root_instruction(), config));
}

// Minimal indivisible case, split_k=4.
TEST_F(SplitKTest, SupportsIndivisibleSimpleSplitK4) {
  constexpr absl::string_view kHloText = R"(
HloModule t

triton_gemm_dot {
  parameter_0 = s8[480,129]{1,0} parameter(0)
  convert_0 = bf16[480,129]{1,0} convert(parameter_0)
  parameter_1 = bf16[16,129]{1,0} parameter(1)
  ROOT dot.0 = bf16[480,16]{1,0} dot(convert_0, parameter_1),
    lhs_contracting_dims={1}, rhs_contracting_dims={1}
}

ENTRY e {
  p0 = s8[480,129]{1,0} parameter(0)
  p1 = bf16[16,129]{1,0} parameter(1)
  ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
    kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloText));
  TritonGemmConfig config(16, 16, 16, 4, 1, 4);
  TF_EXPECT_OK(MakeDotSplitKBatch(
      module->entry_computation()->root_instruction(), config));
}

// Indivisible K with non-default {0,1} operand layouts; the result must still
// pass a layout-sensitive verifier.
TEST_F(SplitKTest, SupportsIndivisibleWithCustomLayout) {
  constexpr absl::string_view kHloText = R"(
HloModule t

triton_gemm_dot {
  parameter_0 = s8[480,129]{0,1} parameter(0)
  convert_0 = bf16[480,129]{0,1} convert(parameter_0)
  parameter_1 = bf16[16,129]{0,1} parameter(1)
  ROOT dot.0 = bf16[480,16]{1,0} dot(convert_0, parameter_1),
    lhs_contracting_dims={1}, rhs_contracting_dims={1}
}

ENTRY e {
  p0 = s8[480,129]{0,1} parameter(0)
  p1 = bf16[16,129]{0,1} parameter(1)
  ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
    kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloText));
  constexpr TritonGemmConfig kConfig(16, 16, 16, 4, 1, 4);
  TF_EXPECT_OK(MakeDotSplitKBatch(
      module->entry_computation()->root_instruction(), kConfig));
  TF_EXPECT_OK(HloVerifier(/*layout_sensitive=*/true,
                           /*allow_mixed_precision=*/true,
                           LayoutAssignment::InstructionCanChangeLayout)
                   .Run(module.get())
                   .status());
}

// K=255 with split_k=16 (multiple padding rounds).
TEST_F(SplitKTest, SupportsIndivisibleSimpleSplitK16) {
  constexpr absl::string_view kHloText = R"(
HloModule t

triton_gemm_dot {
  parameter_0 = s8[480,255]{1,0} parameter(0)
  convert_0 = bf16[480,255]{1,0} convert(parameter_0)
  parameter_1 = bf16[16,255]{1,0} parameter(1)
  ROOT dot.0 = bf16[480,16]{1,0} dot(convert_0, parameter_1),
    lhs_contracting_dims={1}, rhs_contracting_dims={1}
}

ENTRY e {
  p0 = s8[480,255]{1,0} parameter(0)
  p1 = bf16[16,255]{1,0} parameter(1)
  ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
    kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloText));
  TritonGemmConfig config(16, 16, 16, 16, 1, 4);
  TF_EXPECT_OK(MakeDotSplitKBatch(
      module->entry_computation()->root_instruction(), config));
}

// Indivisible K reached through a transpose inside the fusion.
TEST_F(SplitKTest, SupportsIndivisibleWithTranspose) {
  constexpr absl::string_view kHloText = R"(
HloModule t

triton_gemm_dot {
  parameter_0 = s8[480,255]{1,0} parameter(0)
  convert_0 = bf16[480,255]{1,0} convert(parameter_0)
  transpose_0 = bf16[255,480]{1,0} transpose(convert_0), dimensions={1,0}
  parameter_1 = bf16[16,255]{1,0} parameter(1)
  ROOT dot.0 = bf16[480,16]{1,0} dot(transpose_0, parameter_1),
    lhs_contracting_dims={0}, rhs_contracting_dims={1}
}

ENTRY e {
  p0 = s8[480,255]{1,0} parameter(0)
  p1 = bf16[16,255]{1,0} parameter(1)
  ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
    kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloText));
  TritonGemmConfig config(16, 16, 16, 16, 1, 4);
  TF_EXPECT_OK(MakeDotSplitKBatch(
      module->entry_computation()->root_instruction(), config));
}

// Indivisible K where the LHS is a broadcast of a scalar parameter.
TEST_F(SplitKTest, SupportIndivisibleWithBroadcast) {
  constexpr absl::string_view kHloText = R"(
HloModule t

triton_gemm_dot {
  parameter_0 = s8[] parameter(0)
  convert_0 = bf16[] convert(parameter_0)
  broadcast_0 = bf16[480,255]{1,0} broadcast(convert_0)
  parameter_1 = bf16[16,255]{1,0} parameter(1)
  ROOT dot.0 = bf16[480,16]{1,0} dot(broadcast_0, parameter_1),
    lhs_contracting_dims={1}, rhs_contracting_dims={1}
}

ENTRY e {
  p0 = s8[] parameter(0)
  p1 = bf16[16,255]{1,0} parameter(1)
  ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
    kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloText));
  TritonGemmConfig config(16, 16, 16, 16, 1, 4);
  TF_EXPECT_OK(MakeDotSplitKBatch(
      module->entry_computation()->root_instruction(), config));
}

// Indivisible K reached through a bitcast from a differently-laid-out input.
TEST_F(SplitKTest, SupportsIndivisibleWithBitcast) {
  constexpr absl::string_view kHloText = R"(
HloModule t

triton_gemm_dot {
  parameter_0 = s8[3,5,480,17]{3,0,1,2} parameter(0)
  convert_0 = bf16[3,5,480,17]{3,0,1,2} convert(parameter_0)
  bitcast_0 = bf16[480,255]{1,0} bitcast(convert_0)
  parameter_1 = bf16[16,255]{1,0} parameter(1)
  ROOT dot.0 = bf16[480,16]{1,0} dot(bitcast_0, parameter_1),
    lhs_contracting_dims={1}, rhs_contracting_dims={1}
}

ENTRY e {
  p0 = s8[3,5,480,17]{3,0,1,2} parameter(0)
  p1 = bf16[16,255]{1,0} parameter(1)
  ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
    kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloText));
  TritonGemmConfig config(16, 16, 16, 16, 1, 4);
  TF_EXPECT_OK(MakeDotSplitKBatch(
      module->entry_computation()->root_instruction(), config));
}

// K=64 is smaller than the requested block_k*split_k work; rewrite cancels.
TEST_F(SplitKTest, SkipSmallK) {
  const std::string hlo_text = R"(
HloModule t

triton_gemm_dot {
  parameter_0 = s8[3,64,5,32]{3,2,1,0} parameter(0)
  bitcast.1 = s8[3,5,32,64]{2,1,3,0} bitcast(parameter_0)
  copy.1 = s8[3,5,32,64]{3,2,1,0} copy(bitcast.1)
  reshape.5 = s8[480,64]{1,0} reshape(copy.1)
  convert.8 = bf16[480,64]{1,0} convert(reshape.5)
  parameter_1 = bf16[16,64]{1,0} parameter(1)
  ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1),
    lhs_contracting_dims={1}, rhs_contracting_dims={1}
}

ENTRY e {
  p0 = s8[3,64,5,32]{3,2,1,0} parameter(0)
  p1 = bf16[16,64]{1,0} parameter(1)
  ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
    kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  TritonGemmConfig config(16, 16, 128, 4, 1, 4);
  EXPECT_THAT(
      MakeDotSplitKBatch(module->entry_computation()->root_instruction(),
                         config),
      tsl::testing::StatusIs(
          tsl::error::CANCELLED,
          "Too small divisible part of the contracting dimension."));
}

// A fragmented contracting dimension is supported only when a dimension
// fragment suffix divides split_k (8 works, 5 does not).
TEST_F(SplitKTest, FragmentedKSupported) {
  const std::string hlo_text = R"(
HloModule t

triton_gemm_dot {
  p0 = f16[7,2,16,4,20] parameter(0)
  t0 = f16[2,16,4,20,7] transpose(p0), dimensions={1,2,3,4,0}
  b0 = f16[2560,7] bitcast(t0)
  a1 = f16[2560,5] parameter(1)
  ROOT r = f16[7,5] dot(b0, a1),
    lhs_contracting_dims={0}, rhs_contracting_dims={0}
}

ENTRY e {
  p0 = f16[7,2,16,4,20] parameter(0)
  p1 = f16[2560,5] parameter(1)
  ROOT fusion = f16[7,5] fusion(p0, p1),
    kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));

  TritonGemmConfig config(32, 32, 16, 1, 1, 4);
  // 5 does not divide any suffix of the K fragments {20, 4, 16, 2}.
  config.split_k = 5;
  EXPECT_THAT(
      MakeDotSplitKBatch(module->entry_computation()->root_instruction(),
                         config),
      tsl::testing::StatusIs(tsl::error::CANCELLED,
                             "Contracting dimension is too fragmented."));

  config.split_k = 8;
  TF_EXPECT_OK(MakeDotSplitKBatch(
      module->entry_computation()->root_instruction(), config));
  const HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_EQ(root->opcode(), HloOpcode::kReduce);
  const HloComputation* dot_computation = module->entry_computation()
                                              ->root_instruction()
                                              ->operand(0)
                                              ->called_computations()[0];
  const HloInstruction* p0 = dot_computation->parameter_instruction(0);
  TF_ASSERT_OK_AND_ASSIGN(
      const auto analysis,
      TritonFusionAnalysis::Execute(*dot_computation, config.split_k));
  EXPECT_EQ(dot_computation->root_instruction()->shape(),
            ShapeUtil::MakeShapeWithDescendingLayout(F16, {8, 7, 5}));
  EXPECT_THAT(
      *analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
      ElementsAre(FieldsAre(1, 2560, 0, 2560, ElementsAre(20, 4, 4, 4, 2))));
}

// Fragments {128, 3} admit no suffix divisible by 4: rewrite cancels.
TEST_F(SplitKTest, FragmentedKUnsupported) {
  const std::string hlo_text = R"(
HloModule t

triton_gemm_dot {
  p0 = f32[3,128,77] parameter(0)
  b0 = f32[384,77] bitcast(p0)
  a1 = f32[384,25] parameter(1)
  ROOT r = f32[77,25] dot(b0, a1),
    lhs_contracting_dims={0}, rhs_contracting_dims={0}
}

ENTRY e {
  p0 = f32[3,128,77] parameter(0)
  p1 = f32[384,25] parameter(1)
  ROOT fusion = f32[77,25] fusion(p0, p1),
    kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  TritonGemmConfig config(16, 16, 16, 4, 1, 4);
  EXPECT_THAT(
      MakeDotSplitKBatch(module->entry_computation()->root_instruction(),
                         config),
      tsl::testing::StatusIs(tsl::error::CANCELLED,
                             "Contracting dimension is too fragmented."));
}

// A non-default dot layout followed by a bitcast root still analyzes cleanly
// after the rewrite.
TEST_F(SplitKTest, MakeSplitKWithNonDefaultOutputLayout) {
  const std::string kHloText = R"(
triton_gemm_dot.4842_computation {
  parameter_0 = bf16[96,96]{1,0} parameter(0)
  parameter_1 = bf16[96,7]{1,0} parameter(1)
  dot.0 = bf16[96,7]{0,1} dot(parameter_0, parameter_1),
    lhs_contracting_dims={1}, rhs_contracting_dims={0}
  ROOT bitcast.2 = bf16[7,3,32]{2,1,0} bitcast(dot.0)
}

ENTRY e {
  parameter_0.91 = bf16[96,96]{1,0} parameter(0)
  parameter_1.86 = bf16[96,7]{1,0} parameter(1)
  ROOT triton_gemm_dot.4842 = bf16[7,3,32]{2,1,0}
    fusion(parameter_0.91, parameter_1.86),
    kind=kCustom, calls=triton_gemm_dot.4842_computation
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloText));
  TritonGemmConfig config(16, 16, 16, 2, 1, 4);
  TF_EXPECT_OK(MakeDotSplitKBatch(
      module->entry_computation()->root_instruction(), config));
  EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
            HloOpcode::kReduce);
  const HloComputation* dot_computation = module->entry_computation()
                                              ->root_instruction()
                                              ->operand(0)
                                              ->called_computations()[0];
  TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
                          TritonFusionAnalysis::Execute(*dot_computation));
}

// Sparse dot with LHS sparsity (2:4): operands and metadata all get the
// split-K dimension; the contracting sizes are divided by split_k.
TEST_F(SplitKTest, SparseDotWithLhsSparseOperandIsRewritten) {
  const std::string hlo_text = R"(
HloModule test

triton_gemm {
  lhs = f16[2,5,1600] parameter(0)
  rhs = f16[2,3200,10] parameter(1)
  meta = u16[2,5,200] parameter(2)
  ROOT dot = f32[2,5,10] dot(lhs, rhs, meta),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={1},
    sparsity=L.2@2:4
}

ENTRY e {
  lhs = f16[2,5,1600] parameter(0)
  rhs = f16[2,3200,10] parameter(1)
  meta = u16[2,5,200] parameter(2)
  ROOT fusion = f32[2,5,10] fusion(lhs, rhs, meta),
    kind=kCustom, calls=triton_gemm, backend_config="__triton_gemm"
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  TritonGemmConfig config(16, 16, 16, 4, 1, 1);
  TF_EXPECT_OK(MakeDotSplitKBatch(
      module->entry_computation()->root_instruction(), config));
  const HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_EQ(root->opcode(), HloOpcode::kReduce);

  HloInstruction* dot =
      module->GetComputationWithName("triton_gemm")->root_instruction();
  EXPECT_EQ(dot->operand(0)->shape(),
            ShapeUtil::MakeShapeWithDescendingLayout(F16, {2, 5, 4, 400}));
  EXPECT_EQ(dot->operand(1)->shape(),
            ShapeUtil::MakeShapeWithDescendingLayout(F16, {2, 4, 800, 10}));
  EXPECT_EQ(dot->operand(2)->shape(),
            ShapeUtil::MakeShapeWithDescendingLayout(U16, {2, 5, 4, 50}));
}

// RHS sparsity is not supported: the rewrite must fail.
TEST_F(SplitKTest, SparseDotWithRhsSparseOperandTriggersError) {
  const std::string hlo_text = R"(
HloModule test

triton_gemm {
  lhs = f16[2,5,3200] parameter(0)
  rhs = f16[2,1600,10] parameter(1)
  meta = u16[2,200,10] parameter(2)
  ROOT dot = f32[2,5,10] dot(lhs, rhs, meta),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={1},
    sparsity=R.1@2:4
}

ENTRY e {
  lhs = f16[2,5,3200] parameter(0)
  rhs = f16[2,1600,10] parameter(1)
  meta = u16[2,200,10] parameter(2)
  ROOT fusion = f32[2,5,10] fusion(lhs, rhs, meta),
    kind=kCustom, calls=triton_gemm, backend_config="__triton_gemm"
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  TritonGemmConfig config(16, 16, 16, 4, 1, 1);
  auto result = MakeDotSplitKBatch(
      module->entry_computation()->root_instruction(), config);
  EXPECT_FALSE(result.ok());
}

// Fixture that disables the reduced-precision reduction via debug options, so
// the rewriter must accumulate in higher precision and convert back.
class SplitKTestWithMorePreciseReduction
    : public HloTestBase,
      public ::testing::WithParamInterface<int> {
 public:
  DebugOptions GetDebugOptionsForTest() override {
    DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
    debug_options.set_xla_gpu_triton_gemm_disable_reduced_precision_reduction(
        true);
    return debug_options;
  }
};

// With precise reduction the entry root is convert(reduce(fusion, constant)).
TEST_F(SplitKTestWithMorePreciseReduction, MakeSplitK) {
  constexpr absl::string_view kHloText = R"(
HloModule t

triton_gemm_dot {
  parameter_0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
  bitcast.1 = s8[3,5,32,128]{2,1,3,0} bitcast(parameter_0)
  copy.1 = s8[3,5,32,128]{3,2,1,0} copy(bitcast.1)
  reshape.5 = s8[480,128]{1,0} reshape(copy.1)
  convert.8 = bf16[480,128]{1,0} convert(reshape.5)
  parameter_1 = bf16[16,128]{1,0} parameter(1)
  ROOT dot.0 = bf16[480,16]{1,0} dot(convert.8, parameter_1),
    lhs_contracting_dims={1}, rhs_contracting_dims={1}
}

ENTRY e {
  p0 = s8[3,128,5,32]{3,2,1,0} parameter(0)
  p1 = bf16[16,128]{1,0} parameter(1)
  ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
    kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(kHloText));
  TritonGemmConfig config(16, 16, 16, 4, 1, 4);
  TF_EXPECT_OK(MakeDotSplitKBatch(
      module->entry_computation()->root_instruction(), config));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Convert(m::Reduce(m::Fusion(), m::Constant()))));
}

// Same convert-wrapped shape when the fusion has extra ops after the dot.
TEST_F(SplitKTestWithMorePreciseReduction, MakeSplitKWithOutputFusion) {
  const std::string hlo_text = R"(
HloModule t

triton_gemm_dot {
  p0 = f16[480,128]{1,0} parameter(0)
  p1 = f16[16,128]{1,0} parameter(1)
  d = f16[480,16]{1,0} dot(p0, p1),
    lhs_contracting_dims={1}, rhs_contracting_dims={1}
  c = bf16[] constant(123)
  n = bf16[] negate(c)
  bc = bf16[480,16]{1,0} broadcast(n)
  cv = bf16[480,16]{1,0} convert(d)
  ROOT a = bf16[480,16]{1,0} multiply(bc, cv)
}

ENTRY e {
  p0 = f16[480,128]{1,0} parameter(0)
  p1 = f16[16,128]{1,0} parameter(1)
  ROOT fusion = bf16[480,16]{1,0} fusion(p0, p1),
    kind=kCustom, calls=triton_gemm_dot, backend_config="__triton_gemm"
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  TritonGemmConfig config(16, 16, 16, 4, 1, 4);
  TF_EXPECT_OK(MakeDotSplitKBatch(
      module->entry_computation()->root_instruction(), config));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Convert(m::Reduce(m::Fusion(), m::Constant()))));
}

// A transpose after the dot must get the split-K dimension (index 0) inserted
// into its permutation: {1,0,2} becomes {0,2,1,3}.
TEST_F(SplitKTest, MakeSplitKWithTransposeAfterDot) {
  const std::string hlo_text = R"(
triton_gemm_dot {
  p0 = f16[8,288,288]{2,1,0} parameter(0)
  p1 = f16[8,288,32]{2,0,1} parameter(1)
  d = f16[8,288,32]{2,1,0} dot(p0, p1),
    lhs_batch_dims={0}, lhs_contracting_dims={2},
    rhs_batch_dims={0}, rhs_contracting_dims={1}
  ROOT t = f16[288,8,32]{2,1,0} transpose(d), dimensions={1,0,2}
}

ENTRY e {
  p0 = f16[8,288,288]{2,1,0} parameter(0)
  p1 = f16[8,288,32]{2,0,1} parameter(1)
  ROOT fusion = f16[288,8,32]{2,1,0} fusion(p0, p1),
    kind=kCustom, calls=triton_gemm_dot
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  TritonGemmConfig config(16, 128, 32, 8, 1, 4);
  TF_EXPECT_OK(MakeDotSplitKBatch(
      module->entry_computation()->root_instruction(), config));
  const auto* transpose =
      Cast<HloTransposeInstruction>(module->entry_computation()
                                        ->root_instruction()
                                        ->operand(0)
                                        ->fused_instructions_computation()
                                        ->root_instruction());
  EXPECT_THAT(transpose->dimensions(), ElementsAre(0, 2, 1, 3));
}

// A size-1 non-contracting dimension does not prevent the split.
TEST_F(SplitKTest, MakeSplitKWithTrivialDimension) {
  const std::string hlo_text = R"(
triton_gemm_dot {
  parameter_0 = f32[1001,1]{1,0} parameter(0)
  parameter_1 = f32[1001,2048]{1,0} parameter(1)
  ROOT dot = f32[1,2048]{1,0} dot(parameter_0, parameter_1),
    lhs_contracting_dims={0}, rhs_contracting_dims={0}
}

ENTRY %entry_computation {
  p0 = f32[1001,1]{1,0} parameter(0)
  p1 = f32[1001,2048]{1,0} parameter(1)
  ROOT fusion = f32[1,2048]{1,0} fusion(p0, p1),
    kind=kCustom, calls=triton_gemm_dot
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  TritonGemmConfig config(16, 128, 64, 4, 1, 4);
  TF_EXPECT_OK(MakeDotSplitKBatch(
      module->entry_computation()->root_instruction(), config));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              GmockMatch(m::Reduce(m::Fusion(), m::Constant())));
}

}  // namespace
}  // namespace gpu
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/split_k_gemm_rewriter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/split_k_gemm_rewriter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
9c586ff2-998f-46bb-8cd1-365898f411cf
cpp
tensorflow/tensorflow
triton_fusion_analysis
third_party/xla/xla/service/gpu/triton_fusion_analysis.cc
third_party/xla/xla/service/gpu/triton_fusion_analysis_test.cc
// Analysis of a Triton GEMM fusion computation: starting from the dot
// instruction, dimension orders are propagated to the fusion's parameters
// (LHS / RHS / META scopes) and along the single-user output chain (OUTPUT
// scope).  The result is one TensorIterationSpec per analyzed instruction,
// queryable via IterSpec().
#include "xla/service/gpu/triton_fusion_analysis.h"

#include <cstdint>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <variant>
#include <vector>

#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/gpu/cudnn_support_utils.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/triton_tiling_propagation.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tools/hlo_decomposer.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace gpu {

namespace {

// Shorthand for the tiling-propagation vocabulary used throughout this file.
using triton_fusion::DimOrdersAndReqs;
using triton_fusion::DimOrdersAndReqsOrError;
using triton_fusion::DotRequirements;
using triton_fusion::FusionContext;
using triton_fusion::GetPropagatedDimOrdersAndRequirements;
using triton_fusion::kNoSplitRequirement;
using triton_fusion::TransformDirection;

}  // namespace

namespace triton_fusion {

// Creates a propagation context for one dot operand
// (operand_number: 0 = LHS, 1 = RHS, 2 = sparsity metadata).
// With split_k > 1 the operand carries one extra split-K batch dimension
// located immediately before the contracting dimension.
absl::StatusOr<FusionContext> FusionContext::FromDotOperand(
    const HloInstruction& dot, const int operand_number, const int split_k) {
  // split_k > 1 implies exactly one extra (split-K) batch dimension.
  const int num_split_k_batch_dims = split_k > 1;
  int split_k_dimension_index = kNoDimensionIndex;
  TF_ASSIGN_OR_RETURN(int contracting_dimension_index,
                      ContractingDimensionIndex(dot, operand_number));
  TF_ASSIGN_OR_RETURN(int non_contracting_dimension_index,
                      NonContractingDimensionIndex(dot, operand_number));
  if (split_k > 1) {
    // The split-K dimension sits right before the contracting dimension.
    split_k_dimension_index = contracting_dimension_index - 1;
  }
  int splittable_dimension_index = kNoDimensionIndex;
  // Only the LHS non-contracting dimension of a dot with no real (non-split-K)
  // batch dimensions may be split by the tiling.
  if (operand_number == 0 &&
      dot.dot_dimension_numbers().lhs_batch_dimensions_size() -
              num_split_k_batch_dims ==
          0) {
    splittable_dimension_index = non_contracting_dimension_index;
  }
  FusionContext context(DotProperties{non_contracting_dimension_index,
                                      splittable_dimension_index},
                        DotRequirements(kNoSplitRequirement));
  // Seed the context with the dimension order of the operand itself.
  context.dim_orders_[dot.operand(operand_number)] =
      DimensionOrder::FromDotOperandOrOutput(*dot.operand(operand_number),
                                             split_k_dimension_index);
  return context;
}

// Creates a propagation context for the dot's output side.  `requirements`
// are the requirements collected while analyzing the LHS scope; when they
// demand a major-part split, the splittable dimension of the output is the
// batch-most non-split-K dimension (index 1 with split-K, 0 without).
FusionContext FusionContext::FromDotOutput(const HloInstruction& dot,
                                           const int split_k,
                                           DotRequirements requirements) {
  int splittable_dimension_index = kNoDimensionIndex;
  if (requirements.splittable_dimension_major_part_size > 1) {
    splittable_dimension_index = (split_k > 1) ? 1 : 0;
  }
  // -1: no non-contracting dimension index is meaningful for the output.
  FusionContext context(DotProperties{-1, splittable_dimension_index},
                        std::move(requirements));
  context.dim_orders_[&dot] = DimensionOrder::FromDotOperandOrOutput(dot);
  return context;
}

namespace {

// Number of operands an instruction adds beyond a single data input.
// Non-scalar constants count as zero because they become parameters of the
// fusion without adding fan-in.
// NOTE(review): no caller is visible in this file — confirm it is still
// referenced elsewhere or remove it.
int64_t NumAddedParameters(const HloInstruction& hlo) {
  if (hlo.opcode() == HloOpcode::kConstant &&
      !ShapeUtil::IsScalar(hlo.shape())) {
    return 0;
  }
  return hlo.operand_count() - 1;
}

}  // namespace

// Merges `update` into this context.  Fails (returns false, leaving the
// context unchanged) if an instruction already has a physically different
// dimension order or the dot requirements cannot be combined.
bool FusionContext::CombineDimOrdersAndReqs(const DimOrdersAndReqs& update) {
  for (const auto& [key, value] : update.dim_orders) {
    auto it = dim_orders_.find(key);
    if (it != dim_orders_.cend() && !it->second.IsPhysicallyEquivalent(value)) {
      return false;
    }
  }
  DotRequirementsOrError requirements_or_error =
      CombineDotRequirements(requirements_, update.requirements);
  if (std::holds_alternative<FusionDecision>(requirements_or_error)) {
    return false;
  }
  requirements_ = std::move(std::get<DotRequirements>(requirements_or_error));
  dim_orders_.insert(update.dim_orders.begin(), update.dim_orders.end());
  return true;
}

// BFS from `origin` towards the fusion parameters (output-to-input
// direction), recording each visited instruction's TensorIterationSpec in
// `iter_specs` and each reached parameter in `parameters`.  Traversal stops
// at dots (they belong to other scopes).  Fails if propagation or
// requirement combination fails, or if a parameter is reachable with two
// different dimension orders.
absl::Status FusionContext::PropagateDimensionOrdersToParameters(
    const HloInstruction& origin, ConstHloInstructionSet& parameters,
    ConstHloInstructionMap<TensorIterationSpec>& iter_specs) {
  absl::flat_hash_set<const HloInstruction*> visited;
  std::queue<const HloInstruction*> to_process;
  visited.insert(&origin);
  to_process.push(&origin);
  while (!to_process.empty()) {
    const HloInstruction* hlo = to_process.front();
    to_process.pop();
    if (hlo->opcode() == HloOpcode::kParameter) {
      // A parameter reached twice would imply two conflicting read patterns.
      if (!parameters.insert(hlo).second) {
        return FailedPrecondition(
            "A parameter is read differently by different users. hlo: %s",
            hlo->ToString());
      }
      VLOG(5) << hlo->ToString();
    }
    DimOrdersAndReqsOrError result = GetPropagatedDimOrdersAndRequirements(
        *hlo, dim_orders_.at(hlo), TransformDirection::kOutputToInput,
        properties_);
    if (!std::holds_alternative<DimOrdersAndReqs>(result)) {
      return FailedPrecondition(
          "Can not propagate dim orders and requirements.");
    }
    if (!CombineDimOrdersAndReqs(std::get<DimOrdersAndReqs>(result))) {
      return FailedPrecondition("Can not combine dim orders and requirements.");
    }
    iter_specs[hlo] = dim_orders_.at(hlo).ToTensorIterationSpec();
    for (const HloInstruction* operand : hlo->operands()) {
      if (!visited.insert(operand).second) {
        continue;
      }
      if (operand->opcode() == HloOpcode::kDot) {
        // Stop at the dot: its operands belong to other scopes.
        continue;
      }
      to_process.push(operand);
    }
  }
  return absl::OkStatus();
}

}  // namespace triton_fusion

// Runs the analysis on a fusion computation; the anchor is the first dot
// instruction found in it (required to exist).
absl::StatusOr<TritonFusionAnalysis> TritonFusionAnalysis::Execute(
    const HloComputation& computation, const int split_k) {
  VLOG(5) << computation.ToString(HloPrintOptions::ShortParsable());
  TritonFusionAnalysis analysis;
  const HloInstruction* dot =
      hlo_query::GetFirstInstructionWithOpcode(computation, HloOpcode::kDot);
  TF_RET_CHECK(dot != nullptr);
  TF_RETURN_IF_ERROR(analysis.ExecuteForDotFusion(*dot, split_k));
  return analysis;
}

// Convenience overload anchored directly on a given dot instruction.
absl::StatusOr<TritonFusionAnalysis> TritonFusionAnalysis::Execute(
    const HloDotInstruction& dot, int split_k) {
  TritonFusionAnalysis analysis;
  TF_RETURN_IF_ERROR(analysis.ExecuteForDotFusion(dot, split_k));
  return analysis;
}

// Checks whether fusing `producer` into `consumer` would still be analyzable:
// clones the pair into a fresh module, performs the fusion there, and runs
// the analysis on the resulting fused computation.
absl::Status TritonFusionAnalysis::ExecuteForProducerConsumer(
    const HloInstruction& producer, const HloInstruction& consumer,
    int split_k) {
  std::unique_ptr<HloModule> new_module =
      ExtractProducerConsumerIntoNewModule(producer, consumer);
  auto* new_producer =
      new_module->entry_computation()->GetInstructionWithName(producer.name());
  auto* new_consumer =
      new_module->entry_computation()->GetInstructionWithName(consumer.name());
  // Keeps ownership alive when a wrapper fusion has to be created below.
  std::unique_ptr<HloInstruction> fusion_instruction_holder;
  HloInstruction* fusion_instruction;
  if (new_consumer->opcode() == HloOpcode::kFusion) {
    fusion_instruction = new_consumer;
  } else {
    // NOTE(review): fusion_kind() is queried on new_producer in the branch
    // where the consumer is NOT a fusion; if the producer is not a fusion
    // either, this call looks invalid — confirm callers guarantee that at
    // least one of producer/consumer is a fusion.
    fusion_instruction_holder = HloInstruction::CreateFusion(
        new_consumer->shape(), new_producer->fusion_kind(), new_consumer);
    fusion_instruction = fusion_instruction_holder.get();
  }
  if (new_producer->opcode() == HloOpcode::kFusion) {
    fusion_instruction->MergeFusionInstruction(new_producer);
  } else {
    fusion_instruction->FuseInstruction(new_producer);
  }
  auto* fused_computation =
      fusion_instruction->fused_instructions_computation();
  return Execute(*fused_computation, split_k).status();
}

// Returns true unless some int4 (S4) parameter of the given scope has the
// dot's batch dimension as its minor-most (stride-1) dimension.  Only valid
// for the LHS and RHS scopes and at most one batch dimension.
bool TritonFusionAnalysis::IsBatchDimMinorForInt4Parameter(
    const HloInstruction& dot, Scope scope) const {
  CHECK(scope == Scope::LHS || scope == Scope::RHS);
  const auto& dims = dot.dot_dimension_numbers();
  const auto& batch_dims = (scope == Scope::LHS) ? dims.lhs_batch_dimensions()
                                                 : dims.rhs_batch_dimensions();
  if (batch_dims.empty()) return true;

  int32_t batch_dim = batch_dims.Get(0);
  CHECK_EQ(batch_dims.size(), 1);

  const auto& params = parameters_.at(scope);
  for (const auto& param : params) {
    if (param->shape().element_type() != S4) continue;

    const auto* strides = IterSpec(scope, param, batch_dim);
    if (strides == nullptr) continue;
    // stride == 1 means the batch dimension is minor-most for this parameter.
    if (strides->front().stride == 1) return false;
  }
  return true;
}

// Core of the analysis: propagates dimension orders from the dot into each
// operand scope (LHS, RHS and — for sparse dots — META), then follows the
// single-user chain from the dot to the fusion root to build the OUTPUT
// scope, including any extra parameters read on that output path.
absl::Status TritonFusionAnalysis::ExecuteForDotFusion(
    const HloInstruction& dot, const int split_k) {
  DotRequirements lhs_requirements(kNoSplitRequirement);
  for (const Scope scope : {Scope::LHS, Scope::RHS, Scope::META}) {
    const int operand_number = static_cast<int>(scope);
    if (dot.operand_count() < operand_number + 1) {
      continue;  // Meta scope is optional (sparse dots only).
    }
    TF_ASSIGN_OR_RETURN(auto context, FusionContext::FromDotOperand(
                                          dot, operand_number, split_k));
    TF_RETURN_IF_ERROR(context.PropagateDimensionOrdersToParameters(
        *dot.operand(operand_number), parameters_[scope], iter_specs_[scope]));
    if (scope == Scope::LHS) {
      // LHS requirements constrain how the output may be split.
      lhs_requirements = context.requirements();
    }
  }

  auto context = FusionContext::FromDotOutput(dot, split_k, lhs_requirements);
  const HloInstruction* output = &dot;
  // Walk towards the root along the (required single-user) output chain.
  while (!output->IsRoot()) {
    TF_RET_CHECK(output->user_count() == 1);
    const HloInstruction* input = output;
    if (IsWorkspaceAllocationRoot(*output->users()[0])) {
      // The workspace-allocation root is not part of the analyzed output.
      break;
    }
    output = output->users()[0];
    DimOrdersAndReqsOrError result = GetPropagatedDimOrdersAndRequirements(
        *output, context.dim_orders().at(input),
        TransformDirection::kInputToOutput, context.dot_properties());
    if (std::holds_alternative<FusionDecision>(result)) {
      auto decision = std::get<FusionDecision>(result);
      return FailedPrecondition("Failed to propagate tiling with error: %s",
                                decision.Explain());
    }
    TF_RET_CHECK(
        context.CombineDimOrdersAndReqs(std::get<DimOrdersAndReqs>(result)));
  }
  TF_RET_CHECK(
      iter_specs_[Scope::OUTPUT]
          .insert(
              {output, context.dim_orders().at(output).ToTensorIterationSpec()})
          .second);
  parameters_[Scope::OUTPUT] = {};
  if (output != &dot) {
    // Collect any parameters that feed the output epilogue (e.g. values
    // combined with the dot result after it).
    TF_RETURN_IF_ERROR(context.PropagateDimensionOrdersToParameters(
        *output, parameters_[Scope::OUTPUT], iter_specs_[Scope::OUTPUT]));
  }
  return absl::OkStatus();
}

// Returns the scope (LHS / RHS / OUTPUT) whose iteration specs contain
// `hlo`, or nullopt (with a warning) if none does.  META is not searched.
std::optional<TritonFusionAnalysis::Scope>
TritonFusionAnalysis::QueryInstructionScope(const HloInstruction& hlo) const {
  for (const Scope& scope : {Scope::LHS, Scope::RHS, Scope::OUTPUT}) {
    if (iter_specs_.at(scope).count(&hlo) > 0) {
      return scope;
    }
  }
  LOG(WARNING) << "No scope for hlo: " << hlo.ToString();
  return std::nullopt;
}

// Looks up the iteration spec of `hlo` in `scope` for the given logical
// dimension; nullptr if the instruction or dimension was not analyzed.
const TensorIterationSpec::DimIterationSpec* TritonFusionAnalysis::IterSpec(
    const TritonFusionAnalysis::Scope scope, const HloInstruction* hlo,
    const int dimension) const {
  auto hlo_spec = iter_specs_.at(scope).find(hlo);
  if (hlo_spec != iter_specs_.at(scope).cend()) {
    return hlo_spec->second.Find(dimension);
  }
  return nullptr;
}

namespace {

// Debug formatting of an instruction -> iteration-spec map.
std::string IterationSpecByInstructionMapToString(
    const TritonFusionAnalysis::IterationSpecByInstructionMap& m) {
  return absl::StrCat("IterSpec{",
                      absl::StrJoin(m, ", ",
                                    [&](std::string* s, const auto& kv) {
                                      absl::StrAppend(s, kv.first->name(), ": ",
                                                      kv.second.ToString());
                                    }),
                      "}");
}

// Human-readable scope name for debug output.
// NOTE(review): there is no return after the switch; this relies on the
// switch covering every Scope value — adding a new enum value without a case
// would run off the end (undefined behavior).  Confirm the build enables
// the relevant switch-coverage warning as an error.
std::string ScopeToString(TritonFusionAnalysis::Scope s) {
  switch (s) {
    case TritonFusionAnalysis::Scope::LHS:
      return "LHS";
    case TritonFusionAnalysis::Scope::RHS:
      return "RHS";
    case TritonFusionAnalysis::Scope::META:
      return "META";
    case TritonFusionAnalysis::Scope::OUTPUT:
      return "OUTPUT";
  }
}

}  // namespace

// Debug dump of the whole analysis, one line per scope.
std::string TritonFusionAnalysis::ToString() const {
  return absl::StrCat(
      "TritonFusionAnalysis{\n",
      absl::StrJoin(iter_specs_, ",\n",
                    [&](std::string* s, const auto& kv) {
                      absl::StrAppend(
                          s, ScopeToString(kv.first), ": ",
                          IterationSpecByInstructionMapToString(kv.second));
                    }),
      "\n}");
}

}  // namespace gpu
}  // namespace xla
// Unit tests for TritonFusionAnalysis.  Each test builds a small HLO module
// containing a Triton GEMM fusion, runs the analysis on the fusion
// computation and checks the per-dimension iteration specs (matched
// field-by-field with FieldsAre; see TensorIterationSpec for the field
// meanings) recorded for parameters/outputs in the LHS/RHS/META/OUTPUT
// scopes.
// NOTE: this chunk is truncated mid-way through the last test
// (QueryScopeAlwaysWorks); the dangling statement at the end is intentional.
#include "xla/service/gpu/triton_fusion_analysis.h"

#include <memory>
#include <string>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/transforms/gemm_fusion.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace gpu {
namespace {

using ::testing::ElementsAre;
using ::testing::FieldsAre;

using TritonDotAnalysisTest = HloTestBase;

// The OUTPUT scope must be queryable (with an empty parameter set) even when
// the dot itself is the fusion root.
TEST_F(TritonDotAnalysisTest, QueryingOutputScopeParametersAlwaysWorks) {
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(R"( triton_dot { p0 = f32[8,8] parameter(0) ROOT dot = f32[8,8] dot(p0, p0), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { p0 = f32[8,8] parameter(0) ROOT r = f32[8,8] fusion(p0), kind=kCustom, calls=triton_dot })"));
  TF_ASSERT_OK_AND_ASSIGN(
      const auto analysis,
      TritonFusionAnalysis::Execute(*module->entry_computation()
                                         ->root_instruction()
                                         ->called_computations()[0]));
  EXPECT_TRUE(
      analysis.ScopeParameters(TritonFusionAnalysis::Scope::OUTPUT).empty());
}

// A bitcast chain that does not change the physical layout is traversed
// transparently; the parameters keep their plain row-major iteration specs.
TEST_F(TritonDotAnalysisTest, NopBitcasts) {
  const std::string hlo_text = R"( HloModule t triton_dot { param_0.1 = s8[48,4]{1,0} parameter(0) bitcast.18 = s8[1,48,4]{2,1,0} bitcast(param_0.1) bitcast.19 = s8[48,4]{1,0} bitcast(bitcast.18) convert.4 = bf16[48,4]{1,0} convert(bitcast.19) param_1.1 = bf16[4,3]{1,0} parameter(1) ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { p0 = s8[48,4]{1,0} parameter(0) p1 = bf16[4,3]{1,0} parameter(1) custom-call = bf16[48,3]{1,0} custom-call(p0, p1), custom_call_target="__triton", called_computations={triton_dot} ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call) })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  const HloComputation* dot_computation = module->entry_computation()
                                              ->root_instruction()
                                              ->operand(0)
                                              ->called_computations()[0];
  const HloInstruction* p0 = dot_computation->parameter_instruction(0);
  const HloInstruction* p1 = dot_computation->parameter_instruction(1);
  TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
                          TritonFusionAnalysis::Execute(*dot_computation));
  EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
            p0);
  EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
            p1);
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
              ElementsAre(FieldsAre(4, 48, 0, 48, ElementsAre(48))));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
              ElementsAre(FieldsAre(1, 4, 0, 4, ElementsAre(4))));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
              ElementsAre(FieldsAre(3, 4, 0, 4, ElementsAre(4))));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
              ElementsAre(FieldsAre(1, 3, 0, 3, ElementsAre(3))));
}

// A size-1 dot dimension must be preserved in the iteration specs, not
// stripped away.
TEST_F(TritonDotAnalysisTest, DoNotRemoveTrivialDimensionForDot) {
  const std::string hlo_text = R"( HloModule t, is_scheduled=true triton_dot { param_0.1 = f32[137,115]{1,0} parameter(0) param_1.1 = f32[1,115]{1,0} parameter(1) ROOT dot = f32[137,1]{1,0} dot(param_0.1, param_1.1), lhs_contracting_dims={1}, rhs_contracting_dims={1} } ENTRY e { p0 = f32[137,115]{1,0} parameter(0) p1 = f32[1,115]{1,0} parameter(1) ROOT custom-call = f32[137,1]{1,0} fusion(p0, p1), kind=kCustom, calls=triton_dot, backend_config={"fusion_backend_config": {kind: "__triton_gemm", triton_gemm_config: {"block_m":16,"block_n":64,"block_k":32, "split_k":1,"num_stages":1,"num_warps":2, "num_ctas":1}}} })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  const HloComputation* dot_computation =
      module->entry_computation()->root_instruction()->called_computations()[0];
  const HloInstruction* p0 = dot_computation->parameter_instruction(0);
  const HloInstruction* p1 = dot_computation->parameter_instruction(1);
  TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
                          TritonFusionAnalysis::Execute(*dot_computation));
  EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
            p0);
  EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
            p1);
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
              ElementsAre(FieldsAre(115, 137, 0, 137, ElementsAre(137))));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
              ElementsAre(FieldsAre(1, 115, 0, 115, ElementsAre(115))));
  // The trivial size-1 dimension of p1 keeps its own spec entry.
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
              ElementsAre(FieldsAre(115, 1, 0, 1, ElementsAre(1))));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
              ElementsAre(FieldsAre(1, 115, 0, 115, ElementsAre(115))));
}

// A bitcast that collapses [1,8,6,4] into [48,4] merges dims 8 and 6 into a
// single fragment of the non-contracting dimension (subfragments {6,8}).
TEST_F(TritonDotAnalysisTest, Merge) {
  const std::string hlo_text = R"( HloModule t triton_dot { param_0.1 = s8[1,8,6,4]{3,2,1,0} parameter(0) bitcast.18 = s8[48,4]{1,0} bitcast(param_0.1) convert.4 = bf16[48,4]{1,0} convert(bitcast.18) param_1.1 = bf16[4,3]{1,0} parameter(1) ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { p0 = s8[1,8,6,4]{3,2,1,0} parameter(0) p1 = bf16[4,3]{1,0} parameter(1) custom-call = bf16[48,3]{1,0} custom-call(p0, p1), custom_call_target="__triton", called_computations={triton_dot} ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call) })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  const HloComputation* dot_computation = module->entry_computation()
                                              ->root_instruction()
                                              ->operand(0)
                                              ->called_computations()[0];
  const HloInstruction* p0 = dot_computation->parameter_instruction(0);
  const HloInstruction* p1 = dot_computation->parameter_instruction(1);
  TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
                          TritonFusionAnalysis::Execute(*dot_computation));
  EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
            p0);
  EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
            p1);
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
              ElementsAre(FieldsAre(4, 6 * 8, 0, 6 * 8, ElementsAre(6, 8))));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
              ElementsAre(FieldsAre(1, 4, 0, 4, ElementsAre(4))));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
              ElementsAre(FieldsAre(3, 4, 0, 4, ElementsAre(4))));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
              ElementsAre(FieldsAre(1, 3, 0, 3, ElementsAre(3))));
}

// A 1-D parameter bitcast into a 2-D operand ([4] -> [2,2]) is analyzable;
// both logical dimensions of the RHS map onto the flat parameter.
TEST_F(TritonDotAnalysisTest, Split) {
  const std::string hlo_text = R"( HloModule t triton_dot { %parameter_1 = f32[24000,2]{1,0} parameter(1) %convert.15 = f16[24000,2]{1,0} convert(%parameter_1) %parameter_0 = f16[4]{0} parameter(0) %bitcast.45 = f16[2,2]{1,0} bitcast(%parameter_0) ROOT %dot.26 = f16[24000,2]{1,0} dot(%convert.15, %bitcast.45), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { p0 = f16[4]{0} parameter(0) p1 = f32[24000,2]{1,0} parameter(1) ROOT r = f16[24000,2]{1,0} custom-call(p0, p1), custom_call_target="__triton", called_computations={triton_dot} })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  const HloComputation* dot_computation =
      module->entry_computation()->root_instruction()->called_computations()[0];
  const HloInstruction* p0 = dot_computation->parameter_instruction(0);
  const HloInstruction* p1 = dot_computation->parameter_instruction(1);
  TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
                          TritonFusionAnalysis::Execute(*dot_computation));
  // Note the scope/parameter swap: p1 feeds the LHS, p0 the RHS.
  EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
            p1);
  EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
            p0);
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p1, 0),
              ElementsAre(FieldsAre(2, 24000, 0, 24000, ElementsAre(24000))));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p1, 1),
              ElementsAre(FieldsAre(1, 2, 0, 2, ElementsAre(2))));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p0, 0),
              ElementsAre(FieldsAre(2, 2, 0, 2, ElementsAre(2))));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p0, 1),
              ElementsAre(FieldsAre(1, 2, 0, 2, ElementsAre(2))));
}

// A transpose followed by a collapsing bitcast still merges dims, but the
// strides now reflect the transposed physical order.
TEST_F(TritonDotAnalysisTest, TransposeMerge) {
  const std::string hlo_text = R"( HloModule t triton_dot { param_0.1 = s8[1,4,8,6]{3,2,1,0} parameter(0) transpose.3 = s8[1,8,6,4]{3,2,1,0} transpose(param_0.1), dimensions={0,2,3,1} bitcast.18 = s8[48,4]{1,0} bitcast(transpose.3) convert.4 = bf16[48,4]{1,0} convert(bitcast.18) param_1.1 = bf16[4,3]{1,0} parameter(1) ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { p0 = s8[1,4,8,6]{3,2,1,0} parameter(0) p1 = bf16[4,3]{1,0} parameter(1) custom-call = bf16[48,3]{1,0} custom-call(p0, p1), custom_call_target="__triton", called_computations={triton_dot} ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call) })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  const HloComputation* dot_computation = module->entry_computation()
                                              ->root_instruction()
                                              ->operand(0)
                                              ->called_computations()[0];
  const HloInstruction* p0 = dot_computation->parameter_instruction(0);
  const HloInstruction* p1 = dot_computation->parameter_instruction(1);
  TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
                          TritonFusionAnalysis::Execute(*dot_computation));
  EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
            p0);
  EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
            p1);
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
              ElementsAre(FieldsAre(1, 8 * 6, 0, 8 * 6, ElementsAre(6, 8))));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
              ElementsAre(FieldsAre(8 * 6, 4, 0, 4, ElementsAre(4))));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
              ElementsAre(FieldsAre(3, 4, 0, 4, ElementsAre(4))));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
              ElementsAre(FieldsAre(1, 3, 0, 3, ElementsAre(3))));
}

// A layout-changing bitcast followed by a physical copy behaves like the
// transpose in the previous test.
TEST_F(TritonDotAnalysisTest, CopyMerge) {
  const std::string hlo_text = R"( HloModule t triton_dot { param_0.1 = s8[1,4,8,6]{3,2,1,0} parameter(0) bitcast.99 = s8[1,8,6,4]{2,1,3,0} bitcast(param_0.1) copy.3 = s8[1,8,6,4]{3,2,1,0} copy(bitcast.99) bitcast.18 = s8[48,4]{1,0} bitcast(copy.3) convert.4 = bf16[48,4]{1,0} convert(bitcast.18) param_1.1 = bf16[4,3]{1,0} parameter(1) ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { p0 = s8[1,4,8,6]{3,2,1,0} parameter(0) p1 = bf16[4,3]{1,0} parameter(1) custom-call = bf16[48,3]{1,0} custom-call(p0, p1), custom_call_target="__triton", called_computations={triton_dot} ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call) })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  const HloComputation* dot_computation = module->entry_computation()
                                              ->root_instruction()
                                              ->operand(0)
                                              ->called_computations()[0];
  const HloInstruction* p0 = dot_computation->parameter_instruction(0);
  const HloInstruction* p1 = dot_computation->parameter_instruction(1);
  TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
                          TritonFusionAnalysis::Execute(*dot_computation));
  EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
            p0);
  EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
            p1);
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
              ElementsAre(FieldsAre(1, 8 * 6, 0, 8 * 6, ElementsAre(6, 8))));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
              ElementsAre(FieldsAre(8 * 6, 4, 0, 4, ElementsAre(4))));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
              ElementsAre(FieldsAre(3, 4, 0, 4, ElementsAre(4))));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
              ElementsAre(FieldsAre(1, 3, 0, 3, ElementsAre(3))));
}

// Non-contracting / contracting / non-contracting interleaving: the LHS
// non-contracting dimension ends up with two separate fragments.
TEST_F(TritonDotAnalysisTest, TransposeMergeNCN) {
  const std::string hlo_text = R"( HloModule t triton_dot { param_0.1 = bf16[3,4,8,1]{3,2,1,0} parameter(0) transpose.3 = bf16[3,8,1,4]{3,2,1,0} transpose(param_0.1), dimensions={0,2,3,1} bitcast.18 = bf16[24,4]{1,0} bitcast(transpose.3) param_1.1 = bf16[4,3]{1,0} parameter(1) ROOT dot = bf16[24,3]{1,0} dot(bitcast.18, param_1.1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { p0 = bf16[3,4,8,1]{3,2,1,0} parameter(0) p1 = bf16[4,3]{1,0} parameter(1) custom-call = bf16[24,3]{1,0} custom-call(p0, p1), custom_call_target="__triton", called_computations={triton_dot} ROOT bitcast.2 = bf16[3,8,1,3]{3,2,1,0} bitcast(custom-call) })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  const HloComputation* dot_computation = module->entry_computation()
                                              ->root_instruction()
                                              ->operand(0)
                                              ->called_computations()[0];
  const HloInstruction* p0 = dot_computation->parameter_instruction(0);
  const HloInstruction* p1 = dot_computation->parameter_instruction(1);
  TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
                          TritonFusionAnalysis::Execute(*dot_computation));
  EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
            p0);
  EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
            p1);
  // Two fragments for dimension 0 of p0: sizes 8 (stride 1) and 3
  // (stride 4*8).
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
              ElementsAre(FieldsAre(1, 8, 0, 8, ElementsAre(8)),
                          FieldsAre(4 * 8, 3, 0, 3, ElementsAre(3))));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
              ElementsAre(FieldsAre(8, 4, 0, 4, ElementsAre(4))));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
              ElementsAre(FieldsAre(3, 4, 0, 4, ElementsAre(4))));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
              ElementsAre(FieldsAre(1, 3, 0, 3, ElementsAre(3))));
}

// A bitcast + transpose after the dot is propagated into the OUTPUT scope of
// the fusion root.
TEST_F(TritonDotAnalysisTest, TransposeOutput) {
  const std::string hlo_text = R"( HloModule t triton_dot { p0 = bf16[24,4]{1,0} parameter(0) p1 = bf16[4,3]{1,0} parameter(1) dot = bf16[24,3]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} bc = bf16[12,2,3]{2,1,0} bitcast(dot) ROOT t = bf16[3,12,2]{2,1,0} transpose(bc), dimensions={2,0,1} } ENTRY e { p0 = bf16[24,4]{1,0} parameter(0) p1 = bf16[4,3]{1,0} parameter(1) ROOT r = bf16[3,12,2]{2,1,0} fusion(p0, p1), kind=kCustom, calls=triton_dot })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  const HloComputation* dot_computation =
      module->entry_computation()->root_instruction()->called_computations()[0];
  const HloInstruction* dot_output = dot_computation->root_instruction();
  TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
                          TritonFusionAnalysis::Execute(*dot_computation));
  EXPECT_THAT(
      *analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, dot_output, 0),
      ElementsAre(FieldsAre(1, 24, 0, 24, ElementsAre(2, 12))));
  EXPECT_THAT(
      *analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, dot_output, 1),
      ElementsAre(FieldsAre(24, 3, 0, 3, ElementsAre(3))));
}

// A parameter consumed only after the dot (in the epilogue) appears in the
// OUTPUT scope with its own iteration specs.
TEST_F(TritonDotAnalysisTest, OutputParameterIsHandled) {
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(R"( HloModule t triton_dot { p0 = bf16[24,4]{1,0} parameter(0) p1 = bf16[4,3]{1,0} parameter(1) dot = bf16[24,3]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} p2 = f16[3,24]{1,0} parameter(2) p2t = f16[24,3]{1,0} transpose(p2), dimensions={1,0} p2tc = bf16[24,3]{1,0} convert(p2t) ROOT r = bf16[24,3]{1,0} divide(p2tc, dot) } ENTRY e { p0 = bf16[24,4]{1,0} parameter(0) p1 = bf16[4,3]{1,0} parameter(1) p2 = f16[3,24]{1,0} parameter(2) ROOT r = bf16[24,3]{1,0} fusion(p0, p1, p2), kind=kCustom, calls=triton_dot })"));
  const HloComputation* dot_computation =
      module->entry_computation()->root_instruction()->called_computations()[0];
  const HloInstruction* output_param =
      dot_computation->parameter_instruction(2);
  TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
                          TritonFusionAnalysis::Execute(*dot_computation));
  EXPECT_EQ(
      analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 0)
          ->size(),
      1);
  EXPECT_THAT(
      *analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 0),
      ElementsAre(FieldsAre(1, 24, 0, 24, ElementsAre(24))));
  EXPECT_EQ(
      analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 1)
          ->size(),
      1);
  EXPECT_THAT(
      *analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 1),
      ElementsAre(FieldsAre(24, 3, 0, 3, ElementsAre(3))));
}

// A scalar broadcast into the dot yields no iteration spec for the scalar
// parameter (nullptr in both RHS dimensions).
TEST_F(TritonDotAnalysisTest, InputBroadcastFromScalarIsHandled) {
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(R"( HloModule t triton_dot { p0 = bf16[24,4]{1,0} parameter(0) p1 = bf16[] parameter(1) p1b = bf16[4,3] broadcast(p1) ROOT dot = bf16[24,3]{1,0} dot(p0, p1b), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { p0 = bf16[24,4]{1,0} parameter(0) p1 = bf16[] parameter(1) ROOT r = bf16[24,3]{1,0} fusion(p0, p1), kind=kCustom, calls=triton_dot })"));
  const HloComputation* dot_computation =
      module->entry_computation()->root_instruction()->called_computations()[0];
  const HloInstruction* scalar = dot_computation->parameter_instruction(1);
  TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
                          TritonFusionAnalysis::Execute(*dot_computation));
  EXPECT_EQ(analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, scalar, 0),
            nullptr);
  EXPECT_EQ(analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, scalar, 1),
            nullptr);
}

// A vector broadcast into the dot keeps a spec only for the broadcast's
// source dimension.
TEST_F(TritonDotAnalysisTest, InputBroadcastFromVectorIsHandled) {
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(R"( HloModule t triton_dot { p0 = bf16[24,4]{1,0} parameter(0) p1 = bf16[4] parameter(1) p1b = bf16[4,3] broadcast(p1), dimensions={0} ROOT dot = bf16[24,3]{1,0} dot(p0, p1b), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { p0 = bf16[24,4]{1,0} parameter(0) p1 = bf16[4] parameter(1) ROOT r = bf16[24,3]{1,0} fusion(p0, p1), kind=kCustom, calls=triton_dot })"));
  const HloComputation* dot_computation =
      module->entry_computation()->root_instruction()->called_computations()[0];
  const HloInstruction* vector = dot_computation->parameter_instruction(1);
  TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
                          TritonFusionAnalysis::Execute(*dot_computation));
  EXPECT_EQ(
      analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, vector, 0)->size(),
      1);
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, vector, 0),
              ElementsAre(FieldsAre(1, 4, 0, 4, ElementsAre(4))));
}

// GemmFusion must not pull a broadcast of the dot result into the fusion:
// the broadcast stays as the entry root.
TEST_F(TritonDotAnalysisTest, OutputBroadcastIsNotAccepted) {
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(R"( HloModule t ENTRY e { p0 = f16[2,35] parameter(0) p0c = bf16[2,35] convert(p0) p1 = bf16[35,2] parameter(1) dot = bf16[2,2] dot(p0c, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT bc = bf16[2,2,100] broadcast(dot), dimensions={0,1} })"));
  EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{
                             se::CudaComputeCapability::AMPERE, 0})
                  .Run(module.get())
                  .value());
  EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
            HloOpcode::kBroadcast);
}

// An output chain whose collapsing bitcast produces a degenerate (size-1)
// split fragment is still analyzable; the root gets a two-fragment spec.
TEST_F(TritonDotAnalysisTest, DegenerateSplitFragmentIsHandled) {
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(R"( triton_gemm_r { Arg_0.1 = s8[30,913,8,21]{3,2,1,0} parameter(0) bitcast.6 = s8[30,8,21,913]{2,1,3,0} bitcast(Arg_0.1) copy.7 = s8[30,8,21,913]{3,2,1,0} copy(bitcast.6) bitcast.8 = s8[5040,913]{1,0} bitcast(copy.7) convert.9 = bf16[5040,913]{1,0} convert(bitcast.8) bitcast.32 = bf16[58,913]{1,0} parameter(1) dot.33 = bf16[5040,58]{1,0} dot(convert.9, bitcast.32), lhs_contracting_dims={1}, rhs_contracting_dims={1} bitcast.34 = bf16[30,8,21,58]{3,2,1,0} bitcast(dot.33) copy.35 = bf16[30,8,21,58]{2,1,3,0} copy(bitcast.34) ROOT bitcast.41 = bf16[30,1,58,8,21]{4,3,2,1,0} bitcast(copy.35) } ENTRY e { Arg_0.1 = s8[30,913,8,21]{3,2,1,0} parameter(0) Arg_1.2 = bf16[58,913]{1,0} parameter(1) ROOT r = bf16[30,1,58,8,21]{4,3,2,1,0} fusion(Arg_0.1, Arg_1.2), kind=kCustom, calls=triton_gemm_r, backend_config={kind: "__triton_gemm"} })"));
  const HloComputation* dot_computation =
      module->entry_computation()->root_instruction()->called_computations()[0];
  TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
                          TritonFusionAnalysis::Execute(*dot_computation));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT,
                                 dot_computation->root_instruction(), 0),
              ElementsAre(FieldsAre(1, 8 * 21, 0, 8 * 21, ElementsAre(21, 8)),
                          FieldsAre(8 * 21 * 58, 30, 0, 30, ElementsAre(30))));
}

// Propagation through broadcasts/reshapes of 1x1 tensors must not crash;
// only the absence of a crash is checked (the status may be an error).
TEST_F(TritonDotAnalysisTest,
       HandlesFurtherPropagationFromTrivialSizedTensorGracefully) {
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(R"( triton_gemm_r { a = f32[3,3]{1,0} parameter(0) constant = f32[1,1]{1,0} constant({ {0} }) broadcast = f32[1,1]{1,0} broadcast(constant), dimensions={0,1} reshape = f32[] reshape(broadcast) broadcast2 = f32[3,3]{1,0} broadcast(reshape), dimensions={} ROOT dot = f32[3,3]{1,0} dot(a, broadcast2), lhs_contracting_dims={0}, rhs_contracting_dims={0} } ENTRY e { a = f32[3,3]{1,0} parameter(0) ROOT dot = f32[3,3]{1,0} fusion(a), kind=kCustom, calls=triton_gemm_r, backend_config={kind: "__triton_gemm"} } )"));
  const HloComputation* dot_computation =
      module->entry_computation()->root_instruction()->called_computations()[0];
  absl::StatusOr<TritonFusionAnalysis> analysis =
      TritonFusionAnalysis::Execute(*dot_computation);
  (void)analysis;
}

// dynamic-slice feeding the dot RHS: the sliced parameter keeps the specs of
// its full (unsliced) shape.
TEST_F(TritonDotAnalysisTest, DynamicSliceIsSupported) {
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(R"( triton_gemm { dot_lhs = f32[2,18]{1,0} parameter(0) dynamic_slice_input = f32[96,2]{1,0} parameter(1) start_index0 = s32[] parameter(2) start_index1 = s32[] parameter(3) dynamic_slice = f32[64,2]{1,0} dynamic-slice(dynamic_slice_input, start_index0, start_index1), dynamic_slice_sizes={64,2} ROOT dot = f32[18,64]{1,0} dot(dot_lhs, dynamic_slice), lhs_contracting_dims={0}, rhs_contracting_dims={1} } ENTRY e { dot_lhs = f32[2,18]{1,0} parameter(0) dynamic_slice_input = f32[96,2]{1,0} parameter(1) start_index0 = s32[] parameter(2) start_index1 = s32[] parameter(3) ROOT triton_gemm_d = f32[18,64]{1,0} fusion(dot_lhs, dynamic_slice_input, start_index0, start_index1), kind=kCustom, calls=triton_gemm, backend_config={"kind":"__triton_gemm"} } )"));
  const HloComputation* dot_computation =
      module->entry_computation()->root_instruction()->called_computations()[0];
  TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
                          TritonFusionAnalysis::Execute(*dot_computation));
  const HloInstruction* p0 = dot_computation->parameter_instruction(0);
  const HloInstruction* p1 = dot_computation->parameter_instruction(1);
  EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
            p0);
  EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
            p1);
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
              ElementsAre(FieldsAre(18, 2, 0, 2, ElementsAre(2))));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
              ElementsAre(FieldsAre(1, 18, 0, 18, ElementsAre(18))));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
              ElementsAre(FieldsAre(2, 96, 0, 96, ElementsAre(96))));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
              ElementsAre(FieldsAre(1, 2, 0, 2, ElementsAre(2))));
}

// A sparse dot's metadata operand is analyzed into the META scope.
TEST_F(TritonDotAnalysisTest, SparseDot) {
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(R"( triton_gemm { lhs = bf16[5,16] parameter(0) rhs = bf16[32,10] parameter(1) meta = u16[5,2] parameter(2) ROOT dot = f32[5,10] dot(lhs, rhs, meta), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4 } ENTRY main { lhs = bf16[5,16] parameter(0) rhs = bf16[32,10] parameter(1) meta = u16[5,2] parameter(2) ROOT out = f32[5,10] fusion(lhs, rhs, meta), kind=kCustom, calls=triton_gemm, backend_config={kind:"__triton_gemm"} } )"));
  const HloComputation* dot_computation =
      module->entry_computation()->root_instruction()->called_computations()[0];
  TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
                          TritonFusionAnalysis::Execute(*dot_computation));
  EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::META,
                                 dot_computation->parameter_instruction(2), 0),
              ::testing::SizeIs(1));
}

// (Truncated in this view: the body of QueryScopeAlwaysWorks continues past
// the end of the chunk.)
TEST_F(TritonDotAnalysisTest, QueryScopeAlwaysWorks) {
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(R"( triton_gemm_r { Arg_0.1 = s8[30,913,8,21]{3,2,1,0} parameter(0) bitcast.6 = s8[30,8,21,913]{2,1,3,0} bitcast(Arg_0.1) copy.7 = s8[30,8,21,913]{3,2,1,0} copy(bitcast.6) bitcast.8 = s8[5040,913]{1,0} bitcast(copy.7) convert.9 = bf16[5040,913]{1,0} convert(bitcast.8) bitcast.32 = bf16[58,913]{1,0} parameter(1) dot.33 = bf16[5040,58]{1,0} dot(convert.9, bitcast.32), lhs_contracting_dims={1}, rhs_contracting_dims={1} bitcast.34 = bf16[30,8,21,58]{3,2,1,0} bitcast(dot.33) copy.35 = bf16[30,8,21,58]{2,1,3,0} copy(bitcast.34) ROOT bitcast.41 = bf16[30,1,58,8,21]{4,3,2,1,0} bitcast(copy.35) } ENTRY e { Arg_0.1 = s8[30,913,8,21]{3,2,1,0} parameter(0) Arg_1.2 = bf16[58,913]{1,0} parameter(1) ROOT r = bf16[30,1,58,8,21]{4,3,2,1,0} fusion(Arg_0.1, Arg_1.2), kind=kCustom, calls=triton_gemm_r, backend_config={kind: "__triton_gemm"} })"));
  const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0]; TF_ASSERT_OK_AND_ASSIGN(const auto analysis, TritonFusionAnalysis::Execute(*dot_computation)); for (const auto& hlo : dot_computation->instructions()) { if (hlo->opcode() != HloOpcode::kDot) { EXPECT_TRUE(analysis.QueryInstructionScope(*hlo).has_value()); } } } TEST_F(TritonDotAnalysisTest, PadWithTrivialDimension) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( HloModule t triton_gemm_dot { parameter_0 = f32[1001,1]{1,0} parameter(0) constant = f32[] constant(0) pad = f32[1004,1]{1,0} pad(parameter_0, constant), padding=0_3x0_0 bitcast = f32[4,251,1]{2,1,0} bitcast(pad) parameter_1 = f32[4,251,2048]{2,1,0} parameter(1) ROOT dot = f32[4,1,2048]{2,1,0} dot(bitcast, parameter_1), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1} })")); const HloComputation* dot_computation = *module->computations().begin(); TF_ASSERT_OK_AND_ASSIGN( TritonFusionAnalysis analysis, TritonFusionAnalysis::Execute(*dot_computation, 4)); const HloInstruction* p0 = dot_computation->parameter_instruction(0); const HloInstruction* p1 = dot_computation->parameter_instruction(1); EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(), p0); EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(), p1); EXPECT_THAT( *analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1), ElementsAre(FieldsAre(1, 1001, 0, 1001, ElementsAre(1001)))); EXPECT_THAT( *analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 2), ElementsAre(FieldsAre(1, 1, 0, 1, ElementsAre(1)))); EXPECT_THAT( *analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1), ElementsAre(FieldsAre(2048, 1004, 0, 1004, ElementsAre(251, 4)))); EXPECT_THAT( *analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 2), ElementsAre(FieldsAre(1, 2048, 0, 2048, ElementsAre(2048)))); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/triton_fusion_analysis.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/triton_fusion_analysis_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
873af691-a7ed-4983-a8cd-f970eb8fa7af
cpp
tensorflow/tensorflow
gpu_latency_hiding_scheduler
third_party/xla/xla/service/gpu/gpu_latency_hiding_scheduler.cc
third_party/xla/xla/service/gpu/gpu_latency_hiding_scheduler_test.cc
#include "xla/service/gpu/gpu_latency_hiding_scheduler.h" #include <cstdint> #include <tuple> #include <utility> #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/service/collective_ops_utils.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/latency_hiding_scheduler.h" #include "xla/shape.h" #include "xla/shape_util.h" namespace xla { namespace gpu { namespace { static constexpr int64_t kCostlyAllReduceThreshold = 30 * 1024 * 1024; static constexpr int64_t kCostlyAllReduceMultiplier = 4; bool IsNopInstruction(const HloInstruction& hlo) { HloOpcode op = hlo.opcode(); return op == HloOpcode::kGetTupleElement || op == HloOpcode::kBitcast || op == HloOpcode::kConstant || op == HloOpcode::kParameter || op == HloOpcode::kTuple || op == HloOpcode::kPartitionId || op == HloOpcode::kReplicaId || hlo.IsEffectiveBitcast() || op == HloOpcode::kOptimizationBarrier; } bool IsAsyncComputeOp(const HloInstruction& hlo) { return (hlo.opcode() == HloOpcode::kAsyncStart || hlo.opcode() == HloOpcode::kAsyncDone) && !hlo_query::IsCollectiveCommunicationOp(hlo.async_wrapped_opcode()) && hlo.async_execution_thread() != hlo.parent()->execution_thread(); } int64_t GetPipelineStream(const HloInstruction& start) { auto it = start.frontend_attributes().map().find(kSendRecvPipelineAttr); if (it != start.frontend_attributes().map().end() && it->second == "1") { return 1; } return 0; } std::pair<GpuResourceType, ResourceUsageType> GetP2PResourceAndUsage( const HloInstruction& instr, const CanonicalAsyncOp& op) { ResourceUsageType usage = op.outer == HloOpcode::kAsyncStart ? 
ResourceUsageType::kResourceRelease : ResourceUsageType::kResourceOccupy; int64_t pipeline = GetPipelineStream(instr); HloOpcode opcode = op.inner; GpuResourceType resource; if (pipeline == 0) { resource = opcode == HloOpcode::kSend ? GpuResourceType::kGpuAsyncStreamSend0 : GpuResourceType::kGpuAsyncStreamRecv0; } else { resource = opcode == HloOpcode::kSend ? GpuResourceType::kGpuAsyncStreamSend1 : GpuResourceType::kGpuAsyncStreamRecv1; } return {resource, usage}; } bool IsGpuAsyncStart(const HloInstruction& hlo) { return (hlo_query::IsAsyncCollectiveStartOp(&hlo, true) && !IsSyncCollective(&hlo)) || IsAsyncComputeOp(hlo); } bool IsGpuAsyncDone(const HloInstruction& hlo) { return (hlo_query::IsAsyncCollectiveDoneOp(&hlo, true) && !IsSyncCollective(hlo.operand(0))) || IsAsyncComputeOp(hlo); } bool IsAsyncPair(const HloInstruction& from, const HloInstruction& target) { return IsGpuAsyncStart(from) && IsGpuAsyncDone(target); } } int64_t GetSizeOfShape(const Shape& shape, int pointer_size) { int64_t size = ShapeUtil::ByteSizeOf(shape, pointer_size); if (shape.IsTuple() || shape.is_static()) { return size; } int64_t metadata_size = sizeof(int32_t) * shape.dimensions_size(); return size + metadata_size; } CanonicalAsyncOp GpuGetCanonicalAsyncOp(const HloInstruction& hlo) { switch (hlo.opcode()) { case HloOpcode::kSend: return {HloOpcode::kAsyncStart, HloOpcode::kSend}; case HloOpcode::kSendDone: return {HloOpcode::kAsyncDone, HloOpcode::kSend}; case HloOpcode::kRecv: return {HloOpcode::kAsyncStart, HloOpcode::kRecv}; case HloOpcode::kRecvDone: return {HloOpcode::kAsyncDone, HloOpcode::kRecv}; default: return DefaultGetCanonicalAsyncOp(hlo); } } GpuAsyncTrackerBase::GpuAsyncTrackerBase(const SchedulerConfig& config, GetCanonicalAsyncOpFunc func) : AsyncTracker(config, func) {} bool GpuAsyncTrackerBase::IsSupportedAsyncDone( const HloInstruction& hlo) const { return IsGpuAsyncDone(hlo); } bool GpuAsyncTrackerBase::IsSupportedAsyncStart( const HloInstruction& hlo) const { 
return IsGpuAsyncStart(hlo); } void GpuAsyncTrackerBase::PostProcessScheduleGraph( HloScheduleGraph* schedule_graph, const LatencyEstimator* latency_estimator) const { for (auto inst : schedule_graph->GetOriginalInstrList()) { if (inst->opcode() == HloOpcode::kRecv) { if (inst->frontend_attributes().map().count(kSendRecvPipelineAttr) > 0) { HloGraphNode& node = schedule_graph->GetNode(inst); node.SetForceEarly(true); VLOG(5) << "Setting force early for instruction: " << inst->ToString(); } } if (inst->has_backend_config()) { auto gpu_config = inst->backend_config<GpuBackendConfig>(); if (gpu_config.ok()) { HloGraphNode& node = schedule_graph->GetNode(inst); node.SetForceDelay(gpu_config->force_earliest_schedule()); VLOG(5) << "Setting force delay for instruction: " << inst->ToString(); } } } } GpuAsyncTracker::GpuAsyncTracker(const SchedulerConfig& config) : GpuAsyncTrackerBase(config) {} ResourcesVector GpuAsyncTracker::GetResourcesFromInstruction( const HloInstruction& instr) const { CanonicalAsyncOp op = GetCanonicalAsyncOp(instr); if (op.outer == HloOpcode::kAsyncStart || op.outer == HloOpcode::kAsyncDone) { ResourceUsageType usage; GpuResourceType resource; if (op.inner == HloOpcode::kSend || op.inner == HloOpcode::kRecv) { std::tie(resource, usage) = GetP2PResourceAndUsage(instr, op); } else { usage = op.outer == HloOpcode::kAsyncStart ? ResourceUsageType::kResourceRelease : ResourceUsageType::kResourceOccupy; resource = hlo_query::IsCollectiveCommunicationOp(op.inner) ? 
GpuResourceType::kGpuAsyncStreamCollectives : GpuResourceType::kGpuAsyncStreamComputes; } return {std::make_pair( GetFirstTargetDefinedResource() + static_cast<int64_t>(resource), usage)}; } return GpuAsyncTrackerBase::GetResourcesFromInstruction(instr); } int64_t GpuAsyncTracker::GetNumTargetDefinedResources() const { return static_cast<int64_t>(GpuResourceType::kNumTargetResources); }; int64_t GpuAsyncTracker::GetNumAvailableResources(int64_t resource_type) const { const int64_t first_target_resource = GetFirstTargetDefinedResource(); if (resource_type < first_target_resource) { return GpuAsyncTrackerBase::GetNumAvailableResources(resource_type); } CHECK_LT(resource_type, first_target_resource + static_cast<int64_t>(GpuResourceType::kNumTargetResources)); if ((resource_type - first_target_resource) == static_cast<int64_t>(GpuResourceType::kGpuAsyncStreamComputes)) { return 2; } return 1; } absl::string_view GpuAsyncTracker::GetResourceName( int64_t resource_type) const { const int64_t first_target_resource = GetFirstTargetDefinedResource(); if (resource_type < first_target_resource) { return GpuAsyncTrackerBase::GetResourceName(resource_type); } CHECK_LE(resource_type, first_target_resource + GetNumTargetDefinedResources()); switch (static_cast<GpuResourceType>(resource_type - first_target_resource)) { case GpuResourceType::kGpuAsyncStreamSend0: return "kGpuAsyncStreamSend0"; case GpuResourceType::kGpuAsyncStreamSend1: return "kGpuAsyncStreamSend1"; case GpuResourceType::kGpuAsyncStreamRecv0: return "kGpuAsyncStreamRecv0"; case GpuResourceType::kGpuAsyncStreamRecv1: return "kGpuAsyncStreamRecv1"; case GpuResourceType::kGpuAsyncStreamCollectives: return "kGpuAsyncStreamCollectives"; case GpuResourceType::kGpuAsyncStreamComputes: return "kGpuAsyncStreamComputes"; default: return "kUnsupportedResource"; } } ResourceHazardType GpuAsyncTracker::GetResourceHazardType( int64_t resource_type) const { const int64_t first_target_resource = GetFirstTargetDefinedResource(); 
if (resource_type < first_target_resource) { return GpuAsyncTrackerBase::GetResourceHazardType(resource_type); } CHECK_LE(resource_type, first_target_resource + GetNumTargetDefinedResources()); return ResourceHazardType::kUnshareable; } int64_t GpuAsyncTracker::GetNumResourcesPerInstruction( int64_t resource_type, const HloInstruction& instr) const { int64_t num_resources = GpuAsyncTrackerBase::GetNumResourcesPerInstruction(resource_type, instr); if (num_resources <= 0 || instr.opcode() != HloOpcode::kWhile) { return num_resources; } int64_t first_p2p_resource = GetFirstTargetDefinedResource() + static_cast<int64_t>(GpuResourceType::kGpuAsyncStreamSend0); if (resource_type < first_p2p_resource || resource_type > first_p2p_resource + 4) { return num_resources; } auto find_instruction_for_pipeline = [&](HloOpcode opcode, int64_t pipeline) { for (auto user1 : instr.users()) { if (user1->opcode() == HloOpcode::kGetTupleElement) { for (auto user2 : user1->users()) { if (user2->opcode() == opcode) { if (GetPipelineStream(*user2) == pipeline) { return true; } } } } } return false; }; bool found; if (resource_type == first_p2p_resource) { found = find_instruction_for_pipeline(HloOpcode::kSendDone, 0); } else if (resource_type == first_p2p_resource + 1) { found = find_instruction_for_pipeline(HloOpcode::kSendDone, 1); } else if (resource_type == first_p2p_resource + 2) { found = find_instruction_for_pipeline(HloOpcode::kRecvDone, 0); } else { found = find_instruction_for_pipeline(HloOpcode::kRecvDone, 1); } return num_resources - (found ? 
1 : 0); } GpuLatencyEstimator::GpuLatencyEstimator(int64_t pointer_size, GetCanonicalAsyncOpFunc func) : ApproximateLatencyEstimator(func), pointer_size_(pointer_size) {} ApproximateLatencyEstimator::TimeCost GpuLatencyEstimator::NodeCost( const HloInstruction* instr) const { if (IsNopInstruction(*instr)) { return 0.0; } if (instr->opcode() == HloOpcode::kCustomCall) { if (IsCublasGemm(*instr) || IsCustomCallToDnnConvolution(*instr)) { return ApproximateLatencyEstimator::kMediumCost; } return ApproximateLatencyEstimator::kMediumCost; } return ApproximateLatencyEstimator::NodeCost(instr); } ApproximateLatencyEstimator::TimeCost GpuLatencyEstimator::GetLatencyBetween( const HloGraphNode& from, const HloGraphNode& to) const { if (IsAsyncPair(from, to)) { if (from.GetInstr().opcode() == HloOpcode::kRecv) { return ApproximateLatencyEstimator::kLowLatency; } else if (from.GetInstr().opcode() == HloOpcode::kSend) { return ApproximateLatencyEstimator::kHighLatency * 10; } bool enable_approx_collectives = from.GetInstr() .GetModule() ->config() .debug_options() .xla_gpu_enable_approx_costly_collectives(); bool is_all_reduce = from.GetInstr().opcode() == HloOpcode::kAllReduceStart; bool collective_size_exceeds_threshold = GetSizeOfShape(from.GetInstr().shape(), pointer_size_) > kCostlyAllReduceThreshold; if (enable_approx_collectives && is_all_reduce && collective_size_exceeds_threshold) { return ApproximateLatencyEstimator::kHighLatency * kCostlyAllReduceMultiplier; } return ApproximateLatencyEstimator::kHighLatency; } return ApproximateLatencyEstimator::kLowLatency; } void GPUProfileStatisticsAggregator::HandleMissingInstructionCost( const HloInstruction& instruction) { if (!IsNopInstruction(instruction) && instruction.opcode() != HloOpcode::kWhile) { missing_instructions_.insert(&instruction); } } void GPUProfileStatisticsAggregator::HandleFoundInstructionCost( const HloInstruction& instruction) { found_instructions_count_++; } void 
GPUProfileStatisticsAggregator::HandleMissingInstructionLatency( const HloInstruction& from, const HloInstruction& to) { if (IsAsyncPair(from, to)) { missing_instructions_.insert(&from); } } void GPUProfileStatisticsAggregator::HandleFoundInstructionLatency( const HloInstruction& from, const HloInstruction& to) { found_instructions_count_++; } } }
#include "xla/service/gpu/gpu_latency_hiding_scheduler.h" #include <memory> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/gpu/gpu_hlo_schedule.h" #include "xla/service/hlo_module_config.h" #include "xla/service/profile_guided_latency_estimator.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla::gpu { namespace { using ::testing::Property; using ::testing::UnorderedElementsAre; using ::tsl::testing::StatusIs; class GpuLatencyHidingSchedulerBaseTest : public HloTestBase { protected: absl::StatusOr<HloModule*> ScheduleModule(HloModule* module) { auto& test_backend = backend(); const auto& gpu_device_info = test_backend.default_stream_executor()->GetDeviceDescription(); HloModuleConfig config(module->config()); DebugOptions dboptions(config.debug_options()); dboptions.set_xla_gpu_enable_pgle_accuracy_checker(true); config.set_debug_options(dboptions); module->set_config(config); TF_RETURN_IF_ERROR( ScheduleGpuModule(module, 8, gpu_device_info) .status()); return module; } HloModuleConfig GetModuleConfig(absl::string_view fdo_profile) { HloModuleConfig config; DebugOptions debug_options = GetDebugOptionsForTest(); debug_options.set_xla_gpu_enable_latency_hiding_scheduler(true); debug_options.set_xla_gpu_lhs_enable_gpu_async_tracker(true); config.set_debug_options(debug_options); *config.mutable_fdo_profile() = fdo_profile; return config; } }; TEST_F(GpuLatencyHidingSchedulerBaseTest, GPUProfileStatisticsAggregatorDoesNotCountMissingNoops) { GPUProfileStatisticsAggregator aggregator; ProfileStatisticsAggregator::Statistics before_stats = aggregator.GetStats(); ASSERT_EQ(before_stats.missing_instructions.size(), 0); 
ASSERT_EQ(before_stats.found_instructions_count, 0); absl::string_view kFdoProfile = ""; absl::string_view kHloModule = R"( HloModule m ENTRY main { parameter0 = f32[] parameter(0) parameter1 = f32[32] parameter(1) const0 = f32[] constant(42) bitcast0 = f32[2,16] bitcast(parameter1) partition-id0 = u32[] partition-id() replica-id0 = u32[] replica-id() tuple0 = (f32[], f32[2,16], u32[], u32[]) tuple(parameter0, bitcast0, partition-id0, replica-id0) opt-barrier = (f32[], f32[2,16], u32[], u32[]) opt-barrier(tuple0) ROOT _ = get-tuple-element(opt-barrier), index=0 } )"; auto config = GetModuleConfig(kFdoProfile); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHloModule, config)); for (const HloInstruction* instr : module->entry_computation()->instructions()) { aggregator.HandleMissingInstructionCost(*instr); ProfileStatisticsAggregator::Statistics after_stats = aggregator.GetStats(); EXPECT_EQ(after_stats.missing_instructions.size(), 0); EXPECT_EQ(after_stats.found_instructions_count, 0); } } TEST_F(GpuLatencyHidingSchedulerBaseTest, GPUProfileStatisticsAggregatorCountsMissingInstruction) { GPUProfileStatisticsAggregator aggregator; ProfileStatisticsAggregator::Statistics before_stats = aggregator.GetStats(); ASSERT_EQ(before_stats.missing_instructions.size(), 0); ASSERT_EQ(before_stats.found_instructions_count, 0); absl::string_view kFdoProfile = R"pb( costs { name: "dot0" cost_us: 100.0 } )pb"; absl::string_view kHloModule = R"( HloModule m ENTRY main { parameter0 = f32[] parameter(0) parameter1 = f32[32] parameter(1) const0 = f32[] constant(42) add0 = f32[] add(parameter0, const0) bitcast0 = f32[2,16] bitcast(parameter1) tuple0 = (f32[], f32[2,16]) tuple(add0, bitcast0) ROOT _ = get-tuple-element(tuple0), index=0 } )"; auto config = GetModuleConfig(kFdoProfile); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHloModule, config)); for (const HloInstruction* instr : module->entry_computation()->instructions()) { 
aggregator.HandleMissingInstructionCost(*instr); } ProfileStatisticsAggregator::Statistics after_stats = aggregator.GetStats(); EXPECT_EQ(after_stats.missing_instructions.size(), 1); EXPECT_EQ((*after_stats.missing_instructions.begin())->opcode(), HloOpcode::kAdd); EXPECT_EQ(after_stats.found_instructions_count, 0); } TEST_F(GpuLatencyHidingSchedulerBaseTest, GPUProfileStatisticsAggregatorCountsMissingAsyncPairs) { GPUProfileStatisticsAggregator aggregator; ProfileStatisticsAggregator::Statistics before_stats = aggregator.GetStats(); ASSERT_EQ(before_stats.missing_instructions.size(), 0); ASSERT_EQ(before_stats.found_instructions_count, 0); absl::string_view kFdoProfile = ""; absl::string_view kHloModule = R"( HloModule m reduce { x = f32[] parameter(0) y = f32[] parameter(1) ROOT _ = f32[] add(x, y) } ENTRY main { p0 = f32[] parameter(0) p1 = f32[2] parameter(1) ar_0 = f32[] all-reduce-start(p0), to_apply=reduce ar_1 = f32[] all-reduce-done(ar_0) rs_0 = ((f32[2]), f32[1]) reduce-scatter-start(p1), to_apply=reduce, dimensions={0} rs_1 = f32[1] reduce-scatter-done(rs_0) ag_0 = (f32[2], f32[4]) all-gather-start(p1), replica_groups={{0,1}}, dimensions={0} ag_1 = f32[4] all-gather-done(ag_0) ROOT _ = (f32[], f32[1], f32[4]) tuple(ar_1, rs_1, ag_1) } )"; auto config = GetModuleConfig(kFdoProfile); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHloModule, config)); for (const HloInstruction* instr : module->entry_computation()->instructions()) { for (const HloInstruction* user : instr->users()) { aggregator.HandleMissingInstructionLatency(*instr, *user); } } ProfileStatisticsAggregator::Statistics after_stats = aggregator.GetStats(); EXPECT_EQ(after_stats.found_instructions_count, 0); EXPECT_EQ(after_stats.missing_instructions.size(), 3); EXPECT_THAT( after_stats.missing_instructions, UnorderedElementsAre( Property(&HloInstruction::opcode, HloOpcode::kAllReduceStart), Property(&HloInstruction::opcode, HloOpcode::kAsyncStart), 
Property(&HloInstruction::opcode, HloOpcode::kAllGatherStart))); } TEST_F(GpuLatencyHidingSchedulerBaseTest, ScheduleGpuModuleErrorsOutOnMissingInstrucitonsForAWhileLoopBody) { absl::string_view kFdoProfile = R"pb( costs { name: "dot0" cost_us: 100.0 } )pb"; absl::string_view kHloModule = R"( HloModule m loop_body { p = (u32[], f32[1]) parameter(0) t0 = u32[] get-tuple-element(p), index=0 t1 = f32[1] get-tuple-element(p), index=1 add0 = f32[1] add(t1, t1) ROOT _ = (u32[],f32[1]) tuple(t0,t1) } loop_cond { p1 = (u32[], f32[1]) parameter(0) count = u32[] get-tuple-element(p1), index=0 ub = u32[] constant(2) ROOT _ = pred[] compare(count, ub), direction=LT } ENTRY main { p2 = f32[1] parameter(0) ind = u32[] constant(1) t = (u32[],f32[1]) tuple(ind,p2) w = (u32[],f32[1]) while(t), body=loop_body, condition=loop_cond ROOT _ = f32[1] get-tuple-element(w), index=1 } )"; auto config = GetModuleConfig(kFdoProfile); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloModule, config)); EXPECT_THAT(ScheduleModule(module.get()), StatusIs(absl::StatusCode::kInvalidArgument)); } TEST_F(GpuLatencyHidingSchedulerBaseTest, ScheduleGpuModuleErrorsOutOnMissingInstrucitonsForAnEntryComputation) { absl::string_view kFdoProfile = R"pb( costs { name: "dot0" cost_us: 100.0 } )pb"; absl::string_view kHloModule = R"( HloModule m ENTRY main { p0 = f32[1] parameter(0) ROOT add0 = f32[1] add(p0,p0) } )"; auto config = GetModuleConfig(kFdoProfile); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloModule, config)); EXPECT_THAT(ScheduleModule(module.get()), StatusIs(absl::StatusCode::kInvalidArgument)); } TEST_F(GpuLatencyHidingSchedulerBaseTest, ScheduleGpuModulePassesOnFullFDOProfile) { absl::string_view kFdoProfile = R"pb( costs { name: "add0" cost_us: 100.0 } )pb"; absl::string_view kHloModule = R"( HloModule m ENTRY main { p0 = f32[1] parameter(0) ROOT add0 = f32[1] add(p0,p0) } )"; auto config = 
GetModuleConfig(kFdoProfile); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloModule, config)); TF_EXPECT_OK(ScheduleModule(module.get())); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_latency_hiding_scheduler.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_latency_hiding_scheduler_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ae38afae-cea4-480b-bc81-c353972f7926
cpp
tensorflow/tensorflow
reduction_utils
third_party/xla/xla/service/gpu/reduction_utils.cc
third_party/xla/xla/service/gpu/reduction_utils_test.cc
#include "xla/service/gpu/reduction_utils.h" #include <algorithm> #include <array> #include <atomic> #include <cstdint> #include <ostream> #include "absl/algorithm/container.h" #include "absl/base/const_init.h" #include "absl/strings/str_join.h" #include "absl/synchronization/mutex.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/layout_util.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/hlo_module_config.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/semantic_version.h" #include "xla/util.h" #include "tsl/platform/logging.h" #ifdef GOOGLE_CUDA #include "xla/service/gpu/gpu_asm_opts_util.h" #include "xla/stream_executor/cuda/cuda_asm_compiler.h" #endif namespace xla { namespace gpu { namespace { Vector3 PartitionShapeByMiddleDimensions( const Shape& shape, absl::Span<const int64_t> dims_middle) { CHECK(LayoutUtil::AreDimensionsConsecutive(shape.layout(), dims_middle)); Vector3 values = {1, 1, 1}; enum Segment { kMajor = 0, kMiddle = 1, kMinor = 2 }; Segment cur_segment = kMinor; for (int64_t cur_dim : LayoutUtil::MinorToMajor(shape)) { if (cur_segment != kMajor) { bool cur_dim_in_middle = absl::c_linear_search(dims_middle, cur_dim); if (cur_segment == kMinor) { if (cur_dim_in_middle) { cur_segment = kMiddle; } } else if (cur_segment == kMiddle) { if (!cur_dim_in_middle) { cur_segment = kMajor; } } } values[cur_segment] *= shape.dimensions(cur_dim); } return values; } } int64_t MinThreadsXRowReduction(const HloModuleConfig& hlo_module_config) { #ifdef GOOGLE_CUDA static absl::Mutex mutex(absl::kConstInit); static std::atomic<bool*> use_reduced_thread_count_atomic = nullptr; bool* use_reduced_thread_count = use_reduced_thread_count_atomic.load(std::memory_order_acquire); if (use_reduced_thread_count == nullptr) { absl::MutexLock lock(&mutex); use_reduced_thread_count = use_reduced_thread_count_atomic.load(std::memory_order_relaxed); if 
(use_reduced_thread_count == nullptr) { auto ptxas_config = PtxOptsFromDebugOptions(hlo_module_config.debug_options()); auto ptxas_version_tuple = se::GetAsmCompilerVersion(ptxas_config.preferred_cuda_dir); use_reduced_thread_count = new bool(false); if (!ptxas_version_tuple.ok() || ptxas_version_tuple.value() < stream_executor::SemanticVersion{12, 2, 0}) { *use_reduced_thread_count = true; } use_reduced_thread_count_atomic.store(use_reduced_thread_count, std::memory_order_release); } } if (*use_reduced_thread_count) { return 512; } #endif return 1024; } Vector3 GetReductionTiling(const ReductionDimensions& reduction_dimensions) { if (reduction_dimensions.is_row_reduction) { int64_t tile_z = std::min(reduction_dimensions.dimensions[0], BatchedReductionRaceFreeBound()); return {tile_z, 1, 16}; } return {1, 128, 1}; } int64_t ReductionDimensionRaceFreeBound( const HloModuleConfig& hlo_module_config, const ReductionDimensions& reduction_dimensions) { Vector3 reduction_tiling = GetReductionTiling(reduction_dimensions); if (reduction_dimensions.is_row_reduction) { return MinThreadsXRowReduction(hlo_module_config) * reduction_tiling[2]; } return WarpSize() * reduction_tiling[1]; } bool IsUnnestedReductionFasterThanElemental( const ReductionDimensions& reduction_dimensions) { if (reduction_dimensions.is_row_reduction) { return (reduction_dimensions.dimensions[2] >= WarpSize()) || ((WarpSize() % reduction_dimensions.dimensions[2]) == 0); } int64_t major_size = reduction_dimensions.dimensions[1]; int64_t minor_size = reduction_dimensions.dimensions[2]; bool prefer_elemental_emitter = (major_size < WarpSize()) || (major_size < 2 * WarpSize() && minor_size < WarpSize()) || (major_size < 4 * WarpSize() && minor_size < 8) || (major_size < 8 * WarpSize() && minor_size < 3); return !prefer_elemental_emitter; } bool IsReductionFromOrToContiguousDimensions(const HloInstruction& reduce) { if (reduce.opcode() != HloOpcode::kReduce) { return false; } const Shape& operand_shape = 
reduce.operand(0)->shape(); absl::Span<const int64_t> dims_to_reduce = reduce.dimensions(); DimensionVector dims_to_keep; for (int64_t dim = 0; dim < operand_shape.dimensions().size(); ++dim) { if (!absl::c_linear_search(dims_to_reduce, dim)) { dims_to_keep.push_back(dim); } } return (LayoutUtil::AreDimensionsConsecutive(operand_shape.layout(), dims_to_keep) || LayoutUtil::AreDimensionsConsecutive(operand_shape.layout(), dims_to_reduce)) && IsUnnestedReductionFasterThanElemental( GetReductionKindAndContiguousComponents(reduce)); } bool ReductionIsRaceFree(const HloModuleConfig& hlo_module_config, const ReductionDimensions& reduction_dimensions) { if (reduction_dimensions.is_row_reduction) { return reduction_dimensions.dimensions[2] <= ReductionDimensionRaceFreeBound(hlo_module_config, reduction_dimensions) && reduction_dimensions.dimensions[0] <= BatchedReductionRaceFreeBound(); } return reduction_dimensions.dimensions[1] <= ReductionDimensionRaceFreeBound(hlo_module_config, reduction_dimensions); } std::ostream& operator<<(std::ostream& os, const ReductionDimensions& reduction_dimensions) { bool is_row_reduction = reduction_dimensions.is_row_reduction; os << (is_row_reduction ? "row " : "column ") << "reduction [" << absl::StrJoin(reduction_dimensions.dimensions, ",") << "] -> [" << reduction_dimensions.dimensions[0] << ", " << reduction_dimensions .dimensions[is_row_reduction ? 
ReductionDimensions::kRowKeptDimension : ReductionDimensions::kColMinorKeptDimension] << "]"; return os; } ReductionDimensions GetReductionKindAndContiguousComponents( const HloInstruction& reduce) { Shape input_shape = reduce.operand(0)->shape(); absl::Span<const int64_t> dims_to_reduce = reduce.dimensions(); DimensionVector dims_to_keep; for (int64_t dim = 0; dim < input_shape.rank(); ++dim) { if (!absl::c_linear_search(dims_to_reduce, dim)) { dims_to_keep.push_back(dim); } } if (dims_to_keep.empty()) { return {true, {1, 1, ShapeUtil::ElementsIn(input_shape)}}; } if (LayoutUtil::AreDimensionsConsecutive(input_shape.layout(), dims_to_keep)) { Vector3 shape_partition = PartitionShapeByMiddleDimensions(input_shape, dims_to_keep); if (shape_partition[1] == 1) { return {true, {1, 1, shape_partition[0] * shape_partition[2]}}; } if (shape_partition[2] == 1) { return {false, {1, shape_partition[0], shape_partition[1]}}; } return {true, shape_partition}; } Vector3 shape_partition = PartitionShapeByMiddleDimensions(input_shape, dims_to_reduce); if (shape_partition[2] == 1) { return {true, {1, shape_partition[0], shape_partition[1]}}; } return {false, shape_partition}; } bool IsRealReductionHero(const HloInstruction& root, const HloInstruction& hero) { if (!IsReductionFromOrToContiguousDimensions(hero)) { return false; } return &root == &hero || ReductionIsRaceFree(hero.GetModule()->config(), GetReductionKindAndContiguousComponents(hero)); } bool AreReductionsMultiOutputFusionCompatible( const HloInstruction* reduce_hero, const HloInstruction* first_reduce) { return GetReductionKindAndContiguousComponents(*reduce_hero) == GetReductionKindAndContiguousComponents(*first_reduce); } } }
#include "xla/service/gpu/reduction_utils.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/strings/str_cat.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/service/hlo_parser.h" #include "xla/tests/hlo_test_base.h" namespace xla { namespace gpu { namespace { using ::testing::ElementsAre; using ReductionUtilsTest = HloTestBase; const char kModulePrefix[] = R"( HloModule test_module scalar_add { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) })"; TEST_F(ReductionUtilsTest, ReductionsAreMultioutputFusionCompatible) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_sibling1 { p_0 = f32[32,64]{1,0} parameter(0) constant = f32[] constant(0) ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add } fused_sibling2 { p_0 = f32[32,64]{1,0} parameter(0) neg = f32[32,64]{1,0} negate(p_0) constant = f32[] constant(0) ROOT reduce = f32[32]{0} reduce(neg, constant), dimensions={1}, to_apply=scalar_add } ENTRY entry { p_0 = f32[32,64]{1,0} parameter(0) fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1 fusion2 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling2 ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2) })")) .value(); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* fusion1 = root->operand(0); const HloInstruction* fusion2 = root->operand(1); EXPECT_TRUE(AreReductionsMultiOutputFusionCompatible( fusion1->fused_expression_root(), fusion2->fused_expression_root())); } TEST_F(ReductionUtilsTest, ReductionsWithSameCanonicalizedDimsAreMultioutputFusionCompatible) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_sibling1 { p_0 = f32[32,64]{1,0} parameter(0) constant = f32[] constant(0) ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add } fused_sibling2 { p_0 = f32[32,64]{1,0} parameter(0) bitcast = 
f32[32,8,8]{2,1,0} bitcast(p_0) constant = f32[] constant(0) ROOT reduce = f32[32]{0} reduce(bitcast, constant), dimensions={1,2}, to_apply=scalar_add } ENTRY entry { p_0 = f32[32,64]{1,0} parameter(0) fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1 fusion2 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling2 ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2) })")) .value(); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* fusion1 = root->operand(0); const HloInstruction* fusion2 = root->operand(1); EXPECT_TRUE(AreReductionsMultiOutputFusionCompatible( fusion1->fused_expression_root(), fusion2->fused_expression_root())); } TEST_F(ReductionUtilsTest, ReductionsAreNotMultioutputFusionCompatible_DifferentOperandShapes) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_sibling1 { p_0 = f32[32,64]{1,0} parameter(0) constant = f32[] constant(0) ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add } fused_sibling2 { p_0 = f32[64,32]{1,0} parameter(0) neg = f32[64,32]{1,0} negate(p_0) constant = f32[] constant(0) ROOT reduce = f32[32]{0} reduce(neg, constant), dimensions={0}, to_apply=scalar_add } ENTRY entry { p_0 = f32[32,64]{1,0} parameter(0) p_1 = f32[64,32]{1,0} parameter(1) fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1 fusion2 = f32[32]{0} fusion(p_1), kind=kInput, calls=fused_sibling2 ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2) })")) .value(); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* fusion1 = root->operand(0); const HloInstruction* fusion2 = root->operand(1); EXPECT_FALSE(AreReductionsMultiOutputFusionCompatible( fusion1->fused_expression_root(), fusion2->fused_expression_root())); } TEST_F(ReductionUtilsTest, ReductionsAreNotMultioutputFusionCompatible_DifferentOutputShapes) { auto module = 
ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_sibling1 { p_0 = f32[32,64]{1,0} parameter(0) constant = f32[] constant(0) ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add } fused_sibling2 { p_0 = f32[64,32]{1,0} parameter(0) neg = f32[64,32]{1,0} negate(p_0) constant = f32[] constant(0) ROOT reduce = f32[64]{0} reduce(neg, constant), dimensions={1}, to_apply=scalar_add } ENTRY entry { p_0 = f32[32,64]{1,0} parameter(0) p_1 = f32[64,32]{1,0} parameter(1) fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1 fusion2 = f32[64]{0} fusion(p_1), kind=kInput, calls=fused_sibling2 ROOT root = (f32[32]{0}, f32[64]{0}) tuple(fusion1, fusion2) })")) .value(); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* fusion1 = root->operand(0); const HloInstruction* fusion2 = root->operand(1); EXPECT_FALSE(AreReductionsMultiOutputFusionCompatible( fusion1->fused_expression_root(), fusion2->fused_expression_root())); } TEST_F(ReductionUtilsTest, ReductionsAreNotMultioutputFusionCompatible_DifferentReduceDimensions) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_sibling1 { p_0 = f32[32,32]{1,0} parameter(0) constant = f32[] constant(0) ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={0}, to_apply=scalar_add } fused_sibling2 { p_0 = f32[32,32]{1,0} parameter(0) neg = f32[32,32]{1,0} negate(p_0) constant = f32[] constant(0) ROOT reduce = f32[32]{0} reduce(neg, constant), dimensions={1}, to_apply=scalar_add } ENTRY entry { p_0 = f32[32,32]{1,0} parameter(0) fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1 fusion2 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling2 ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2) })")) .value(); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* fusion1 = root->operand(0); const HloInstruction* fusion2 = 
root->operand(1); EXPECT_FALSE(AreReductionsMultiOutputFusionCompatible( fusion1->fused_expression_root(), fusion2->fused_expression_root())); } TEST(ReductionDimensionsTest, GetOutputShape) { ReductionDimensions row_reduction{true, {1, 2, 3}}; ReductionDimensions col_reduction{false, {1, 2, 3}}; EXPECT_THAT(row_reduction.GetOutputShape(), ElementsAre(2)); EXPECT_THAT(col_reduction.GetOutputShape(), ElementsAre(1, 3)); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/reduction_utils.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/reduction_utils_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
757b4fb9-3ffb-4887-8a6f-740c46156148
cpp
tensorflow/tensorflow
runtime_intrinsics
third_party/xla/xla/service/gpu/runtime_intrinsics.cc
third_party/xla/xla/service/gpu/runtime_intrinsics_test.cc
#include "xla/service/gpu/runtime_intrinsics.h" #include <cstdint> #include <string> #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/strings/ascii.h" #include "absl/strings/string_view.h" #include "xla/service/collective_ops_utils.h" #include "xla/service/custom_call_status.h" #include "xla/service/custom_call_target_registry.h" #include "xla/service/platform_util.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_memory.h" #include "xla/stream_executor/platform.h" #include "xla/stream_executor/platform_manager.h" #include "xla/stream_executor/stream.h" #include "xla/stream_executor/stream_finder.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace { std::string GetGpuPlatformName() { return absl::AsciiStrToUpper( PlatformUtil::CanonicalPlatformName("gpu").value()); } absl::Status AssertOnGpu(void* stream_handle, void* buffer, absl::string_view error_msg) { TF_ASSIGN_OR_RETURN( se::Platform * platform, se::PlatformManager::PlatformWithName(GetGpuPlatformName())); TF_ASSIGN_OR_RETURN(se::Stream * stream, stream_executor::FindStream(platform, stream_handle)); if (!stream) { return Internal("Stream not found for: %p", stream_handle); } int8_t expected = false; int64_t byte_size = sizeof(int8_t); CHECK_EQ(byte_size, ShapeUtil::ByteSizeOfPrimitiveType(PrimitiveType::PRED)); TF_RETURN_IF_ERROR(stream->Memcpy( &expected, se::DeviceMemoryBase{buffer, static_cast<uint64_t>(byte_size)}, byte_size)); TF_RETURN_IF_ERROR(stream->BlockHostUntilDone()); if (!static_cast<bool>(expected)) { return Internal("%s", error_msg); } return absl::OkStatus(); } void AssertionCustomCall(void* stream_handle, void** buffers, const char* opaque, int opaque_len, XlaCustomCallStatus* status) { absl::Status s = AssertOnGpu(stream_handle, buffers[0], absl::string_view{opaque, static_cast<uint64_t>(opaque_len)}); if (!s.ok()) { auto msg 
= s.message(); XlaCustomCallStatusSetFailure(status, msg.data(), msg.size()); } } void NopReturnTokenCustomCall(void* stream_handle, void** buffers, const char* opaque, int opaque_len, XlaCustomCallStatus* status) { VLOG(1) << "NopReturnTokenCustomCall called."; } } XLA_REGISTER_CUSTOM_CALL_TARGET_WITH_SYM( std::string(kXlaGpuAssertCustomCallTag), AssertionCustomCall, GetGpuPlatformName()); XLA_REGISTER_CUSTOM_CALL_TARGET_WITH_SYM( std::string(kNopReturnTokenCustomCallTarget), NopReturnTokenCustomCall, GetGpuPlatformName()); }
#include <memory> #include <utility> #include <gtest/gtest.h> #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { using RuntimeIntrinsicsTest = HloTestBase; TEST_F(RuntimeIntrinsicsTest, NopReturnTokenWorks) { constexpr absl::string_view kHloText = R"( HloModule m ENTRY e { constant = u32[2]{0} constant({0, 1}) ROOT nop_return_token = token[] custom-call(constant), custom_call_target="NopReturnToken", custom_call_has_side_effect=true })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, GetOptimizedModule(kHloText)); EXPECT_EQ(module->entry_computation()->instruction_count(), 2); EXPECT_TRUE(Run(std::move(module), false)); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime_intrinsics.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime_intrinsics_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
7606cb75-ccae-4f79-b2f0-741f5d1073cb
cpp
tensorflow/tensorflow
ir_emitter
third_party/xla/xla/service/cpu/ir_emitter.cc
third_party/xla/xla/service/cpu/ir_emitter_test.cc
#include "xla/service/cpu/ir_emitter.h" #include <stddef.h> #include <stdint.h> #include <algorithm> #include <cstddef> #include <iterator> #include <limits> #include <map> #include <memory> #include <optional> #include <string> #include <string_view> #include <type_traits> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/cleanup/cleanup.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/meta/type_traits.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "llvm/IR/Attributes.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/Constants.h" #include "llvm/IR/FMF.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/IntrinsicsX86.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Value.h" #include "xla/hlo/ir/collective_device_list.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/layout_util.h" #include "xla/literal_util.h" #include "xla/map_util.h" #include "xla/primitive_util.h" #include "xla/service/buffer_assignment.h" #include "xla/service/collective_ops_utils.h" #include "xla/service/cpu/backend_config.pb.h" #include "xla/service/cpu/cpu_options.h" #include "xla/service/cpu/cpu_runtime.h" #include "xla/service/cpu/dot_op_emitter.h" #include "xla/service/cpu/elemental_math_emitter.h" #include "xla/service/cpu/ir_emission_utils.h" #include "xla/service/cpu/ir_function.h" #include "xla/service/cpu/onednn_config.pb.h" #include "xla/service/cpu/parallel_loop_emitter.h" #include "xla/service/elemental_ir_emitter.h" #include 
"xla/service/hlo_module_config.h" #include "xla/service/llvm_ir/buffer_assignment_util.h" #include "xla/service/llvm_ir/dynamic_update_slice_util.h" #include "xla/service/llvm_ir/ir_array.h" #include "xla/service/llvm_ir/llvm_loop.h" #include "xla/service/llvm_ir/llvm_type_conversion_util.h" #include "xla/service/llvm_ir/llvm_util.h" #include "xla/service/llvm_ir/loop_emitter.h" #include "xla/service/llvm_ir/tuple_ops.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/tsl/lib/math/math_util.h" #include "xla/util.h" #include "xla/window_util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3) #include "xla/service/cpu/onednn_memory_util.h" #endif namespace xla { namespace { using llvm_ir::IrName; using llvm_ir::SetToFirstInsertPoint; } namespace cpu { class IrEmitter::CpuElementalIrEmitter : public ElementalIrEmitter { public: CpuElementalIrEmitter(const HloModuleConfig& module_config, IrEmitter* ir_emitter, llvm::Module* module) : ElementalIrEmitter( module, ir_emitter->b(), Options{true}), hlo_module_config_(module_config), ir_emitter_(ir_emitter) {} protected: absl::StatusOr<llvm::Value*> EmitAtan2(PrimitiveType prim_type, llvm::Value* lhs, llvm::Value* rhs, absl::string_view) override { return xla::cpu::EmitAtan2(module(), *b(), prim_type, lhs, rhs); } absl::StatusOr<llvm::Value*> EmitTanh(PrimitiveType prim_type, llvm::Value* value) override { return xla::cpu::EmitTanh(module(), *b(), prim_type, value); } absl::StatusOr<llvm::Value*> EmitErf(PrimitiveType prim_type, llvm::Value* value) override { return xla::cpu::EmitErf(module(), *b(), prim_type, value); } absl::StatusOr<std::vector<llvm::Value*>> EmitThreadLocalCall( const HloComputation& callee, absl::Span<llvm::Value* const> parameters, absl::string_view name, bool is_reducer) override { return ir_emitter_->EmitThreadLocalCall(callee, parameters, 
name, is_reducer); } bool fast_min_max() override { return hlo_module_config_.debug_options().xla_cpu_enable_fast_min_max(); } const HloModuleConfig& hlo_module_config_; IrEmitter* ir_emitter_; }; IrEmitter::IrEmitter(mlir::MLIRContext* mlir_context, const HloModule& hlo_module, const BufferAssignment& assignment, llvm::Module* llvm_module, absl::flat_hash_map<const HloInstruction*, int64_t> instruction_to_profile_idx, absl::flat_hash_map<const HloComputation*, int64_t> computation_to_profile_idx, absl::flat_hash_map<const HloComputation*, bool> computation_transitively_contains_custom_call, const TargetMachineFeatures* target_machine_features, bool emit_code_for_msan) : assignment_(assignment), module_(llvm_module), arch_type_(llvm::Triple(llvm_module->getTargetTriple()).getArch()), main_builder_(llvm_module->getContext()), current_builder_(&main_builder_), mlir_context_(mlir_context), instruction_to_profile_idx_(std::move(instruction_to_profile_idx)), computation_to_profile_idx_(std::move(computation_to_profile_idx)), computation_transitively_contains_custom_call_( std::move(computation_transitively_contains_custom_call)), alias_analysis_(hlo_module, assignment, &llvm_module->getContext()), hlo_module_config_(hlo_module.config()), is_top_level_computation_(false), target_machine_features_(*target_machine_features), emit_code_for_msan_(emit_code_for_msan) { b()->setFastMathFlags(llvm_ir::GetCpuFastMathFlags(hlo_module_config_)); absl::Status s = GatherComputationsByAllocationType( &hlo_module, &thread_local_computations_, &global_computations_); absl::c_sort(thread_local_computations_); absl::c_sort(global_computations_); TF_CHECK_OK(s) << "Should have failed buffer assignment."; } IrEmitter::~IrEmitter() { if (!compute_function_.empty()) { LOG(WARNING) << "Compute function stack is not empty: " << compute_function_.size(); } }; void IrEmitter::EmitThreadLocalFunctionEpilogue(HloComputation* computation) { llvm::Argument* out_parameter = 
compute_function()->result_arg(); llvm_ir::IrArray root_value = GetIrArrayFor(computation->root_instruction()); const Shape& return_shape = computation->root_instruction()->shape(); if (ShapeUtil::IsScalar(return_shape)) { llvm::Value* ret_value = Load(root_value.GetBasePointeeType(), root_value.GetBasePointer(), "load_ret_value"); Store(ret_value, out_parameter); } else { CHECK(return_shape.IsTuple()); llvm::Type* tuple_type = llvm_ir::ShapeToIrType(return_shape, module_); for (int i = 0; i < return_shape.tuple_shapes_size(); i++) { const Shape& element_shape = return_shape.tuple_shapes(i); llvm::Value* destination = llvm_ir::EmitGetTupleElement( element_shape, i, MinimumAlignmentForShape(element_shape), out_parameter, tuple_type, b()); llvm::Value* source = llvm_ir::EmitGetTupleElement( element_shape, i, MinimumAlignmentForShape(element_shape), root_value.GetBasePointer(), root_value.GetBasePointeeType(), b()); Store(Load(IrShapeType(element_shape), source), destination); } } } absl::StatusOr<llvm::Function*> IrEmitter::EmitComputation( HloComputation* computation, absl::string_view function_name_prefix, bool is_top_level_computation, absl::Span<HloInstruction* const> instruction_order, bool allow_reassociation, absl::Span<const llvm::Attribute::AttrKind> function_attributes) { std::string function_name = name_uniquer_.GetUniqueName(function_name_prefix); VLOG(2) << "Emitting IR for CPU function [" << function_name_prefix << "]"; is_top_level_computation_ = is_top_level_computation; allow_reassociation_ = allow_reassociation; num_dynamic_loop_bounds_ = 0; auto backend_config_or = computation->root_instruction()->backend_config<BackendConfig>(); if (backend_config_or.ok() && !backend_config_or->outer_dimension_partitions().empty()) { num_dynamic_loop_bounds_ = backend_config_or->outer_dimension_partitions().size(); } if (computation->root_instruction()->opcode() != HloOpcode::kOutfeed) { TF_ASSIGN_OR_RETURN( computation_root_allocation_, 
assignment_.GetUniqueTopLevelSlice(computation->root_instruction())); } bool has_thread_local_param = false; for (const HloInstruction* param : computation->parameter_instructions()) { TF_ASSIGN_OR_RETURN(BufferAllocation::Slice param_slice, assignment_.GetUniqueTopLevelSlice(param)); has_thread_local_param |= param_slice.allocation()->is_thread_local(); computation_parameter_allocations_[param_slice.allocation()->index()] = param->parameter_number(); } InitializeIrFunction(function_name); bool use_rdtscp = arch_type_ == llvm::Triple::ArchType::x86 || arch_type_ == llvm::Triple::ArchType::x86_64; profiling_state_ = ProfilingState(use_rdtscp); tracing_state_.set_enabled( computation->parent()->config().cpu_traceme_enabled()); llvm::IRBuilderBase::FastMathFlagGuard guard(*b()); llvm::FastMathFlags flags = b()->getFastMathFlags(); flags.setAllowReassoc(flags.allowReassoc() || allow_reassociation); b()->setFastMathFlags(flags); TF_RETURN_IF_ERROR(computation->AcceptOrdered(this, instruction_order)); llvm::Function* ir_function = compute_function()->function(); for (llvm::Attribute::AttrKind attr : function_attributes) { ir_function->addFnAttr(attr); } InsertOrDie(&emitted_functions_, ComputationToEmit{computation, allow_reassociation}, ir_function); const BufferAllocation* root_allocation = computation_root_allocation_.allocation(); if (root_allocation && (root_allocation->is_thread_local() || (root_allocation->is_constant() && has_thread_local_param))) { EmitThreadLocalFunctionEpilogue(computation); } PopComputeFunction(); computation_root_allocation_ = BufferAllocation::Slice(); computation_parameter_allocations_.clear(); return ir_function; } void IrEmitter::InitializeIrFunction(const std::string& function_name) { llvm::Function::LinkageTypes linkage = is_top_level_computation_ ? 
llvm::GlobalValue::ExternalLinkage : llvm::GlobalValue::InternalLinkage; compute_function_.emplace(function_name, linkage, hlo_module_config_, module_, b(), num_dynamic_loop_bounds_); } absl::Status IrEmitter::HandleBitcast(HloInstruction* bitcast) { VLOG(2) << "HandleBitcast: " << bitcast->ToString(); emitted_value_[bitcast] = GetEmittedValueFor(bitcast->operand(0)); return absl::OkStatus(); } llvm::Constant* IrEmitter::EmitGlobalForLiteral(const Literal& literal) { llvm::Constant* initializer = llvm_ir::ConvertLiteralToIrConstant(literal, module_); llvm::GlobalVariable* result_global = new llvm::GlobalVariable( *module_, initializer->getType(), true, llvm::GlobalValue::PrivateLinkage, initializer, ""); result_global->setAlignment( llvm::Align(MinimumAlignmentForShape(literal.shape()))); result_global->setUnnamedAddr(llvm::GlobalVariable::UnnamedAddr::Global); return result_global; } absl::Status IrEmitter::EmitConstantGlobals() { for (const BufferAllocation& allocation : assignment_.Allocations()) { if (!allocation.is_constant()) { continue; } const Literal& literal = llvm_ir::LiteralForConstantAllocation(allocation); llvm::Constant* global_for_const; auto it = emitted_literals_.find(LayoutSensitiveLiteralWrapper{literal}); if (it != emitted_literals_.end()) { global_for_const = it->second; } else { global_for_const = EmitGlobalForLiteral(literal); InsertOrDie(&emitted_literals_, LayoutSensitiveLiteralWrapper{literal}, global_for_const); } InsertOrDie(&constant_buffer_to_global_, allocation.index(), global_for_const); } return absl::OkStatus(); } absl::Status IrEmitter::HandleConstant(HloInstruction* constant) { VLOG(2) << "HandleConstant: " << constant->ToString(); return EmitTargetAddressForOp(constant); } absl::Status IrEmitter::HandleCopy(HloInstruction* copy) { if (copy->shape().IsTuple() || (copy->shape().IsArray() && LayoutUtil::Equal(copy->operand(0)->shape().layout(), copy->shape().layout()))) { TF_RETURN_IF_ERROR(EmitTargetAddressForOp(copy)); return 
EmitMemcpy(*(copy->operand(0)), *copy); } else if (copy->shape().IsArray()) { return DefaultAction(copy); } return Unimplemented("unsupported operand type %s for copy instruction", PrimitiveType_Name(copy->shape().element_type())); } int MinimumAlignmentForPrimitiveType(PrimitiveType primitive_type) { int64_t byte_size = ShapeUtil::ByteSizeOfPrimitiveType(primitive_type); DCHECK_GE(byte_size, 0); DCHECK_LE(byte_size, 16); return std::min(int64_t{8}, byte_size); } int IrEmitter::MinimumAlignmentForPrimitiveType(PrimitiveType primitive_type) { return ::xla::cpu::MinimumAlignmentForPrimitiveType(primitive_type); } int64_t IrEmitter::ByteSizeOf(const Shape& shape) const { return llvm_ir::ByteSizeOf(shape, module_->getDataLayout()); } int IrEmitter::MinimumAlignmentForShape(const Shape& shape) { if (ShapeUtil::IsScalar(shape)) { return MinimumAlignmentForPrimitiveType(shape.element_type()); } int64_t buffer_size = ByteSizeOf(shape); DCHECK_GE(buffer_size, 0); DCHECK_LE(buffer_size, SIZE_MAX); return target_machine_features_.minimum_alignment_for_allocation(buffer_size); } void IrEmitter::AttachAlignmentMetadataForLoad(llvm::LoadInst* load, const Shape& shape) { int alignment = MinimumAlignmentForShape(shape); if (alignment > 1) { llvm_ir::SetAlignmentMetadataForLoad(load, alignment); } } void IrEmitter::AttachAlignmentMetadataForLoad(llvm::LoadInst* load, int64_t buffer_size) { int alignment = target_machine_features_.minimum_alignment_for_allocation(buffer_size); if (alignment > 1) { llvm_ir::SetAlignmentMetadataForLoad(load, alignment); } } void IrEmitter::AttachDereferenceableMetadataForLoad(llvm::LoadInst* load, const Shape& shape) { AttachDereferenceableMetadataForLoad(load, ByteSizeOf(shape)); } void IrEmitter::AttachDereferenceableMetadataForLoad(llvm::LoadInst* load, int64_t buffer_size) { if (buffer_size > 0) { llvm_ir::SetDereferenceableMetadataForLoad(load, buffer_size); } } void IrEmitter::AttachInvariantLoadMetadataForLoad(llvm::LoadInst* load) const { 
AttachInvariantLoadMetadataForLoad(load, hlo_module_config_); } void IrEmitter::AttachInvariantLoadMetadataForLoad( llvm::LoadInst* load, const HloModuleConfig& config) { if (config.debug_options().xla_llvm_enable_invariant_load_metadata()) { load->setMetadata(llvm::LLVMContext::MD_invariant_load, llvm::MDNode::get(load->getContext(), {})); } } absl::Status IrEmitter::HandleGetTupleElement( HloInstruction* get_tuple_element) { const HloInstruction* operand = get_tuple_element->operand(0); const Shape& shape = get_tuple_element->shape(); emitted_value_[get_tuple_element] = llvm_ir::EmitGetTupleElement( shape, get_tuple_element->tuple_index(), MinimumAlignmentForShape(shape), GetEmittedValueFor(operand), IrShapeType(operand->shape()), b()); return absl::OkStatus(); } absl::Status IrEmitter::HandleSelect(HloInstruction* select) { auto pred = select->operand(0); TF_RET_CHECK(pred->shape().element_type() == PRED); return DefaultAction(select); } absl::Status IrEmitter::HandleInfeed(HloInstruction* instruction) { HloInfeedInstruction* infeed = Cast<HloInfeedInstruction>(instruction); VLOG(2) << "HandleInfeed: " << infeed->ToString(); const Shape& data_shape = infeed->infeed_shape(); DCHECK(ShapeUtil::Equal(data_shape, ShapeUtil::GetTupleElementShape(infeed->shape(), 0))); TF_RETURN_IF_ERROR(EmitTargetAddressForOp(infeed)); TF_ASSIGN_OR_RETURN(BufferAllocation::Slice data_slice, assignment_.GetUniqueSlice(infeed, {0})); llvm::Value* data_address = EmitBufferPointer(data_slice, data_shape); llvm::Type* data_type = IrShapeType(data_shape); TF_ASSIGN_OR_RETURN(BufferAllocation::Slice token_slice, assignment_.GetUniqueSlice(infeed, {1})); llvm::Value* token_address = EmitBufferPointer( token_slice, ShapeUtil::GetTupleElementShape(infeed->shape(), 1)); llvm_ir::EmitTuple(GetIrArrayFor(infeed), {data_address, token_address}, b()); if (data_shape.IsTuple()) { TF_RET_CHECK(!ShapeUtil::IsNestedTuple(data_shape)); std::vector<llvm::Value*> tuple_element_addresses; for (int i = 0; i 
< data_shape.tuple_shapes_size(); ++i) { TF_ASSIGN_OR_RETURN(BufferAllocation::Slice buffer, assignment_.GetUniqueSlice(infeed, {0, i})); const Shape& tuple_element_shape = ShapeUtil::GetTupleElementShape(data_shape, i); llvm::Value* tuple_element_address = EmitBufferPointer(buffer, tuple_element_shape); TF_RETURN_IF_ERROR(EmitXfeedTransfer( XfeedKind::kInfeed, tuple_element_shape, tuple_element_address)); tuple_element_addresses.push_back(tuple_element_address); } llvm_ir::EmitTuple(llvm_ir::IrArray(data_address, data_type, data_shape), tuple_element_addresses, b()); } else { TF_RETURN_IF_ERROR( EmitXfeedTransfer(XfeedKind::kInfeed, data_shape, data_address)); } return absl::OkStatus(); } absl::Status IrEmitter::EmitXfeedTransfer(XfeedKind kind, const Shape& shape, llvm::Value* program_buffer_address) { int64_t length = ByteSizeOf(shape); if (length < 0 || length > std::numeric_limits<int32_t>::max()) { return InvalidArgument( "xfeed (infeed or outfeed) buffer length %d is outside the valid " "size range", length); } int32_t length_32 = static_cast<int32_t>(length); int32_t shape_length; TF_ASSIGN_OR_RETURN( llvm::Value * shape_ptr, llvm_ir::EncodeSelfDescribingShapeConstant(shape, &shape_length, b())); const char* acquire_func_name = kind == XfeedKind::kInfeed ? 
runtime::kAcquireInfeedBufferForDequeueSymbolName : runtime::kAcquireOutfeedBufferForPopulationSymbolName; llvm::Value* acquired_pointer = EmitCallToFunc( acquire_func_name, {GetExecutableRunOptionsArgument(), b()->getInt32(length_32), shape_ptr, b()->getInt32(shape_length)}, b()->getPtrTy()); if (kind == XfeedKind::kInfeed) { MemCpy(program_buffer_address, llvm::Align(1), acquired_pointer, llvm::Align(1), length_32); } else { MemCpy(acquired_pointer, llvm::Align(1), program_buffer_address, llvm::Align(1), length_32); if (emit_code_for_msan_) { const llvm::DataLayout& dl = module_->getDataLayout(); llvm::Type* intptr_type = b()->getIntPtrTy(dl); EmitCallToFunc( "__msan_unpoison", {acquired_pointer, llvm::ConstantInt::get(intptr_type, length)}, b()->getVoidTy()); } } const char* release_func_name = kind == XfeedKind::kInfeed ? runtime::kReleaseInfeedBufferAfterDequeueSymbolName : runtime::kReleaseOutfeedBufferAfterPopulationSymbolName; EmitCallToFunc(release_func_name, {GetExecutableRunOptionsArgument(), b()->getInt32(length_32), acquired_pointer, shape_ptr, b()->getInt32(shape_length)}, b()->getVoidTy()); return absl::OkStatus(); } absl::Status IrEmitter::HandleOutfeed(HloInstruction* outfeed) { TF_RETURN_IF_ERROR(EmitTargetAddressForOp(outfeed)); HloInstruction* operand = outfeed->operands()[0]; const Shape& operand_shape = operand->shape(); llvm::Value* value = GetEmittedValueFor(operand); if (!operand_shape.IsTuple()) { return EmitXfeedTransfer(XfeedKind::kOutfeed, operand_shape, value); } TF_RET_CHECK(!ShapeUtil::IsNestedTuple(operand_shape)); for (int i = 0; i < operand_shape.tuple_shapes_size(); ++i) { const Shape& tuple_element_shape = ShapeUtil::GetTupleElementShape(operand_shape, i); llvm::Value* tuple_element = llvm_ir::EmitGetTupleElement( tuple_element_shape, i, MinimumAlignmentForShape(tuple_element_shape), value, IrShapeType(operand_shape), b()); TF_RETURN_IF_ERROR(EmitXfeedTransfer(XfeedKind::kOutfeed, tuple_element_shape, tuple_element)); } return 
absl::OkStatus(); } absl::Status IrEmitter::HandleSort(HloInstruction* hlo) { const HloSortInstruction* sort = Cast<HloSortInstruction>(hlo); TF_RETURN_IF_ERROR(EmitTargetAddressForOp(sort)); Shape keys_shape = sort->keys()->shape(); PrimitiveType keys_type = keys_shape.element_type(); if (!primitive_util::IsArrayType(keys_type)) { return Unimplemented("Element type %s not supported in the Sort op on CPU.", PrimitiveType_Name(keys_type)); } std::vector<llvm::Value*> destination_addresses(sort->operand_count()); for (int64_t i = 0; i < sort->operand_count(); ++i) { ShapeIndex shape_index = sort->values_count() > 0 ? ShapeIndex({i}) : ShapeIndex({}); const HloInstruction* operand = sort->operand(i); TF_RET_CHECK( LayoutUtil::LayoutsInShapesEqual(keys_shape, operand->shape())); TF_RET_CHECK(LayoutUtil::LayoutsInShapesEqual( keys_shape, ShapeUtil::GetSubshape(sort->shape(), shape_index))); auto destination_buffer = GetAllocationSlice(*sort, shape_index); destination_addresses[i] = EmitBufferPointer(destination_buffer, operand->shape()); auto source_address = GetAllocationSlice(*operand); if (destination_buffer != source_address) { int64_t primitive_type_size = ShapeUtil::ByteSizeOfPrimitiveType(operand->shape().element_type()); auto source_buffer = GetEmittedValueFor(operand); int64_t size = ByteSizeOf(operand->shape()); MemCpy(destination_addresses[i], llvm::Align(primitive_type_size), source_buffer, llvm::Align(primitive_type_size), size); } } Shape normalized_keys_shape = ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(keys_shape); auto logical_to_physical = LayoutUtil::MakeLogicalToPhysical(keys_shape.layout()); TF_RET_CHECK(sort->sort_dimension() < logical_to_physical.size()); int64_t physical_dimension_to_sort = logical_to_physical[sort->sort_dimension()]; int64_t sort_dimension_elements = normalized_keys_shape.dimensions(physical_dimension_to_sort); int64_t higher_dimensions = 1; for (int64_t i = 0; i < physical_dimension_to_sort; ++i) { 
higher_dimensions *= normalized_keys_shape.dimensions(i); } int64_t lower_dimensions = 1; for (int64_t i = normalized_keys_shape.rank() - 1; i > physical_dimension_to_sort; --i) { lower_dimensions *= normalized_keys_shape.dimensions(i); } CHECK(absl::c_binary_search(thread_local_computations_, sort->to_apply())); llvm::Value* values = llvm_ir::EmitAllocaAtFunctionEntryWithCount( b()->getPtrTy(), b()->getInt32(sort->operand_count()), "cc_values_alloca", b()); llvm::Value* sizes = llvm_ir::EmitAllocaAtFunctionEntryWithCount( b()->getInt32Ty(), b()->getInt32(sort->operand_count()), "cc_sizes_alloca", b()); for (int64_t i = 0; i < sort->operand_count(); ++i) { llvm::Value* slot_in_values_alloca = ConstInBoundsGEP1_32(b()->getPtrTy(), values, i); Store(destination_addresses[i], slot_in_values_alloca); llvm::Value* slot_in_sizes_alloca = ConstInBoundsGEP1_32(b()->getInt32Ty(), sizes, i); llvm::Value* size = b()->getInt32(ShapeUtil::ByteSizeOfPrimitiveType( sort->operand(i)->shape().element_type())); Store(size, slot_in_sizes_alloca); } auto less_than_function = FindOrDie(emitted_functions_, ComputationToEmit{sort->to_apply(), allow_reassociation_}); EmitCallToFunc( runtime::kKeyValueSortSymbolName, {b()->getInt64(higher_dimensions), b()->getInt64(sort_dimension_elements), b()->getInt64(lower_dimensions), values, b()->getInt32(sort->operand_count()), sizes, b()->getInt1(sort->is_stable()), GetExecutableRunOptionsArgument(), GetProfileCountersArgument(), less_than_function}, b()->getVoidTy()); if (sort->values_count() > 0) { llvm_ir::EmitTuple(GetIrArrayFor(sort), destination_addresses, b()); } return absl::OkStatus(); } absl::Status IrEmitter::HandleTuple(HloInstruction* tuple) { TF_RETURN_IF_ERROR(EmitTargetAddressForOp(tuple)); llvm::SmallVector<llvm::Value*> base_ptrs; for (auto operand : tuple->operands()) { base_ptrs.push_back(GetEmittedValueFor(operand)); } llvm_ir::EmitTuple(GetIrArrayFor(tuple), base_ptrs, b()); return absl::OkStatus(); } absl::Status 
// Emits ReduceWindow via the default elemental path, with reassociation
// temporarily enabled so the reduction may be vectorized/reordered.
IrEmitter::HandleReduceWindow(HloInstruction* reduce_window) {
  bool saved_allow_reassociation = allow_reassociation_;
  allow_reassociation_ = true;
  absl::Status status = DefaultAction(reduce_window);
  // Restore the previous setting regardless of the result.
  allow_reassociation_ = saved_allow_reassociation;
  return status;
}

// Convenience overload: resolves the IR arrays for operand, source and output
// and forwards to the full implementation below.
absl::Status IrEmitter::HandleSelectAndScatter(
    HloInstruction* select_and_scatter) {
  CHECK_EQ(select_and_scatter->operand_count(), 3);
  const auto operand = select_and_scatter->operand(0);
  const auto source = select_and_scatter->operand(1);
  return HandleSelectAndScatter(select_and_scatter, GetIrArrayFor(operand),
                                GetIrArrayFor(source),
                                GetIrArrayFor(select_and_scatter));
}

// Emits SelectAndScatter: initializes the output with init_value, then for
// each source element scans its window over the operand, uses the `select`
// computation to pick a winning operand position, and applies the `scatter`
// computation to combine the source value into the output at that position.
absl::Status IrEmitter::HandleSelectAndScatter(
    HloInstruction* select_and_scatter, const llvm_ir::IrArray& operand_array,
    const llvm_ir::IrArray& source_array,
    const llvm_ir::IrArray& output_array) {
  CHECK_EQ(select_and_scatter->operand_count(), 3);
  const auto operand = select_and_scatter->operand(0);
  const auto source = select_and_scatter->operand(1);
  const auto init_value = select_and_scatter->operand(2);
  const Window& window = select_and_scatter->window();
  PrimitiveType operand_element_type = operand->shape().element_type();
  const int64_t rank = operand->shape().rank();
  CHECK_EQ(rank, source->shape().rank());
  CHECK_EQ(rank, window.dimensions_size());
  if (window_util::HasDilation(window)) {
    return Unimplemented(
        "Dilation for SelectAndScatter is not implemented on CPU. ");
  }
  // Pass 1: fill the entire output with the init value.
  TF_RETURN_IF_ERROR(EmitTargetElementLoop(
      select_and_scatter, IrName(select_and_scatter, "init"),
      [this, init_value](const llvm_ir::IrArray::Index& target_index) {
        llvm::Value* init_value_addr = GetEmittedValueFor(init_value);
        return Load(IrShapeType(init_value->shape()), init_value_addr);
      },
      std::optional<llvm_ir::IrArray>(output_array)));
  // Pass 2: outer loop nest over the source shape.
  llvm_ir::ForLoopNest source_loops(IrName(select_and_scatter), b());
  const llvm_ir::IrArray::Index source_index =
      source_loops.AddLoopsForShape(source->shape(), "source");
  SetToFirstInsertPoint(source_loops.GetInnerLoopBodyBasicBlock(), b());
  // Stack slots that track the current window's winning value, its operand
  // index, and whether any in-bounds element has been visited yet.
  llvm::AllocaInst* selected_value_address = llvm_ir::EmitAllocaAtFunctionEntry(
      llvm_ir::PrimitiveTypeToIrType(operand_element_type, module_),
      "selected_value_address", b(),
      MinimumAlignmentForPrimitiveType(operand_element_type));
  llvm::AllocaInst* selected_index_address =
      llvm_ir::EmitAllocaAtFunctionEntryWithCount(
          b()->getInt64Ty(), b()->getInt32(rank), "selected_index_address",
          b());
  llvm::AllocaInst* initialized_flag_address =
      llvm_ir::EmitAllocaAtFunctionEntry(b()->getInt1Ty(),
                                         "initialized_flag_address", b());
  Store(b()->getInt1(false), initialized_flag_address);
  // Inner loop nest over the window.
  llvm_ir::ForLoopNest window_loops(IrName(select_and_scatter, "window"), b());
  llvm::SmallVector<int64_t> window_size;
  for (const auto& dim : window.dimensions()) {
    window_size.push_back(dim.size());
  }
  const llvm_ir::IrArray::Index window_index = window_loops.AddLoopsForShape(
      ShapeUtil::MakeShape(operand_element_type, window_size), "window");
  SetToFirstInsertPoint(window_loops.GetInnerLoopBodyBasicBlock(), b());
  // Compute the operand coordinate for this (source, window) pair:
  // source_index * stride + window_index - padding_low. Padding can push the
  // coordinate negative; the unsigned compare below then also rejects it,
  // since a negative int64 reinterpreted as unsigned exceeds any dimension.
  llvm::SmallVector<llvm::Value*> operand_multi_index(source_index.size());
  llvm::Value* in_bounds_condition = b()->getTrue();
  for (int64_t i = 0; i < rank; ++i) {
    llvm::Value* strided_index =
        NSWMul(source_index[i], b()->getInt64(window.dimensions(i).stride()));
    operand_multi_index[i] =
        NSWSub(NSWAdd(strided_index, window_index[i]),
               b()->getInt64(window.dimensions(i).padding_low()));
    llvm::Value* index_condition =
        ICmpULT(operand_multi_index[i],
                b()->getInt64(ShapeUtil::GetDimension(operand->shape(), i)));
    in_bounds_condition = And(in_bounds_condition, index_condition);
  }
  CHECK(in_bounds_condition != nullptr);
  // Only consider in-bounds operand elements.
  llvm_ir::LlvmIfData if_in_bounds =
      llvm_ir::EmitIfThenElse(in_bounds_condition, "in-bounds", b());
  SetToFirstInsertPoint(if_in_bounds.true_block, b());
  llvm_ir::LlvmIfData if_initialized = llvm_ir::EmitIfThenElse(
      Load(initialized_flag_address->getAllocatedType(),
           initialized_flag_address),
      "initialized", b());
  // First in-bounds element of the window: unconditionally becomes the
  // current selection.
  SetToFirstInsertPoint(if_initialized.false_block, b());
  const auto save_operand_index =
      [&](const llvm_ir::IrArray::Index& operand_index) {
        for (int64_t i = 0; i < rank; ++i) {
          llvm::Value* selected_index_address_slot =
              InBoundsGEP(selected_index_address->getAllocatedType(),
                          selected_index_address, {b()->getInt32(i)});
          Store(operand_index[i], selected_index_address_slot);
        }
      };
  llvm_ir::IrArray::Index operand_index(
      operand_multi_index, operand_array.GetShape(), b()->getInt64Ty());
  llvm::Value* operand_data =
      operand_array.EmitReadArrayElement(operand_index, b());
  Store(operand_data, selected_value_address);
  save_operand_index(operand_index);
  Store(b()->getInt1(true), initialized_flag_address);
  // Subsequent elements: call the `select` computation against the current
  // selection; if it returns false, the new element wins.
  SetToFirstInsertPoint(if_initialized.true_block, b());
  llvm::Value* operand_address =
      operand_array.EmitArrayElementAddress(operand_index, b());
  llvm::Value* operand_element =
      Load(operand_array.GetElementLlvmType(), operand_address);
  llvm::Value* result = EmitScalarReturningThreadLocalCall(
      *select_and_scatter->select(),
      {Load(selected_value_address->getAllocatedType(),
            selected_value_address),
       operand_element},
      "select_function");
  llvm::Value* cond = ICmpNE(
      result,
      llvm::ConstantInt::get(llvm_ir::PrimitiveTypeToIrType(PRED, module_), 0),
      "boolean_predicate");
  llvm_ir::LlvmIfData if_select_lhs =
      llvm_ir::EmitIfThenElse(cond, "if-select-lhs", b());
  SetToFirstInsertPoint(if_select_lhs.false_block, b());
  Store(Load(operand_array.GetElementLlvmType(), operand_address),
        selected_value_address);
  save_operand_index(operand_index);
  // After the window loops: reload the selected index and scatter the source
  // value into the output at that position.
  SetToFirstInsertPoint(window_loops.GetOuterLoopExitBasicBlock(), b());
  llvm::SmallVector<llvm::Value*> selected_multi_index;
  for (int64_t i = 0; i < rank; ++i) {
    const std::vector<llvm::Value*> gep_index = {b()->getInt32(i)};
    llvm::Value* selected_index_address_slot =
        InBoundsGEP(selected_index_address->getAllocatedType(),
                    selected_index_address, gep_index);
    llvm::Type* type = llvm::GetElementPtrInst::getIndexedType(
        selected_index_address->getAllocatedType(), gep_index);
    selected_multi_index.push_back(Load(type, selected_index_address_slot));
  }
  llvm::Value* source_value =
      source_array.EmitReadArrayElement(source_index, b());
  llvm_ir::IrArray::Index selected_index(
      selected_multi_index, output_array.GetShape(), source_index.GetType());
  llvm::Value* output_value =
      output_array.EmitReadArrayElement(selected_index, b());
  // Combine via the `scatter` computation (read-modify-write on the output).
  llvm::Value* scatter_value = EmitScalarReturningThreadLocalCall(
      *select_and_scatter->scatter(), {output_value, source_value},
      "scatter_function");
  output_array.EmitWriteArrayElement(selected_index, scatter_value, b());
  SetToFirstInsertPoint(source_loops.GetOuterLoopExitBasicBlock(), b());
  return absl::OkStatus();
}

// Emits a Dot HLO by delegating to EmitDotOperation (which selects a matmul
// strategy). Only single-contracting-dimension dots are supported here.
absl::Status IrEmitter::HandleDot(HloInstruction* dot) {
  auto lhs = dot->operand(0);
  auto rhs = dot->operand(1);
  TF_RETURN_IF_ERROR(ElementTypesSameAndSupported(
      *dot, {lhs, rhs},
      {PRED, S8, U8, S16, U16, S32, U32, S64, U64, F16, F32, F64, C64, C128}));
  const DotDimensionNumbers& dnums = dot->dot_dimension_numbers();
  if (dnums.lhs_contracting_dimensions_size() != 1) {
    return Unimplemented(
        "Dot with multiple contracting dimensions not implemented.");
  }
  llvm_ir::IrArray lhs_array(GetIrArrayFor(lhs));
  llvm_ir::IrArray rhs_array(GetIrArrayFor(rhs));
  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(dot));
  llvm_ir::IrArray target_array = GetIrArrayFor(dot);
  VLOG(2) << "HandleDot: ";
  VLOG(2) << " lhs operand: "
          << llvm_ir::DumpToString(lhs_array.GetBasePointer());
  VLOG(2) << " rhs operand: "
          << llvm_ir::DumpToString(rhs_array.GetBasePointer());
  VLOG(2) << " target: "
          << llvm_ir::DumpToString(target_array.GetBasePointer());
  // Dot emission (including runtime-call vs. inline-loop selection) lives in
  // EmitDotOperation; no addend array is passed here (nullptr).
  return EmitDotOperation(*dot, target_array, lhs_array, rhs_array, nullptr,
                          GetExecutableRunOptionsArgument(), b(),
                          hlo_module_config_, target_machine_features_);
}

// Emits a Convolution HLO. When the convolution is eligible for the Eigen
// runtime (2D/3D, monotonic dim0-major layouts), emits a call to the matching
// runtime symbol; otherwise falls back to the default elemental loop.
absl::Status IrEmitter::HandleConvolution(HloInstruction* convolution) {
  auto lhs = convolution->operand(0);
  auto rhs = convolution->operand(1);
  TF_RETURN_IF_ERROR(ElementTypesSameAndSupported(
      *convolution, {lhs, rhs},
      {PRED, S8, U8, S16, U16, S32, U32, S64, U64, F16, F32, F64, C64, C128}));
  if (PotentiallyImplementedAsEigenConvolution(*convolution,
                                               target_machine_features_)) {
    const Shape& lhs_shape = lhs->shape();
    const Shape& rhs_shape = rhs->shape();
    const Shape& convolution_shape = convolution->shape();
    if (LayoutUtil::IsMonotonicWithDim0Major(lhs_shape.layout()) &&
        LayoutUtil::IsMonotonicWithDim0Major(rhs_shape.layout()) &&
        LayoutUtil::IsMonotonicWithDim0Major(convolution_shape.layout())) {
      // A rank-3 input means a 1D convolution; it is promoted to 2D by
      // prepending a unit spatial dimension to every spatial parameter.
      bool one_dim_convolution = lhs_shape.dimensions_size() == 3;
      llvm::Value* lhs_address = GetEmittedValueFor(lhs);
      llvm::Value* rhs_address = GetEmittedValueFor(rhs);
      TF_RETURN_IF_ERROR(EmitTargetAddressForOp(convolution));
      const ConvolutionDimensionNumbers& dnums =
          convolution->convolution_dimension_numbers();
      absl::InlinedVector<int64_t, 2> input_dims;
      absl::InlinedVector<int64_t, 2> kernel_dims;
      absl::InlinedVector<int64_t, 2> output_dims;
      if (one_dim_convolution) {
        input_dims.push_back(1);
        kernel_dims.push_back(1);
        output_dims.push_back(1);
      }
      // Gather spatial extents and channel counts per the dimension numbers.
      const Shape& input_shape = convolution->operand(0)->shape();
      int64_t input_batch =
          input_shape.dimensions(dnums.input_batch_dimension());
      for (int d : dnums.input_spatial_dimensions()) {
        input_dims.push_back(input_shape.dimensions(d));
      }
      int64_t input_channels =
          input_shape.dimensions(dnums.input_feature_dimension());
      const Shape& kernel_shape = convolution->operand(1)->shape();
      for (int d : dnums.kernel_spatial_dimensions()) {
        kernel_dims.push_back(kernel_shape.dimensions(d));
      }
      int64_t kernel_channels =
          kernel_shape.dimensions(dnums.kernel_input_feature_dimension());
      int64_t kernel_filters =
          kernel_shape.dimensions(dnums.kernel_output_feature_dimension());
      const Shape& convolution_shape = convolution->shape();
      for (int d : dnums.output_spatial_dimensions()) {
        output_dims.push_back(convolution_shape.dimensions(d));
      }
      // Window parameters (stride/padding/dilation), again with unit entries
      // prepended for the 1D case.
      const Window& window = convolution->window();
      absl::InlinedVector<int64_t, 2> strides;
      absl::InlinedVector<std::pair<int64_t, int64_t>, 2> padding;
      absl::InlinedVector<int64_t, 2> base_dilation;
      absl::InlinedVector<int64_t, 2> window_dilation;
      if (one_dim_convolution) {
        strides.push_back(1);
        padding.push_back({0, 0});
        base_dilation.push_back(1);
        window_dilation.push_back(1);
      }
      for (const auto& d : window.dimensions()) {
        strides.push_back(d.stride());
        padding.push_back({d.padding_low(), d.padding_high()});
        base_dilation.push_back(d.base_dilation());
        window_dilation.push_back(d.window_dilation());
      }
      PrimitiveType primitive_type = lhs->shape().element_type();
      bool multi_threaded =
          hlo_module_config_.debug_options().xla_cpu_multi_thread_eigen();
      // MKL-DNN is only usable for non-grouped convolutions here.
      bool use_mkl_dnn =
          hlo_module_config_.debug_options().xla_cpu_use_mkl_dnn() &&
          convolution->feature_group_count() == 1;
      bool use_acl = hlo_module_config_.debug_options().xla_cpu_use_acl();
      auto valid_num_dims = [](absl::Span<const int64_t> xs) {
        return xs.size() >= 2 && xs.size() <= 3;
      };
      TF_RET_CHECK(valid_num_dims(input_dims)) << input_dims.size();
      TF_RET_CHECK(valid_num_dims(kernel_dims));
      TF_RET_CHECK(valid_num_dims(output_dims));
      TF_RET_CHECK(valid_num_dims(strides));
      TF_RET_CHECK(padding.size() >= 2 && padding.size() <= 3);
      TF_RET_CHECK(valid_num_dims(base_dilation));
      TF_RET_CHECK(valid_num_dims(window_dilation));
      // Select the runtime symbol by rank, element type, threading mode and
      // backend library.
      const char* fn_name;
      if (input_dims.size() == 2) {
        fn_name =
            primitive_type == F16
                ? (multi_threaded
                       ? runtime::kEigenConv2DF16SymbolName
                       : runtime::kEigenSingleThreadedConv2DF16SymbolName)
                : (multi_threaded
                       ? (use_mkl_dnn
                              ? runtime::kMKLConv2DF32SymbolName
                              : (use_acl ? runtime::kACLConv2DF32SymbolName
                                         : runtime::kEigenConv2DF32SymbolName))
                       : runtime::kEigenSingleThreadedConv2DF32SymbolName);
      } else if (input_dims.size() == 3) {
        fn_name =
            primitive_type == F16
                ? (multi_threaded
                       ? runtime::kEigenConv3DF16SymbolName
                       : runtime::kEigenSingleThreadedConv3DF16SymbolName)
                : (multi_threaded
                       ? runtime::kEigenConv3DF32SymbolName
                       : runtime::kEigenSingleThreadedConv3DF32SymbolName);
      } else {
        LOG(FATAL) << "Invalid number of dimensions " << input_dims.size();
      }
      if (!multi_threaded && use_mkl_dnn) {
        LOG(WARNING) << "Using Eigen instead of MKL-DNN for single-threaded "
                        "convolution.";
      }
      // Marshal the flat argument list expected by the runtime entry point.
      std::vector<llvm::Value*> args = {
          GetExecutableRunOptionsArgument(),
          GetEmittedValueFor(convolution),
          lhs_address,
          rhs_address,
          b()->getInt64(input_batch),
      };
      for (int64_t d : input_dims) {
        args.push_back(b()->getInt64(d));
      }
      args.push_back(b()->getInt64(input_channels));
      for (int64_t d : kernel_dims) {
        args.push_back(b()->getInt64(d));
      }
      args.push_back(b()->getInt64(kernel_channels));
      args.push_back(b()->getInt64(kernel_filters));
      for (int64_t d : output_dims) {
        args.push_back(b()->getInt64(d));
      }
      for (int64_t d : strides) {
        args.push_back(b()->getInt64(d));
      }
      for (const auto& p : padding) {
        args.push_back(b()->getInt64(p.first));
        args.push_back(b()->getInt64(p.second));
      }
      for (int64_t d : base_dilation) {
        args.push_back(b()->getInt64(d));
      }
      for (int64_t d : window_dilation) {
        args.push_back(b()->getInt64(d));
      }
      args.push_back(b()->getInt64(convolution->feature_group_count()));
      VLOG(1) << "Ir emitter emitted Convolution to runtime:" << fn_name;
      EmitCallToFunc(fn_name, args, b()->getVoidTy(), true, true);
      return absl::OkStatus();
    }
  }
  // Not eligible for the runtime path: emit an elemental loop instead.
  return DefaultAction(convolution);
}

// Emits an FFT HLO as a call to the DUCC FFT runtime.
absl::Status IrEmitter::HandleFft(HloInstruction* fft) {
  auto operand = fft->operand(0);
  TF_RETURN_IF_ERROR(ElementTypesSameAndSupported(
      *fft, {operand}, {F32, F64, C64, C128}));
  TF_RET_CHECK(LayoutUtil::IsMonotonicWithDim0Major(operand->shape().layout()));
  TF_RET_CHECK(LayoutUtil::IsMonotonicWithDim0Major(fft->shape().layout()));
  VLOG(3) << "operand=" << ShapeUtil::HumanStringWithLayout(operand->shape());
  VLOG(3) << "fft=" << ShapeUtil::HumanStringWithLayout(fft->shape());
  llvm::Value* operand_address = GetEmittedValueFor(operand);
  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(fft));
  const std::vector<int64_t>& fft_length = fft->fft_length();
  const int fft_rank = fft_length.size();
  // Flatten the operand shape to [batch, fft_dims...]: all leading
  // non-transformed dimensions collapse into a single batch dimension.
  absl::InlinedVector<int64_t, 4> operand_shape_flat(fft_rank + 1);
  int64_t input_batch = 1;
  int64_t input_batch_length = fft->shape().dimensions_size() - fft_rank;
  for (int i = 0; i < input_batch_length; i++) {
    input_batch *= operand->shape().dimensions(i);
  }
  operand_shape_flat[0] = input_batch;
  for (int i = 0; i < fft_rank; ++i) {
    operand_shape_flat[i + 1] =
        operand->shape().dimensions(i + input_batch_length);
  }
  // Pick the multi-threaded or single-threaded DUCC entry point.
  bool multi_threaded_eigen =
      hlo_module_config_.debug_options().xla_cpu_multi_thread_eigen();
  const char* fn_name = multi_threaded_eigen
                            ? runtime::kDuccFftSymbolName
                            : runtime::kDuccSingleThreadedFftSymbolName;
  // Lengths and shape are passed as global constant arrays.
  auto* fft_lengths =
      EmitGlobalForLiteral(LiteralUtil::CreateR1<int64_t>(fft_length));
  auto* input_shape =
      EmitGlobalForLiteral(LiteralUtil::CreateR1<int64_t>(operand_shape_flat));
  EmitCallToFunc(fn_name,
                 {GetExecutableRunOptionsArgument(), GetEmittedValueFor(fft),
                  operand_address, b()->getInt32(fft->fft_type()),
                  // Flag for double precision (F64/C128) inputs.
                  b()->getInt32(operand->shape().element_type() == F64 ||
                                operand->shape().element_type() == C128),
                  b()->getInt32(fft_rank), input_shape, fft_lengths},
                 b()->getVoidTy(), true, false, true);
  return absl::OkStatus();
}

// Single-replica AllReduce degenerates to a copy of each operand into its
// output buffer (plus a tuple table when there are multiple operands).
absl::Status IrEmitter::HandleAllReduceSingleReplica(HloInstruction* crs) {
  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(crs));
  if (crs->operand_count() == 1) {
    return EmitMemcpy(*crs->operand(0), *crs);
  }
  std::vector<llvm::Value*> operand_ptrs;
  for (int64_t i = 0; i < crs->operand_count(); ++i) {
    llvm::Value* in_ptr = GetEmittedValueFor(crs->operand(i));
    TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_slice,
                        assignment_.GetUniqueSlice(crs, {i}));
    const Shape& operand_shape = crs->operand(i)->shape();
    CHECK(operand_shape.IsArray())
        << "Operands to all-reduce must be arrays: " << crs->ToString();
    operand_ptrs.push_back(EmitBufferPointer(out_slice, operand_shape));
    MemCpy(operand_ptrs.back(), llvm::Align(1), in_ptr, llvm::Align(1),
           ShapeUtil::ByteSizeOf(operand_shape));
  }
  llvm_ir::EmitTuple(GetIrArrayFor(crs), operand_ptrs, b());
  return absl::OkStatus();
}

// Whitelist of element types the reduce-scatter/all-reduce runtime supports.
static bool DataTypeIsSupportedByReduceScatter(PrimitiveType datatype) {
  switch (datatype) {
    case PRED:
    case S8:
    case U8:
    case S16:
    case U16:
    case S32:
    case U32:
    case S64:
    case U64:
    case F16:
    case F32:
    case F64:
    case C64:
    case C128:
      return true;
    default:
      return false;
  }
}

// Multi-replica AllReduce: emits a call to the all-reduce runtime with the
// replica groups, reduction kind, self-describing shape and buffer lists.
absl::Status IrEmitter::HandleAllReduceMultipleReplica(HloInstruction* crs) {
  CHECK_GE(crs->operand_count(), 1);
  PrimitiveType datatype = crs->operand(0)->shape().element_type();
  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(crs));
  if
  (!DataTypeIsSupportedByReduceScatter(datatype)) {
    return Unimplemented("AllReduce for datatype '%s' is not supported",
                         primitive_util::LowercasePrimitiveTypeName(datatype));
  }
  // Only reductions recognizable as a standard kind (add/min/max/...) can be
  // forwarded to the runtime.
  if (!MatchReductionComputation(crs->to_apply()).has_value()) {
    return Unimplemented("AllReduce for computation '%s' is not supported",
                         crs->to_apply()->ToString());
  }
  // Replica groups are serialized to a string and passed as a global.
  std::string replica_groups = ReplicaGroupsToString(crs->replica_groups());
  int32_t replica_groups_size = replica_groups.size();
  llvm::Value* replica_groups_v = b()->CreateGlobalStringPtr(replica_groups);
  bool is_tuple = crs->operand_count() > 1;
  std::vector<llvm::Value*> input_buffer_ptrs;
  std::vector<llvm::Value*> output_buffer_ptrs;
  if (is_tuple) {
    // Variadic all-reduce: one input/output buffer pair per operand.
    CHECK(crs->shape().IsTuple());
    for (int64_t i = 0; i < crs->operand_count(); i++) {
      const HloInstruction* op = crs->operand(i);
      TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_slice,
                          assignment_.GetUniqueSlice(crs, {i}));
      const Shape& operand_shape = crs->operand(i)->shape();
      CHECK(operand_shape.IsArray())
          << "Operands to all-reduce must be arrays: " << crs->ToString();
      output_buffer_ptrs.push_back(EmitBufferPointer(out_slice, operand_shape));
      input_buffer_ptrs.push_back(GetEmittedValueFor(op));
    }
  } else {
    Shape shape = crs->operand(0)->shape();
    TF_ASSIGN_OR_RETURN(BufferAllocation::Slice input_slice,
                        assignment_.GetUniqueSlice(crs->operand(0), {}));
    TF_ASSIGN_OR_RETURN(BufferAllocation::Slice output_slice,
                        assignment_.GetUniqueSlice(crs, {}));
    input_buffer_ptrs.push_back(EmitBufferPointer(input_slice, shape));
    output_buffer_ptrs.push_back(EmitBufferPointer(output_slice, shape));
  }
  llvm::Value* input_buffers =
      EncodeArrayFunctionArguments(input_buffer_ptrs, "input_buffers", b());
  llvm::Value* output_buffers =
      EncodeArrayFunctionArguments(output_buffer_ptrs, "output_buffers", b());
  // The full (possibly tuple) shape is encoded into a constant the runtime
  // can decode; shape_length receives its byte length.
  int32_t shape_length;
  TF_ASSIGN_OR_RETURN(llvm::Value * shape_ptr,
                      llvm_ir::EncodeSelfDescribingShapeConstant(
                          crs->shape(), &shape_length, b()));
  bool use_global_device_ids =
      Cast<HloAllReduceInstruction>(crs)->use_global_device_ids();
  EmitCallToFunc(
      runtime::kAllReduceSymbolName,
      {GetExecutableRunOptionsArgument(), replica_groups_v,
       b()->getInt32(replica_groups_size),
       b()->getInt32(static_cast<int32_t>(crs->channel_id().has_value())),
       b()->getInt32(static_cast<int32_t>(use_global_device_ids)),
       // Without a channel id, the module's unique id disambiguates the op.
       b()->getInt64(crs->channel_id().has_value()
                         ? *crs->channel_id()
                         : crs->GetModule()->unique_id()),
       b()->getInt32(
           static_cast<int32_t>(*MatchReductionComputation(crs->to_apply()))),
       shape_ptr, b()->getInt32(shape_length),
       b()->getInt32(crs->operand_count()), input_buffers, output_buffers},
      b()->getVoidTy());
  return absl::OkStatus();
}

// Dispatches AllReduce: trivial copy when there is a single replica and a
// single partition, otherwise the runtime collective path.
absl::Status IrEmitter::HandleAllReduce(HloInstruction* crs) {
  if (hlo_module_config_.replica_count() == 1 &&
      hlo_module_config_.num_partitions() == 1) {
    return HandleAllReduceSingleReplica(crs);
  }
  return HandleAllReduceMultipleReplica(crs);
}

// Emits ReduceScatter as a call to the reduce-scatter runtime.
absl::Status IrEmitter::HandleReduceScatter(HloInstruction* rs) {
  CHECK_EQ(rs->operand_count(), 1);
  PrimitiveType datatype = rs->operand(0)->shape().element_type();
  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(rs));
  if (!DataTypeIsSupportedByReduceScatter(datatype)) {
    return Unimplemented("ReduceScatter for datatype '%s' is not supported",
                         primitive_util::LowercasePrimitiveTypeName(datatype));
  }
  if (!MatchReductionComputation(rs->to_apply()).has_value()) {
    return Unimplemented("ReduceScatter for computation '%s' is not supported",
                         rs->to_apply()->ToString());
  }
  std::string replica_groups = ReplicaGroupsToString(rs->replica_groups());
  int32_t replica_groups_size = replica_groups.size();
  llvm::Value* replica_groups_v = b()->CreateGlobalStringPtr(replica_groups);
  Shape shape = rs->operand(0)->shape();
  TF_ASSIGN_OR_RETURN(BufferAllocation::Slice input_slice,
                      assignment_.GetUniqueSlice(rs->operand(0), {}));
  TF_ASSIGN_OR_RETURN(BufferAllocation::Slice output_slice,
                      assignment_.GetUniqueSlice(rs, {}));
  llvm::Value* input_buffer = EmitBufferPointer(input_slice, shape);
  llvm::Value*
      output_buffer = EmitBufferPointer(output_slice, shape);
  bool use_global_device_ids =
      Cast<HloReduceScatterInstruction>(rs)->use_global_device_ids();
  EmitCallToFunc(
      runtime::kReduceScatterSymbolName,
      {GetExecutableRunOptionsArgument(), replica_groups_v,
       b()->getInt32(replica_groups_size),
       b()->getInt32(static_cast<int32_t>(rs->channel_id().has_value())),
       b()->getInt32(static_cast<int32_t>(use_global_device_ids)),
       // Fall back to the module's unique id when no channel id is set.
       b()->getInt64(rs->channel_id().has_value()
                         ? *rs->channel_id()
                         : rs->GetModule()->unique_id()),
       b()->getInt32(
           static_cast<int32_t>(*MatchReductionComputation(rs->to_apply()))),
       b()->getInt32(static_cast<int32_t>(datatype)),
       // Element count of the (scattered) output shape.
       b()->getInt64(ShapeUtil::ElementsIn(rs->shape())), input_buffer,
       output_buffer},
      b()->getVoidTy());
  return absl::OkStatus();
}

// Emits tuple-form AllToAll as a call to the all-to-all runtime. The
// split-dimension (array) variant is not supported here.
absl::Status IrEmitter::HandleAllToAll(HloInstruction* instruction) {
  auto* instr = Cast<HloAllToAllInstruction>(instruction);
  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(instruction));
  CHECK(!instr->split_dimension() && instr->shape().IsTuple())
      << "Only tuple AllToAll is supported";
  std::string replica_groups =
      ReplicaGroupsToString(instruction->replica_groups());
  int32_t replica_groups_size = replica_groups.size();
  llvm::Value* replica_groups_v = b()->CreateGlobalStringPtr(replica_groups);
  int64_t buffer_size = -1;
  std::vector<llvm::Value*> input_buffer_ptrs;
  std::vector<llvm::Value*> output_buffer_ptrs;
  for (int64_t i = 0; i < instruction->operand_count(); i++) {
    const HloInstruction* op = instruction->operand(i);
    TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_slice,
                        assignment_.GetUniqueSlice(instruction, {i}));
    const Shape& operand_shape = instruction->operand(i)->shape();
    CHECK(operand_shape.IsArray())
        << "Operands to all-to-all must be arrays: " << instruction->ToString();
    output_buffer_ptrs.push_back(EmitBufferPointer(out_slice, operand_shape));
    input_buffer_ptrs.push_back(GetEmittedValueFor(op));
    // The runtime requires every per-operand buffer to have the same size.
    CHECK(buffer_size == -1 || buffer_size == out_slice.size());
    buffer_size = out_slice.size();
  }
  llvm::Value* input_buffers =
      EncodeArrayFunctionArguments(input_buffer_ptrs, "input_buffers", b());
  llvm::Value* output_buffers =
      EncodeArrayFunctionArguments(output_buffer_ptrs, "output_buffers", b());
  EmitCallToFunc(
      runtime::kAllToAllSymbolName,
      {
          GetExecutableRunOptionsArgument(),
          b()->getInt32(
              static_cast<int32_t>(instruction->channel_id().has_value())),
          b()->getInt64(instruction->channel_id().has_value()
                            ? *instruction->channel_id()
                            : instruction->GetModule()->unique_id()),
          replica_groups_v,
          b()->getInt32(replica_groups_size),
          b()->getInt32(instruction->operand_count()),
          b()->getInt64(buffer_size),
          input_buffers,
          output_buffers,
      },
      b()->getVoidTy());
  llvm_ir::EmitTuple(GetIrArrayFor(instruction), output_buffer_ptrs, b());
  return absl::OkStatus();
}

// Emits AllGather as a call to the all-gather runtime.
absl::Status IrEmitter::HandleAllGather(HloInstruction* instruction) {
  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(instruction));
  std::string replica_groups =
      ReplicaGroupsToString(instruction->replica_groups());
  int32_t replica_groups_size = replica_groups.size();
  llvm::Value* replica_groups_v = b()->CreateGlobalStringPtr(replica_groups);
  std::vector<llvm::Value*> input_buffer_ptrs;
  std::vector<llvm::Value*> output_buffer_ptrs;
  const HloInstruction* op = instruction->operand(0);
  TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice in_slice,
                      assignment_.GetUniqueSlice(op, {}));
  TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_slice,
                      assignment_.GetUniqueSlice(instruction, {}));
  const Shape& operand_shape = op->shape();
  CHECK(op->shape().IsArray())
      << "Operand to all-gather must be arrays: " << instruction->ToString();
  llvm::Value* output_buffer = EmitBufferPointer(out_slice, operand_shape);
  llvm::Value* input_buffer = GetEmittedValueFor(op);
  // Size of the local contribution (the input slice), in bytes.
  int64_t buffer_size = in_slice.size();
  bool use_global_device_ids =
      Cast<HloAllGatherInstruction>(instruction)->use_global_device_ids();
  EmitCallToFunc(
      runtime::kAllGatherSymbolName,
      {
          GetExecutableRunOptionsArgument(),
          b()->getInt32(
              static_cast<int32_t>(instruction->channel_id().has_value())),
          b()->getInt32(static_cast<int32_t>(use_global_device_ids)),
          // Fall back to the module's unique id when no channel id is set.
          b()->getInt64(instruction->channel_id().has_value()
                            ? *instruction->channel_id()
                            : instruction->GetModule()->unique_id()),
          replica_groups_v,
          b()->getInt32(replica_groups_size),
          b()->getInt64(buffer_size),
          input_buffer,
          output_buffer,
      },
      b()->getVoidTy());
  llvm_ir::EmitTuple(GetIrArrayFor(instruction), output_buffer_ptrs, b());
  return absl::OkStatus();
}

// Emits CollectivePermute as a call to the collective-permute runtime, with
// the source->target pairs serialized as a "src=tgt,src=tgt,..." string.
absl::Status IrEmitter::HandleCollectivePermute(HloInstruction* crs) {
  auto* instr = Cast<HloCollectivePermuteInstruction>(crs);
  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(instr));
  std::string source_target_pairs = absl::StrJoin(
      instr->source_target_pairs(), ",", absl::PairFormatter("="));
  llvm::Value* source_target_pairs_v =
      b()->CreateGlobalStringPtr(source_target_pairs);
  Shape shape = crs->operand(0)->shape();
  TF_ASSIGN_OR_RETURN(BufferAllocation::Slice input_slice,
                      assignment_.GetUniqueSlice(crs->operand(0), {}));
  llvm::Value* input_buffer = EmitBufferPointer(input_slice, shape);
  TF_ASSIGN_OR_RETURN(BufferAllocation::Slice output_slice,
                      assignment_.GetUniqueSlice(crs, {}));
  llvm::Value* output_buffer = EmitBufferPointer(output_slice, shape);
  EmitCallToFunc(
      runtime::kCollectivePermuteSymbolName,
      {GetExecutableRunOptionsArgument(),
       b()->getInt32(static_cast<int32_t>(crs->channel_id().has_value())),
       b()->getInt64(crs->channel_id().has_value()
                         ? *crs->channel_id()
                         : crs->GetModule()->unique_id()),
       b()->getInt32(ShapeUtil::ByteSizeOf(shape)), input_buffer,
       output_buffer, source_target_pairs_v,
       b()->getInt32(source_target_pairs.size())},
      b()->getVoidTy());
  return absl::OkStatus();
}

// Emits PartitionId: the runtime writes the partition id into the output.
absl::Status IrEmitter::HandlePartitionId(HloInstruction* hlo) {
  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(hlo));
  TF_ASSIGN_OR_RETURN(BufferAllocation::Slice output_slice,
                      assignment_.GetUniqueSlice(hlo, {}));
  llvm::Value* output_buffer = EmitBufferPointer(output_slice, hlo->shape());
  EmitCallToFunc(runtime::kPartitionIdSymbolName,
                 {GetExecutableRunOptionsArgument(), output_buffer},
                 b()->getVoidTy());
  return absl::OkStatus();
}

// Emits ReplicaId: the runtime writes the replica id into the output.
absl::Status IrEmitter::HandleReplicaId(HloInstruction* hlo) {
  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(hlo));
  TF_ASSIGN_OR_RETURN(BufferAllocation::Slice output_slice,
                      assignment_.GetUniqueSlice(hlo, {}));
  llvm::Value* output_buffer = EmitBufferPointer(output_slice, hlo->shape());
  EmitCallToFunc(runtime::kReplicaIdSymbolName,
                 {GetExecutableRunOptionsArgument(), output_buffer},
                 b()->getVoidTy());
  return absl::OkStatus();
}

// Parameters only need their target address resolved; their data lives in
// caller-provided buffers.
absl::Status IrEmitter::HandleParameter(HloInstruction* parameter) {
  VLOG(2) << "HandleParameter: " << parameter->ToString();
  return EmitTargetAddressForOp(parameter);
}

// Returns true if reducing `reduce` keeps the surviving dimensions in the
// same physical (minor-to-major) order in the result as in the operand —
// a precondition for the vectorized reduce path.
static bool ReductionPreservesLayout(const HloInstruction& reduce) {
  DCHECK_EQ(reduce.opcode(), HloOpcode::kReduce);
  // Maps each surviving operand dimension to its logical index in the result.
  absl::flat_hash_map<int64_t, int64_t> unreduced_dim_map;
  absl::flat_hash_set<int64_t> reduced_dims(reduce.dimensions().begin(),
                                            reduce.dimensions().end());
  const Shape& operand_shape = reduce.operand(0)->shape();
  const Shape& result_shape = reduce.shape();
  int64_t delta = 0;
  for (int64_t i = 0; i < operand_shape.dimensions_size(); i++) {
    if (reduced_dims.contains(i)) {
      delta++;
    } else {
      // Result dimension index = operand index minus removed dims before it.
      InsertOrDie(&unreduced_dim_map, i, i - delta);
    }
  }
  // Walk operand dims in physical order; the surviving ones must appear in
  // the result's physical order too.
  int64_t result_dim_idx = 0;
  for (int64_t operand_dim_idx = 0;
       operand_dim_idx < operand_shape.dimensions_size(); operand_dim_idx++) {
    int64_t operand_dim =
        operand_shape.layout().minor_to_major(operand_dim_idx);
    if (!reduced_dims.contains(operand_dim)) {
      if (FindOrDie(unreduced_dim_map, operand_dim) !=
          result_shape.layout().minor_to_major(result_dim_idx++)) {
        return false;
      }
    }
  }
  CHECK_EQ(result_dim_idx, result_shape.dimensions_size());
  return true;
}

// Tries to recognize a two-parameter scalar reduction computation as a simple
// binary op (add/mul/and/or/xor/min/max) and returns a generator that emits
// the matching LLVM instruction. On failure returns nullptr and describes why
// in *failure_reason.
IrEmitter::ReductionGenerator IrEmitter::MatchReductionGenerator(
    HloComputation* function, std::string* failure_reason) const {
  CHECK_EQ(function->num_parameters(), 2);
  auto root_instruction = function->root_instruction();
  CHECK(ShapeUtil::IsScalar(root_instruction->shape()));
  if (root_instruction->operand_count() != 2) {
    *failure_reason = "root instruction is not a binary operation";
    return nullptr;
  }
  const Shape& root_shape = root_instruction->shape();
  if (ShapeUtil::ElementIsComplex(root_shape)) {
    // TODO: support complex types if a use case arises; rejected for now.
    *failure_reason = "complex values not supported";
    return nullptr;
  }
  bool root_is_floating_point = ShapeUtil::ElementIsFloating(root_shape);
  bool root_is_integral = ShapeUtil::ElementIsIntegral(root_shape);
  bool root_is_signed = ShapeUtil::ElementIsSigned(root_shape);
  auto lhs = root_instruction->operand(0);
  auto rhs = root_instruction->operand(1);
  auto param_0 = function->parameter_instruction(0);
  auto param_1 = function->parameter_instruction(1);
  // The root must consume exactly the two parameters (either order).
  if (!(lhs == param_0 && rhs == param_1) &&
      !(rhs == param_0 && lhs == param_1)) {
    *failure_reason =
        "root instruction is not a binary operation on the incoming arguments";
    return nullptr;
  }
  CHECK(ShapeUtil::IsScalar(lhs->shape()) && ShapeUtil::IsScalar(rhs->shape()));
  // Each case returns a closure emitting the integer or floating-point form
  // of the op as appropriate for the root's element type.
  switch (root_instruction->opcode()) {
    default:
      *failure_reason = "did not recognize root instruction opcode";
      return nullptr;
    case HloOpcode::kAdd:
      return [root_is_integral](llvm::IRBuilder<>* b, llvm::Value* lhs,
                                llvm::Value* rhs) {
        return root_is_integral ? b->CreateAdd(lhs, rhs)
                                : b->CreateFAdd(lhs, rhs);
      };
    case HloOpcode::kMultiply:
      return [root_is_integral](llvm::IRBuilder<>* b, llvm::Value* lhs,
                                llvm::Value* rhs) {
        return root_is_integral ? b->CreateMul(lhs, rhs)
                                : b->CreateFMul(lhs, rhs);
      };
    case HloOpcode::kAnd:
      return [](llvm::IRBuilder<>* b, llvm::Value* lhs, llvm::Value* rhs) {
        return b->CreateAnd(lhs, rhs);
      };
    case HloOpcode::kOr:
      return [](llvm::IRBuilder<>* b, llvm::Value* lhs, llvm::Value* rhs) {
        return b->CreateOr(lhs, rhs);
      };
    case HloOpcode::kXor:
      return [](llvm::IRBuilder<>* b, llvm::Value* lhs, llvm::Value* rhs) {
        return b->CreateXor(lhs, rhs);
      };
    case HloOpcode::kMaximum:
      return [root_is_floating_point, root_is_signed, this](
                 llvm::IRBuilder<>* b, llvm::Value* lhs,
                 llvm::Value* rhs) -> llvm::Value* {
        if (root_is_floating_point) {
          return llvm_ir::EmitFloatMax(
              lhs, rhs, b,
              hlo_module_config_.debug_options().xla_cpu_enable_fast_min_max());
        }
        // Integer max: signedness selects the compare predicate.
        return b->CreateSelect(
            b->CreateICmp(root_is_signed ? llvm::ICmpInst::ICMP_SGE
                                         : llvm::ICmpInst::ICMP_UGE,
                          lhs, rhs),
            lhs, rhs);
      };
    case HloOpcode::kMinimum:
      return [root_is_floating_point, root_is_signed, this](
                 llvm::IRBuilder<>* b, llvm::Value* lhs,
                 llvm::Value* rhs) -> llvm::Value* {
        if (root_is_floating_point) {
          return llvm_ir::EmitFloatMin(
              lhs, rhs, b,
              hlo_module_config_.debug_options().xla_cpu_enable_fast_min_max());
        }
        // Integer min: signedness selects the compare predicate.
        return b->CreateSelect(
            b->CreateICmp(root_is_signed ? llvm::ICmpInst::ICMP_SLE
                                         : llvm::ICmpInst::ICMP_ULE,
                          lhs, rhs),
            lhs, rhs);
      };
  }
}

// Decomposes `element_count` accumulator lanes into a list of LLVM types:
// full vector registers for each power-of-two chunk that fits, smaller
// vectors for the remainder, and a scalar for the final odd lane. The binary
// expansion of element_count drives which fragment sizes appear.
IrEmitter::ShardedVectorType IrEmitter::CreateShardedVectorType(
    PrimitiveType element_type, unsigned element_count) {
  int vector_register_size_in_elements =
      target_machine_features_.vector_register_byte_size(
          *compute_function()->function()) /
      ShapeUtil::ByteSizeOfPrimitiveType(element_type);
  ShardedVectorType sharded_vector_type;
  llvm::Type* element_ir_type =
      llvm_ir::PrimitiveTypeToIrType(element_type, module_);
  for (int i = 0, e = 1 + Log2Ceiling(element_count); i < e; i++) {
    // Visit each set bit of element_count as a power-of-two fragment.
    const unsigned current_size_fragment = 1u << i;
    if (!(element_count & current_size_fragment)) {
      continue;
    }
    if (current_size_fragment == 1) {
      // A single trailing element is kept scalar.
      sharded_vector_type.push_back(element_ir_type);
      continue;
    }
    if (current_size_fragment >= vector_register_size_in_elements) {
      // Split an oversized fragment into register-width vectors.
      auto vector_type = llvm::VectorType::get(
          element_ir_type, vector_register_size_in_elements, false);
      sharded_vector_type.insert(
          sharded_vector_type.end(),
          current_size_fragment / vector_register_size_in_elements,
          vector_type);
      // Both are powers of two, so the division above is exact.
      CHECK_EQ(current_size_fragment % vector_register_size_in_elements, 0);
      continue;
    }
    // Sub-register fragment: emit one narrower vector.
    sharded_vector_type.push_back(
        llvm::VectorType::get(element_ir_type, current_size_fragment, false));
  }
  return sharded_vector_type;
}

// Emits the inner reduction loop for the vectorized reduce: initializes the
// sharded accumulator with the init value, loops over the reduced dimensions
// accumulating loads from `arg`, and returns the accumulator as SSA values.
absl::StatusOr<IrEmitter::ShardedVector>
IrEmitter::EmitInnerLoopForVectorizedReduction(
    const ReductionGenerator& reduction_generator,
    const llvm_ir::IrArray::Index& output_index,
    const ShardedVectorType& accumulator_type, HloInstruction* init_value,
    HloInstruction* arg, absl::Span<const int64_t> dimensions,
    llvm::Align element_alignment) {
  // One stack slot per accumulator shard.
  ShardedVector accumulator;
  accumulator.reserve(accumulator_type.size());
  for (auto accumulator_shard_type : accumulator_type) {
    accumulator.push_back(llvm_ir::EmitAllocaAtFunctionEntry(
        accumulator_shard_type, "accumulator", b(), 0));
  }
  llvm::Value* init_value_ssa =
      Load(IrShapeType(init_value->shape()), GetEmittedValueFor(init_value));
  for (llvm::Value* accumulator_shard :
       accumulator) {
    // Splat the scalar init value across vector shards; scalar shards take it
    // directly.
    llvm::Value* initial_value;
    auto shard_type =
        llvm::cast<llvm::AllocaInst>(accumulator_shard)->getAllocatedType();
    if (auto vector_type = llvm::dyn_cast<llvm::VectorType>(shard_type)) {
      initial_value =
          VectorSplat(vector_type->getElementCount(), init_value_ssa);
    } else {
      initial_value = init_value_ssa;
    }
    AlignedStore(initial_value, accumulator_shard, element_alignment);
  }

  // One loop per reduced dimension; non-reduced index positions are filled in
  // from output_index below.
  llvm_ir::ForLoopNest reduction_loop_nest(IrName(arg, "vectorized_inner"),
                                           b());
  std::vector<llvm::Value*> input_multi_index =
      reduction_loop_nest.AddLoopsForShapeOnDimensions(arg->shape(),
                                                       dimensions,
                                                       "reduction_dim");
  SetToFirstInsertPoint(reduction_loop_nest.GetInnerLoopBodyBasicBlock(), b());
  llvm_ir::IrArray arg_array(GetIrArrayFor(arg));
  llvm_ir::IrArray::Index::const_iterator it = output_index.begin();

  // nullptr entries mark non-reduced dimensions; take their values from the
  // output index, in order.
  for (auto& i : input_multi_index) {
    if (i == nullptr) {
      i = *it++;
    }
  }
  CHECK(output_index.end() == it);

  llvm_ir::IrArray::Index input_index(input_multi_index, arg->shape(),
                                      b()->getInt64Ty());
  llvm::Value* input_address =
      arg_array.EmitArrayElementAddress(input_index, b());

  // Reduce each shard against consecutive chunks of the input, advancing the
  // input pointer by one shard between iterations.
  for (int i = 0; i < accumulator.size(); i++) {
    auto alloca = llvm::cast<llvm::AllocaInst>(accumulator[i]);
    auto current_accumulator_value = AlignedLoad(
        alloca->getAllocatedType(), accumulator[i], element_alignment);
    auto addend = AlignedLoad(alloca->getAllocatedType(), input_address,
                              element_alignment);
    arg_array.AnnotateLoadStoreInstructionWithMetadata(addend);

    auto reduced_result =
        reduction_generator(b(), current_accumulator_value, addend);
    AlignedStore(reduced_result, accumulator[i], element_alignment);

    if (i != (accumulator.size() - 1)) {
      input_address =
          ConstInBoundsGEP1_32(reduced_result->getType(), input_address, 1);
    }
  }

  SetToFirstInsertPoint(reduction_loop_nest.GetOuterLoopExitBasicBlock(), b());

  // Load the final accumulator values back out as SSA values.
  ShardedVector result_ssa;
  result_ssa.reserve(accumulator.size());
  for (auto accumulator_shard : accumulator) {
    auto alloca = llvm::cast<llvm::AllocaInst>(accumulator_shard);
    result_ssa.push_back(AlignedLoad(alloca->getAllocatedType(),
                                     accumulator_shard, element_alignment));
  }
  return result_ssa;
}

// Stores the sharded value `value_to_store` to consecutive addresses starting
// at `store_address`, annotating each store with the containing array's
// load/store metadata.
void IrEmitter::EmitShardedVectorStore(
    llvm::Value* store_address, const std::vector<llvm::Value*>& value_to_store,
    llvm::Align alignment, const llvm_ir::IrArray& containing_array) {
  for (int i = 0; i < value_to_store.size(); i++) {
    auto store_instruction =
        AlignedStore(value_to_store[i], store_address, alignment);
    containing_array.AnnotateLoadStoreInstructionWithMetadata(
        store_instruction);

    // Advance past the shard just written, except after the last one.
    if (i != (value_to_store.size() - 1)) {
      store_address =
          ConstInBoundsGEP1_32(value_to_store[i]->getType(), store_address, 1);
    }
  }
}

// Attempts to emit `reduce` as a vectorized loop nest. Returns true on
// success; returns false (setting *failure_reason where informative) when the
// reduction cannot be vectorized, in which case the caller falls back to the
// default lowering.
absl::StatusOr<bool> IrEmitter::EmitVectorizedReduce(
    HloInstruction* reduce, HloInstruction* arg, HloInstruction* init_value,
    absl::Span<const int64_t> dimensions, HloComputation* function,
    std::string* failure_reason) {
  if (!reduce->shape().IsArray()) {
    *failure_reason = "vectorization of variadic reduce not implemented";
    return false;
  }

  if (!ReductionPreservesLayout(*reduce)) {
    return false;
  }

  ReductionGenerator reduction_generator =
      MatchReductionGenerator(function, failure_reason);
  if (!reduction_generator) {
    return false;
  }

  int vector_register_size_in_elements =
      target_machine_features_.vector_register_byte_size(
          *compute_function()->function()) /
      ShapeUtil::ByteSizeOfPrimitiveType(reduce->shape().element_type());
  if (vector_register_size_in_elements == 0) {
    // Element type is wider than a vector register; give up.
    return false;
  }

  int vectorization_factor_in_bytes =
      target_machine_features_.vectorization_factor_in_bytes();
  // Number of elements processed per vectorized loop iteration.
  const int vectorization_factor =
      vectorization_factor_in_bytes /
      ShapeUtil::ByteSizeOfPrimitiveType(reduce->shape().element_type());

  bool is_reduction_over_minor_dimension = absl::c_linear_search(
      dimensions, LayoutUtil::Minor(arg->shape().layout(), 0));

  llvm::Align element_alignment(tsl::MathUtil::GCD<unsigned>(
      ShapeUtil::ByteSizeOfPrimitiveType(reduce->shape().element_type()),
      MinimumAlignmentForPrimitiveType(reduce->shape().element_type())));

  if
      (is_reduction_over_minor_dimension) {
    *failure_reason = "reduction over minor dimension not implemented";
    return false;
  }

  CHECK(!reduce->shape().IsTuple());
  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(reduce));

  // Build loops over all output dimensions except the innermost one; the
  // innermost dimension is handled below in vectorization_factor chunks.
  llvm_ir::ForLoopNest loop_nest(IrName(reduce), b());
  std::vector<llvm::Value*> array_multi_index(
      reduce->shape().dimensions_size());
  for (int i = LayoutUtil::MinorToMajor(reduce->shape()).size() - 1; i > 0;
       --i) {
    int64_t dimension = LayoutUtil::Minor(reduce->shape().layout(), i);
    int64_t start_index = 0;
    int64_t end_index = reduce->shape().dimensions(dimension);
    std::unique_ptr<llvm_ir::ForLoop> loop = loop_nest.AddLoop(
        start_index, end_index, absl::StrFormat("dim.%d", dimension));
    array_multi_index[dimension] = loop->GetIndVarValue();
  }

  int64_t innermost_dimension = LayoutUtil::Minor(reduce->shape().layout(), 0);
  int64_t innermost_dimension_size =
      reduce->shape().dimensions(innermost_dimension);

  if (llvm::BasicBlock* innermost_body_bb =
          loop_nest.GetInnerLoopBodyBasicBlock()) {
    SetToFirstInsertPoint(innermost_body_bb, b());
  }

  auto outermost_loop_exit_block = loop_nest.GetOuterLoopExitBasicBlock();

  // Main vectorized loop over the innermost dimension, stepping by
  // vectorization_factor; any remainder is handled afterwards.
  if (innermost_dimension_size >= vectorization_factor) {
    int64_t start_index = 0;
    int64_t end_index = (innermost_dimension_size / vectorization_factor) *
                        vectorization_factor;
    std::unique_ptr<llvm_ir::ForLoop> loop =
        loop_nest.AddLoop(start_index, end_index, vectorization_factor,
                          absl::StrFormat("dim.%d", innermost_dimension));
    array_multi_index[innermost_dimension] = loop->GetIndVarValue();

    SetToFirstInsertPoint(loop->GetBodyBasicBlock(), b());

    ShardedVectorType vector_type = CreateShardedVectorType(
        reduce->shape().element_type(), vectorization_factor);
    llvm_ir::IrArray::Index array_index(array_multi_index, reduce->shape(),
                                        b()->getInt64Ty());
    TF_ASSIGN_OR_RETURN(std::vector<llvm::Value*> accumulator,
                        EmitInnerLoopForVectorizedReduction(
                            reduction_generator, array_index, vector_type,
                            init_value, arg, dimensions, element_alignment));

    llvm_ir::IrArray target_array
        = GetIrArrayFor(reduce);
    llvm::Value* output_address =
        target_array.EmitArrayElementAddress(array_index, b());
    EmitShardedVectorStore(output_address, accumulator, element_alignment,
                           target_array);

    // Reposition the insert point at the vectorized loop's exit so the
    // remainder epilogue below is emitted after it.
    if (auto exit_terminator = loop->GetExitBasicBlock()->getTerminator()) {
      CHECK_GT(LayoutUtil::MinorToMajor(reduce->shape()).size(), 1);
      b()->SetInsertPoint(exit_terminator);
    } else {
      CHECK_EQ(LayoutUtil::MinorToMajor(reduce->shape()).size(), 1);
      b()->SetInsertPoint(loop->GetExitBasicBlock());
    }
  }

  // Epilogue: reduce the innermost elements left over after the
  // vectorization_factor-sized chunks.
  if (innermost_dimension_size % vectorization_factor) {
    array_multi_index[innermost_dimension] =
        b()->getInt64(innermost_dimension_size -
                      (innermost_dimension_size % vectorization_factor));
    ShardedVectorType vector_type = CreateShardedVectorType(
        reduce->shape().element_type(),
        innermost_dimension_size % vectorization_factor);
    llvm_ir::IrArray::Index array_index(array_multi_index, reduce->shape(),
                                        b()->getInt64Ty());

    // Temporarily allow reassociation while emitting the epilogue reduction;
    // the guard restores the previous flags on scope exit.
    llvm::IRBuilderBase::FastMathFlagGuard guard(*b());
    llvm::FastMathFlags flags = b()->getFastMathFlags();
    flags.setAllowReassoc(true);
    b()->setFastMathFlags(flags);

    TF_ASSIGN_OR_RETURN(std::vector<llvm::Value*> accumulator,
                        EmitInnerLoopForVectorizedReduction(
                            reduction_generator, array_index, vector_type,
                            init_value, arg, dimensions, element_alignment));

    llvm_ir::IrArray target_array = GetIrArrayFor(reduce);
    llvm::Value* output_address =
        target_array.EmitArrayElementAddress(array_index, b());
    EmitShardedVectorStore(output_address, accumulator, element_alignment,
                           target_array);
  }

  if (outermost_loop_exit_block) {
    b()->SetInsertPoint(outermost_loop_exit_block);
  }

  return true;
}

// Lowers a reduce HLO: tries the vectorized emitter first and falls back to
// the elemental DefaultAction lowering when it declines.
absl::Status IrEmitter::HandleReduce(HloInstruction* reduce) {
  auto arg = reduce->mutable_operand(0);
  auto init_value = reduce->mutable_operand(1);
  absl::Span<const int64_t> dimensions(reduce->dimensions());
  HloComputation* function = reduce->to_apply();

  // Reductions may reassociate; restore the previous setting on scope exit.
  bool saved_allow_reassociation = allow_reassociation_;
  allow_reassociation_ = true;
  auto cleanup = absl::MakeCleanup([saved_allow_reassociation,
                                    this]() {
    allow_reassociation_ = saved_allow_reassociation;
  });

  if (!options::VectorizedReduceDisabled(hlo_module_config_)) {
    std::string vectorization_failure_reason;
    TF_ASSIGN_OR_RETURN(
        bool vectorization_successful,
        EmitVectorizedReduce(reduce, arg, init_value, dimensions, function,
                             &vectorization_failure_reason));
    if (vectorization_successful) {
      VLOG(1) << "Successfully vectorized reduction " << reduce->ToString()
              << "\n";
      return absl::OkStatus();
    } else {
      VLOG(1) << "Could not vectorize reduction " << reduce->ToString() << ": "
              << vectorization_failure_reason;
    }
  }

  return DefaultAction(reduce);
}

absl::Status IrEmitter::HandleSend(HloInstruction* send) {
  return Unimplemented("Send is not implemented on CPU.");
}

absl::Status IrEmitter::HandleSendDone(HloInstruction* send_done) {
  return Unimplemented("Send-done is not implemented on CPU.");
}

absl::Status IrEmitter::HandleScatter(HloInstruction*) {
  return Unimplemented("Scatter is not implemented on CPUs.");
}

// Lowers a slice HLO. Where the sliced region is contiguous in memory the
// slice is emitted as memcpy-style element transfers inside a loop nest over
// the non-contiguous dimensions; otherwise defers to DefaultAction.
absl::Status IrEmitter::HandleSlice(HloInstruction* slice) {
  VLOG(2) << "HandleSlice: " << slice->ToString();
  auto operand = slice->operand(0);

  // Parallelized loops and layout-changing slices use the elemental emitter.
  if (ShouldEmitParallelLoopFor(*slice)) {
    return DefaultAction(slice);
  }
  if (!LayoutUtil::Equal(operand->shape().layout(), slice->shape().layout())) {
    return DefaultAction(slice);
  }

  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(slice));

  if (ShapeUtil::IsZeroElementArray(slice->shape())) {
    return absl::OkStatus();
  }

  const Layout& layout = operand->shape().layout();
  const int64_t num_dims = operand->shape().dimensions_size();

  // "Inner" dimensions: the maximal minor-to-major run whose extent is not
  // changed by the slice; these stay contiguous in source and target.
  absl::flat_hash_set<int64_t> inner_dims;
  for (int64_t dim : LayoutUtil::MinorToMajor(layout)) {
    if (operand->shape().dimensions(dim) != slice->shape().dimensions(dim)) {
      break;
    }
    inner_dims.insert(dim);
  }

  // All dimensions preserved: the slice is the whole operand, so copy it.
  const bool is_trivial_copy = (inner_dims.size() == num_dims);
  if (is_trivial_copy) {
    if (ShapeUtil::IsEffectiveScalar(slice->shape())) {
      return DefaultAction(slice);
    } else {
      return EmitMemcpy(*slice, *operand);
    }
  }

  const Shape
      logical_element_shape = ShapeUtil::FilterDimensions(
          [&inner_dims](int64_t dim) { return inner_dims.contains(dim); },
          operand->shape());

  // Primitive elements transferred per "logical element" of the outer
  // iteration space.
  const int64_t primitive_elements_per_logical_element =
      ShapeUtil::ElementsIn(logical_element_shape);

  // The most-minor dimension not wholly preserved by the slice; if its stride
  // is 1 the copy along it is contiguous and folds into the memcpy.
  const int64_t memcpy_dim = LayoutUtil::Minor(layout, inner_dims.size());

  const bool memcpy_is_contiguous = slice->slice_strides(memcpy_dim) == 1;
  const int64_t memcpy_logical_elements =
      memcpy_is_contiguous
          ? slice->slice_limits(memcpy_dim) - slice->slice_starts(memcpy_dim)
          : 1;

  // Dimensions iterated by explicit loops, excluding the memcpy dimension
  // when it is contiguous.
  llvm::SmallVector<int64_t> outer_dims;
  for (int64_t i = 0; i < num_dims - inner_dims.size() - 1; ++i) {
    outer_dims.push_back(LayoutUtil::Major(layout, i));
  }
  if (!memcpy_is_contiguous) {
    outer_dims.push_back(memcpy_dim);
  }

  llvm_ir::IrArray target_array = GetIrArrayFor(slice);

  const int64_t num_outer_loops = outer_dims.size();
  llvm_ir::ForLoopNest loops(IrName(slice), b());
  std::vector<llvm::Value*> target_multi_index =
      loops.AddLoopsForShapeOnDimensions(slice->shape(), outer_dims, "slice");

  // Unlooped dimensions are addressed at index 0 in the target.
  std::replace(target_multi_index.begin(), target_multi_index.end(),
               static_cast<llvm::Value*>(nullptr),
               static_cast<llvm::Value*>(b()->getInt64(0)));
  llvm_ir::IrArray::Index target_index(target_multi_index, slice->shape(),
                                       b()->getInt64Ty());

  if (num_outer_loops > 0) {
    SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), b());
  }

  llvm_ir::IrArray source_array = GetIrArrayFor(operand);
  const llvm_ir::IrArray::Index source_index = target_index.SourceIndexOfSlice(
      operand->shape(), slice->slice_starts(), slice->slice_strides(), b());

  llvm::Value* memcpy_dest =
      target_array.EmitArrayElementAddress(target_index, b(), "slice.dest");
  llvm::Value* memcpy_source =
      source_array.EmitArrayElementAddress(source_index, b(), "slice.source");

  const int64_t memcpy_elements =
      primitive_elements_per_logical_element * memcpy_logical_elements;

  EmitTransferElements(memcpy_dest, memcpy_source, memcpy_elements,
                       slice->shape().element_type(), target_array,
                       source_array);

  if (VLOG_IS_ON(2)) {
    const int64_t memcpy_bytes =
        ShapeUtil::ByteSizeOf(logical_element_shape) * memcpy_elements;
    VLOG(2) << " emitted copy of " << memcpy_bytes << " bytes inside "
            << num_outer_loops << " loops";
  }

  if (num_outer_loops > 0) {
    SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), b());
  }

  return absl::OkStatus();
}

// Scalar dynamic-slice is a single-element copy; everything else goes through
// the elemental emitter.
absl::Status IrEmitter::HandleDynamicSlice(HloInstruction* dynamic_slice) {
  if (ShapeUtil::IsScalar(dynamic_slice->shape())) {
    TF_RETURN_IF_ERROR(EmitTargetAddressForOp(dynamic_slice));
    return EmitMemcpy(*dynamic_slice->operand(0), *dynamic_slice);
  }
  return DefaultAction(dynamic_slice);
}

// Dynamic-update-slice: scalar updates are a memcpy; updates provably
// applicable in place reuse the input buffer; otherwise DefaultAction.
absl::Status IrEmitter::HandleDynamicUpdateSlice(
    HloInstruction* dynamic_update_slice) {
  auto update = dynamic_update_slice->operand(1);
  if (ShapeUtil::IsScalar(dynamic_update_slice->shape())) {
    TF_RETURN_IF_ERROR(EmitTargetAddressForOp(dynamic_update_slice));
    return EmitMemcpy(*update, *dynamic_update_slice);
  } else if (llvm_ir::CanUpdateDynamicSliceInPlace(dynamic_update_slice,
                                                   assignment_)) {
    TF_RETURN_IF_ERROR(EmitTargetAddressForOp(dynamic_update_slice));
    auto operands = GetIrArraysForOperandsOf(dynamic_update_slice);
    return llvm_ir::EmitDynamicUpdateSliceInPlace(
        operands, GetIrArrayFor(dynamic_update_slice),
        IrName(dynamic_update_slice, "in_place"), b());
  }
  return DefaultAction(dynamic_update_slice);
}

absl::Status IrEmitter::HandleRecv(HloInstruction* recv) {
  return Unimplemented("Recv is not implemented on CPU.");
}

absl::Status IrEmitter::HandleRecvDone(HloInstruction* recv_done) {
  return Unimplemented("Recv-done is not implemented on CPU.");
}

// Pad entry point: resolves the operand/padding-value/output arrays and
// delegates to the overload below.
absl::Status IrEmitter::HandlePad(HloInstruction* pad) {
  CHECK_EQ(pad->operand_count(), 2);
  const auto operand = pad->operand(0);
  const auto padding_value = pad->operand(1);
  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(pad));
  return HandlePad(pad, GetIrArrayFor(operand), GetIrArrayFor(padding_value),
                   GetIrArrayFor(pad));
}

// Pad implementation: fills the output with the padding value, then scatters
// the operand elements into their padded positions.
absl::Status IrEmitter::HandlePad(HloInstruction* pad,
                                  const llvm_ir::IrArray& operand_array,
                                  const
                                      llvm_ir::IrArray& padding_value_array,
                                  const llvm_ir::IrArray& output_array) {
  CHECK_EQ(pad->operand_count(), 2);

  // Negative edge padding must have been rewritten away before IR emission.
  for (auto& padding_dimension : pad->padding_config().dimensions()) {
    if (padding_dimension.edge_padding_low() < 0 ||
        padding_dimension.edge_padding_high() < 0) {
      return InternalStrCat(
          "Encountered negative padding in IrEmitter on CPU. "
          "This should have been eliminated at the HLO level. ",
          pad->ToString());
    }
  }

  // Step 1: fill the entire output with the (scalar) padding value.
  const HloInstruction* padding_value = pad->operand(1);
  const auto index_type = b()->getInt64Ty();
  const auto index = llvm_ir::IrArray::Index(index_type);
  llvm::Value* padding_value_addr = padding_value_array.EmitArrayElementAddress(
      index, b(), "padding_value_addr", true, nullptr);
  const llvm_ir::ElementGenerator element_generator =
      [this, padding_value,
       padding_value_addr](const llvm_ir::IrArray::Index& target_index) {
        return b()->CreateLoad(IrShapeType(padding_value->shape()),
                               padding_value_addr);
      };
  TF_RETURN_IF_ERROR(EmitTargetElementLoop(
      pad, "initialize", element_generator,
      std::optional<const llvm_ir::IrArray>(output_array)));

  // Step 2: loop over the operand and write each element to its padded spot.
  llvm_ir::ForLoopNest loops(IrName(pad, "assign"), b());
  const HloInstruction* operand = pad->operand(0);
  const llvm_ir::IrArray::Index operand_index =
      loops.AddLoopsForShape(operand->shape(), "operand");

  SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), b());

  llvm::Value* operand_data =
      operand_array.EmitReadArrayElement(operand_index, b());

  // Per dimension:
  //   output_index = operand_index * (interior_padding + 1) + edge_padding_low
  const PaddingConfig& padding_config = pad->padding_config();
  std::vector<llvm::Value*> output_multi_index;
  for (size_t i = 0; i < operand_index.size(); ++i) {
    llvm::Value* offset =
        Mul(operand_index[i],
            b()->getInt64(padding_config.dimensions(i).interior_padding() + 1));
    llvm::Value* index = Add(
        offset, b()->getInt64(padding_config.dimensions(i).edge_padding_low()));
    output_multi_index.push_back(index);
  }

  llvm_ir::IrArray::Index output_index(
      output_multi_index, output_array.GetShape(), operand_index.GetType());
  output_array.EmitWriteArrayElement(output_index,
                                     operand_data, b());

  SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), b());
  return absl::OkStatus();
}

// Lowers a fusion HLO: in-place fused dynamic-update-slice, loop fusion, or
// output fusion (handled here only for dot + addend; see kOutput branch).
absl::Status IrEmitter::HandleFusion(HloInstruction* fusion) {
  auto* root = fusion->fused_expression_root();
  if (llvm_ir::CanEmitFusedDynamicUpdateSliceInPlace(fusion, assignment_)) {
    VLOG(3) << "HandleFusion FusedDynamicUpdateSliceInPlace";
    CpuElementalIrEmitter elemental_emitter(hlo_module_config_, this, module_);
    FusedIrEmitter fused_emitter(elemental_emitter);
    BindFusionArguments(fusion, &fused_emitter);

    TF_RETURN_IF_ERROR(EmitTargetAddressForOp(fusion));
    return llvm_ir::EmitFusedDynamicUpdateSliceInPlace(
        fusion, GetIrArrayFor(fusion), &fused_emitter, b());
  } else if (fusion->IsLoopFusion()) {
    VLOG(3) << "HandleFusion kLoop";
    CpuElementalIrEmitter elemental_emitter(hlo_module_config_, this, module_);
    FusedIrEmitter fused_emitter(elemental_emitter);
    BindFusionArguments(fusion, &fused_emitter);
    TF_ASSIGN_OR_RETURN(auto generator,
                        fused_emitter.GetGenerator(
                            *fusion->fused_expression_root()));
    return EmitTargetElementLoop(fusion, "kLoop_fusion", generator,
                                 std::nullopt);
  } else if (fusion->IsOutputFusion()) {
    VLOG(3) << "HandleFusion kOutput";
    // The root combines a dot with an addend, in either operand order.
    int64_t dot_op_index =
        root->operand(0)->opcode() == HloOpcode::kDot ?
            0 : 1;
    const HloInstruction* dot = root->operand(dot_op_index);
    CHECK_EQ(dot->opcode(), HloOpcode::kDot)
        << dot->ToString() << " "
        << fusion->fused_instructions_computation()->ToString();

    // All three inputs must be fusion parameters; map them back to the
    // fusion's operands.
    int64_t dot_lhs_param_number = dot->operand(0)->parameter_number();
    int64_t dot_rhs_param_number = dot->operand(1)->parameter_number();
    int64_t addend_param_number =
        root->operand(1 - dot_op_index)->parameter_number();

    Shape target_shape = fusion->shape();
    TF_RETURN_IF_ERROR(EmitTargetAddressForOp(fusion));
    llvm_ir::IrArray target_array = GetIrArrayFor(fusion);

    llvm_ir::IrArray lhs_array(
        GetIrArrayFor(fusion->operand(dot_lhs_param_number)));
    llvm_ir::IrArray rhs_array(
        GetIrArrayFor(fusion->operand(dot_rhs_param_number)));
    llvm_ir::IrArray addend_array(
        GetIrArrayFor(fusion->operand(addend_param_number)));

    TF_RETURN_IF_ERROR(
        EmitDotOperation(*dot, target_array, lhs_array, rhs_array,
                         &addend_array, GetExecutableRunOptionsArgument(), b(),
                         hlo_module_config_, target_machine_features_));
    return absl::OkStatus();
  } else {
    return Unimplemented("Fusion kind not implemented on CPU");
  }
}

// Lowers a call HLO: a parallel fork-join call when the callee root's backend
// config carries outer-dimension partitions, otherwise a plain global call to
// the already-emitted function.
absl::Status IrEmitter::HandleCall(HloInstruction* call) {
  HloComputation* computation = call->to_apply();
  llvm::Function* call_ir_function = FindOrDie(
      emitted_functions_,
      ComputationToEmit{computation, allow_reassociation_});

  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(call));

  auto backend_config_or =
      computation->root_instruction()->backend_config<BackendConfig>();
  if (backend_config_or.ok() &&
      !backend_config_or->outer_dimension_partitions().empty()) {
    // Partitioned: emit a fork-join call over the outer dimensions.
    std::vector<llvm::Value*> call_args = GetArrayFunctionCallArguments(
        {}, b(), computation->name(),
        emitted_value_[call],
        GetExecutableRunOptionsArgument(),
        GetBufferTableArgument(),
        GetStatusArgument(),
        GetProfileCountersArgument());

    HloInstruction* root = computation->root_instruction();
    TF_RETURN_IF_ERROR(EmitCallToParallelForkJoin(
        call_args, root->shape(),
        backend_config_or->outer_dimension_partitions(), b(), call_ir_function,
        computation->name()));
    if (ComputationTransitivelyContainsCustomCall(computation)) {
      EmitEarlyReturnIfErrorStatus();
    }
  } else {
    EmitGlobalCall(*computation, computation->name());
  }

  return absl::OkStatus();
}

// Copies the valid elements of a dynamically-shaped value into `target_array`
// and writes the dynamic dimension sizes as int32 metadata placed after the
// static-shape-sized raw data.
absl::Status IrEmitter::EmitSliceToDynamic(
    const HloInstruction* hlo,
    absl::Span<const llvm_ir::IrArray> source_arrays,
    const llvm_ir::IrArray& target_array) {
  std::vector<llvm::Value*> dynamic_dims;
  // Byte offset at which the dimension-size metadata starts.
  int32_t raw_data_size =
      ShapeUtil::ByteSizeOf(ShapeUtil::MakeStaticShape(hlo->shape()));
  llvm::Value* dest_buffer = target_array.GetBasePointer();

  // Operands 1..N hold the dynamic sizes of dimensions 0..N-1.
  for (int64_t i = 1; i < hlo->operand_count(); ++i) {
    const int64_t dim_index = i - 1;
    llvm::Value* source_buffer = source_arrays[i].GetBasePointer();
    llvm::LoadInst* dyn_dim_size = Load(IrShapeType(hlo->operand(i)->shape()),
                                        source_buffer, "dyn_dim_size");

    llvm::Value* metadata = b()->CreateConstInBoundsGEP1_32(
        b()->getInt8Ty(), dest_buffer,
        raw_data_size + dim_index * sizeof(int32_t));
    b()->CreateStore(dyn_dim_size, metadata);
    dynamic_dims.push_back(b()->CreateIntCast(dyn_dim_size, b()->getInt64Ty(),
                                              /*isSigned=*/true,
                                              "i64_dyn_dim_size"));
  }

  // Copy the valid (dynamic-extent) elements through a linearized index into
  // the target buffer.
  auto loop_body_emitter =
      [&](const llvm_ir::IrArray::Index& array_index) -> absl::Status {
    llvm::Value* source_element =
        source_arrays[0].EmitReadArrayElement(array_index, b());
    llvm::Value* linear_index = array_index.Linearize(dynamic_dims, b());
    llvm_ir::IrArray::Index dest_index(linear_index, target_array.GetShape(),
                                       b());
    target_array.EmitWriteArrayElement(dest_index, source_element, b());
    return absl::OkStatus();
  };
  return llvm_ir::LoopEmitter(loop_body_emitter, target_array.GetShape(),
                              dynamic_dims, b())
      .EmitLoop(IrName(hlo));
}

// Custom-call "SliceToDynamic": gathers operand arrays and delegates to
// EmitSliceToDynamic.
absl::Status IrEmitter::HandleSliceToDynamic(HloInstruction* hlo) {
  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(hlo));
  llvm_ir::IrArray target_array = GetIrArrayFor(hlo);

  std::vector<llvm_ir::IrArray> source_arrays;
  source_arrays.reserve(hlo->operand_count());
  for (auto operand : hlo->operands()) {
    source_arrays.push_back(GetIrArrayFor(operand));
  }

  return EmitSliceToDynamic(hlo,
                            source_arrays, target_array);
}

// Custom-call "PadToStatic": produces a tuple of (static-shaped data, one S32
// scalar per dynamic dimension), reading the dynamic sizes from the metadata
// that follows the raw data in the source buffer.
absl::Status IrEmitter::HandlePadToStatic(HloInstruction* hlo) {
  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(hlo));

  TF_ASSIGN_OR_RETURN(BufferAllocation::Slice data_slice,
                      assignment_.GetUniqueSlice(hlo, {0}));
  std::vector<llvm::Value*> dynamic_dims;
  std::vector<llvm::Value*> tuple_operand_ptrs;
  const Shape& data_shape = ShapeUtil::GetSubshape(hlo->shape(), {0});
  const Shape& input_shape = hlo->operand(0)->shape();
  llvm::Value* data_address = EmitBufferPointer(data_slice, data_shape);
  llvm::Type* data_type = IrShapeType(data_shape);
  llvm_ir::IrArray data_array(data_address, data_type, data_shape);
  llvm::Value* source_buffer = GetEmittedValueFor(hlo->operand(0));

  // Dimension-size metadata lives immediately after the static-shape-sized
  // raw data in the source buffer.
  int64_t raw_data_size =
      ShapeUtil::ByteSizeOf(ShapeUtil::MakeStaticShape(input_shape));

  tuple_operand_ptrs.push_back(data_array.GetBasePointer());

  // Tuple elements 1..N each receive the dynamic size of dimension i - 1.
  for (int i = 1; i < hlo->shape().tuple_shapes_size(); ++i) {
    const Shape& dim_shape = ShapeUtil::GetSubshape(hlo->shape(), {i});
    TF_RET_CHECK(Shape::Equal()(dim_shape, ShapeUtil::MakeScalarShape(S32)));
    TF_ASSIGN_OR_RETURN(BufferAllocation::Slice dim_size_slice,
                        assignment_.GetUniqueSlice(hlo, {i}));
    llvm::Value* dest_dim_size_address =
        EmitBufferPointer(dim_size_slice, data_shape);
    const int64_t dim_index = i - 1;
    llvm::Value* metadata = b()->CreateConstInBoundsGEP1_32(
        b()->getInt8Ty(), source_buffer,
        raw_data_size + dim_index * sizeof(int32_t));
    llvm::Value* dyn_dim_size =
        b()->CreateLoad(b()->getInt32Ty(), metadata, "dyn_dim_size");
    b()->CreateStore(dyn_dim_size, dest_dim_size_address);
    dynamic_dims.push_back(b()->CreateIntCast(dyn_dim_size, b()->getInt64Ty(),
                                              /*isSigned=*/true,
                                              "i64_dyn_dim_size"));
    tuple_operand_ptrs.push_back(dest_dim_size_address);
  }

  // Copy the valid elements from the linearized source into the static
  // destination.
  auto loop_body_emitter =
      [&](const llvm_ir::IrArray::Index& array_index) -> absl::Status {
    llvm::Value* linear_index = array_index.Linearize(dynamic_dims, b());
    llvm_ir::IrArray::Index source_index(linear_index, input_shape, b());
    llvm::Value* source_element =
        GetIrArrayFor(hlo->operand(0)).EmitReadArrayElement(source_index, b());
    data_array.EmitWriteArrayElement(array_index, source_element, b());
    return absl::OkStatus();
  };
  TF_RETURN_IF_ERROR(
      llvm_ir::LoopEmitter(loop_body_emitter, input_shape, dynamic_dims, b())
          .EmitLoop(IrName(hlo)));

  // Emit the output tuple of (data pointer, dim-size pointers...).
  llvm_ir::EmitTuple(GetIrArrayFor(hlo), tuple_operand_ptrs, b());
  return absl::OkStatus();
}

// Custom-call "TopK": checks the supported configuration (F32 input,
// dim0-major layouts, optional batch dimension) and emits a call to the
// kTopKF32 runtime function.
absl::Status IrEmitter::HandleTopK(HloInstruction* hlo) {
  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(hlo));
  const HloInstruction* input = hlo->operand(0);
  const int64_t k = hlo->shape().tuple_shapes(0).dimensions().back();
  const bool has_batch = hlo->shape().tuple_shapes(0).dimensions_size() == 2;
  TF_RET_CHECK(input->shape().element_type() == F32) << hlo->ToString();
  TF_RET_CHECK(LayoutUtil::IsMonotonicWithDim0Major(
      hlo->shape().tuple_shapes(0).layout()))
      << hlo->ToString();
  TF_RET_CHECK(LayoutUtil::IsMonotonicWithDim0Major(
      hlo->shape().tuple_shapes(1).layout()))
      << hlo->ToString();
  TF_RET_CHECK(
      LayoutUtil::IsMonotonicWithDim0Major(hlo->operand(0)->shape().layout()))
      << hlo->ToString();

  TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice values_slice,
                      assignment_.GetUniqueSlice(hlo->operand(0), {}));
  TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_values_slice,
                      assignment_.GetUniqueSlice(hlo, {0}));
  TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice out_indices_slice,
                      assignment_.GetUniqueSlice(hlo, {1}));
  llvm::Value* values_ptr =
      EmitBufferPointer(values_slice, hlo->operand(0)->shape());
  llvm::Value* out_values_ptr =
      EmitBufferPointer(out_values_slice, hlo->shape().tuple_shapes(0));
  llvm::Value* out_indices_ptr =
      EmitBufferPointer(out_indices_slice, hlo->shape().tuple_shapes(1));
  // Runtime signature: (batch, row length, k, values, out values,
  // out indices).
  EmitCallToFunc(
      runtime::kTopKF32SymbolName,
      {b()->getInt64(has_batch ?
                     input->shape().dimensions(0) : 1),
       b()->getInt64(input->shape().dimensions().back()), b()->getInt64(k),
       values_ptr, out_values_ptr, out_indices_ptr},
      b()->getVoidTy());

  llvm_ir::EmitTuple(GetIrArrayFor(hlo), {out_values_ptr, out_indices_ptr},
                     b());
  return absl::OkStatus();
}

#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)

// Emits a stack alloca + memref descriptor for each operand of `custom_call`,
// inserting each descriptor into the argument array `args_val` starting at
// `arg_indx` (both advanced in place via the reference parameters).
std::vector<StackAlloca> IrEmitter::EmitOneDnnOperandsAlloca(
    HloInstruction* custom_call, llvm::Value*& args_val, int& arg_indx) {
  std::vector<StackAlloca> operands_stack_alloca;
  const int num_operands = custom_call->operand_count();
  operands_stack_alloca.reserve(num_operands);
  for (int i = 0; i < num_operands; ++i) {
    llvm_ir::IrArray ir_array(GetIrArrayFor(custom_call->operand(i)));
    StackAlloca stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), ir_array);
    args_val = b()->CreateInsertValue(args_val, stack_alloca.value,
                                      arg_indx++);
    operands_stack_alloca.push_back(std::move(stack_alloca));
  }
  return operands_stack_alloca;
}

// Lowers a oneDNN matmul (or matmul-reorder) custom call into a call to
// `runtime_symbol_name`, marshalling the arguments as an on-stack pointer
// array.
absl::Status IrEmitter::HandleOneDnnMatMulCalls(
    HloInstruction* custom_call, std::string runtime_symbol_name) {
  // args[0]: nargs pointer; args[1]: ExecutableRunOptions; args[2]:
  // serialized matmul config; args[3...]: operand memrefs.
  const int nargs_offset = 3;
  const int num_operands = custom_call->operand_count();
  const int nargs = nargs_offset + num_operands;
  int arg_indx = 0;

  llvm::Type* i64_type = b()->getInt64Ty();
  llvm::Type* ptr_type = b()->getPtrTy();
  llvm::ArrayType* ptr_array_type = llvm::ArrayType::get(ptr_type, nargs);
  llvm::Value* args_val = llvm::UndefValue::get(ptr_array_type);

  // nargs is stored behind a pointer, occupying the first array slot.
  llvm::Value* nargs_val = b()->getInt64(nargs);
  llvm::Value* nargs_ptr =
      llvm_ir::EmitAllocaAtFunctionEntry(i64_type, "nargs", b());
  b()->CreateLifetimeStart(nargs_ptr, b()->getInt64(-1));
  b()->CreateStore(nargs_val, nargs_ptr);
  args_val = b()->CreateInsertValue(args_val, nargs_ptr, arg_indx++);

  llvm::Value* run_opts_val = GetExecutableRunOptionsArgument();
  args_val = b()->CreateInsertValue(args_val, run_opts_val, arg_indx++);

  auto typed_custom_call = Cast<HloCustomCallInstruction>(custom_call);
  auto backend_config =
      typed_custom_call->backend_config<BackendConfig>();
  OneDnnMatMulConfig matmul_config;
  matmul_config.CopyFrom(backend_config->onednn_matmul_config());
  std::string str_config;
  matmul_config.SerializeToString(&str_config);
  // The matmul config is passed as a serialized proto in a global string.
  llvm::Value* matmul_config_val =
      b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(str_config));
  args_val = b()->CreateInsertValue(args_val, matmul_config_val, arg_indx++);

  auto operands_stack_alloca =
      EmitOneDnnOperandsAlloca(custom_call, args_val, arg_indx);
  TF_RET_CHECK(nargs == arg_indx)
      << "Number of arguments don't equal the last argument index.";

  llvm::Value* args_ptr =
      llvm_ir::EmitAllocaAtFunctionEntry(ptr_array_type, "matmul.args", b());
  b()->CreateLifetimeStart(args_ptr, b()->getInt64(-1));
  b()->CreateStore(args_val, args_ptr);

  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(custom_call));

  StackAlloca result_stack_alloca;
  StackAlloca scratch_stack_alloca;
  std::vector<llvm::Value*> fn_call_args;
  fn_call_args.reserve(3);
  // A tuple-shaped result means tuple element 1 is a scratchpad buffer the
  // runtime call also receives.
  const bool use_scratchpad = custom_call->shape().IsTuple();
  if (use_scratchpad) {
    llvm::Value* result_slice_ptr;
    llvm::Value* scratch_slice_ptr;
    llvm_ir::IrArray result_array;
    llvm_ir::IrArray scratch_array;

    // Tuple element 0: the matmul result.
    TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice result_slice,
                        assignment_.GetUniqueSlice(custom_call, {0}));
    const Shape& result_shape = custom_call->shape().tuple_shapes(0);
    result_slice_ptr = EmitBufferPointer(result_slice, result_shape);
    llvm::Type* ir_type = IrShapeType(result_shape);
    result_array = llvm_ir::IrArray(result_slice_ptr, ir_type, result_shape);
    result_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), result_array);
    fn_call_args.push_back(result_stack_alloca.value);

    // Tuple element 1: the scratchpad.
    TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice scratch_slice,
                        assignment_.GetUniqueSlice(custom_call, {1}));
    const Shape& scratch_shape = custom_call->shape().tuple_shapes(1);
    scratch_slice_ptr = EmitBufferPointer(scratch_slice, scratch_shape);
    llvm::Type* scratch_type = IrShapeType(scratch_shape);
    scratch_array =
        llvm_ir::IrArray(scratch_slice_ptr, scratch_type, scratch_shape);
    scratch_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), scratch_array);
    fn_call_args.push_back(scratch_stack_alloca.value);
    llvm_ir::EmitTuple(GetIrArrayFor(custom_call),
                       {result_slice_ptr, scratch_slice_ptr}, b());
  } else {
    llvm_ir::IrArray result_array;
    result_array = GetIrArrayFor(custom_call);
    result_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), result_array);
    fn_call_args.push_back(result_stack_alloca.value);
    // No scratchpad requested: pass a null pointer in its place.
    fn_call_args.push_back(llvm::ConstantPointerNull::get(b()->getPtrTy()));
  }
  fn_call_args.push_back(args_ptr);
  EmitCallToFunc(std::move(runtime_symbol_name), fn_call_args,
                 b()->getVoidTy());

  // End the lifetimes of all stack slots created above.
  b()->CreateLifetimeEnd(nargs_ptr, b()->getInt64(-1));
  b()->CreateLifetimeEnd(args_ptr, b()->getInt64(-1));
  for (auto& alloca : operands_stack_alloca) {
    alloca.EmitLifetimeEnd();
  }
  result_stack_alloca.EmitLifetimeEnd();
  if (use_scratchpad) {
    scratch_stack_alloca.EmitLifetimeEnd();
  }

  return absl::OkStatus();
}

// Lowers a oneDNN convolution custom call, using the same argument-array
// marshalling scheme as HandleOneDnnMatMulCalls.
absl::Status IrEmitter::HandleOneDnnConvolution(HloInstruction* custom_call) {
  // args[0]: nargs pointer; args[1]: ExecutableRunOptions; args[2]:
  // serialized conv config; args[3...]: operand memrefs.
  const int nargs_offset = 3;
  const int num_operands = custom_call->operand_count();
  const int nargs = nargs_offset + num_operands;
  int arg_indx = 0;

  llvm::Type* i64_type = b()->getInt64Ty();
  llvm::Type* ptr_type = b()->getPtrTy();
  llvm::ArrayType* ptr_array_type = llvm::ArrayType::get(ptr_type, nargs);
  llvm::Value* args_val = llvm::UndefValue::get(ptr_array_type);

  llvm::Value* nargs_val = b()->getInt64(nargs);
  llvm::Value* nargs_ptr =
      llvm_ir::EmitAllocaAtFunctionEntry(i64_type, "nargs", b());
  b()->CreateLifetimeStart(nargs_ptr, b()->getInt64(-1));
  b()->CreateStore(nargs_val, nargs_ptr);
  args_val = b()->CreateInsertValue(args_val, nargs_ptr, arg_indx++);

  llvm::Value* run_opts_val = GetExecutableRunOptionsArgument();
  args_val = b()->CreateInsertValue(args_val, run_opts_val, arg_indx++);

  auto typed_custom_call = Cast<HloCustomCallInstruction>(custom_call);
  auto backend_config =
      typed_custom_call->backend_config<BackendConfig>();
  OneDnnConvolutionConfig conv_config;
  conv_config.CopyFrom(backend_config->onednn_conv_config());
  std::string str_config;
  conv_config.SerializeToString(&str_config);
  // The convolution config is passed as a serialized proto.
  llvm::Value* conv_config_val =
      b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(str_config));
  args_val = b()->CreateInsertValue(args_val, conv_config_val, arg_indx++);

  auto operands_stack_alloca =
      EmitOneDnnOperandsAlloca(custom_call, args_val, arg_indx);
  TF_RET_CHECK(nargs == arg_indx)
      << "Number of arguments don't equal the last argument index.";

  llvm::Value* args_ptr = llvm_ir::EmitAllocaAtFunctionEntry(
      ptr_array_type, "convolution.args", b());
  b()->CreateLifetimeStart(args_ptr, b()->getInt64(-1));
  b()->CreateStore(args_val, args_ptr);

  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(custom_call));
  llvm_ir::IrArray result_array = GetIrArrayFor(custom_call);
  auto result_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), result_array);

  EmitCallToFunc(runtime::kOneDnnConvolutionSymbolName,
                 {result_stack_alloca.value, args_ptr}, b()->getVoidTy());

  // End the lifetimes of all stack slots created above.
  b()->CreateLifetimeEnd(nargs_ptr, b()->getInt64(-1));
  for (int i = 0; i < num_operands; ++i) {
    operands_stack_alloca[i].EmitLifetimeEnd();
  }
  b()->CreateLifetimeEnd(args_ptr, b()->getInt64(-1));
  result_stack_alloca.EmitLifetimeEnd();

  return absl::OkStatus();
}

// Lowers a oneDNN layer-norm custom call, using the same argument-array
// marshalling scheme as HandleOneDnnMatMulCalls.
absl::Status IrEmitter::HandleOneDnnLayerNorm(HloInstruction* custom_call) {
  // args[0]: nargs pointer; args[1]: ExecutableRunOptions; args[2]:
  // serialized norm config; args[3...]: operand memrefs.
  const int nargs_offset = 3;
  const int num_operands = custom_call->operand_count();
  const int nargs = nargs_offset + num_operands;
  int arg_indx = 0;

  llvm::Type* i64_type = b()->getInt64Ty();
  llvm::Type* ptr_type = b()->getPtrTy();
  llvm::ArrayType* ptr_array_type = llvm::ArrayType::get(ptr_type, nargs);
  llvm::Value* args_val = llvm::UndefValue::get(ptr_array_type);

  llvm::Value* nargs_val = b()->getInt64(nargs);
  llvm::Value* nargs_ptr =
      llvm_ir::EmitAllocaAtFunctionEntry(i64_type, "nargs", b());
  b()->CreateLifetimeStart(nargs_ptr, b()->getInt64(-1));
  b()->CreateStore(nargs_val, nargs_ptr);
  args_val = b()->CreateInsertValue(args_val, nargs_ptr, arg_indx++);
  // Slot 1: the ExecutableRunOptions pointer.
  llvm::Value* run_opts_val = GetExecutableRunOptionsArgument();
  args_val = b()->CreateInsertValue(args_val, run_opts_val, arg_indx++);
  auto typed_custom_call = Cast<HloCustomCallInstruction>(custom_call);
  auto backend_config = typed_custom_call->backend_config<BackendConfig>();
  // Slot 2: serialized layer-norm config, embedded as a global string.
  OneDnnNormConfig ln_config;
  ln_config.CopyFrom(backend_config->onednn_layer_norm_config());
  std::string str_config;
  ln_config.SerializeToString(&str_config);
  llvm::Value* ln_config_val =
      b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(str_config));
  args_val = b()->CreateInsertValue(args_val, ln_config_val, arg_indx++);
  // Remaining slots: memref descriptors for the HLO operands.
  auto operands_stack_alloca =
      EmitOneDnnOperandsAlloca(custom_call, args_val, arg_indx);
  TF_RET_CHECK(nargs == arg_indx)
      << "Number of arguments don't equal the last argument index.";
  llvm::Value* args_ptr =
      llvm_ir::EmitAllocaAtFunctionEntry(ptr_array_type, "layernorm.args", b());
  b()->CreateLifetimeStart(args_ptr, b()->getInt64(-1));
  b()->CreateStore(args_val, args_ptr);
  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(custom_call));
  llvm_ir::IrArray result_array = GetIrArrayFor(custom_call);
  auto result_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), result_array);
  EmitCallToFunc(runtime::kOneDnnLayerNormSymbolName,
                 {result_stack_alloca.value, args_ptr}, b()->getVoidTy());
  // End lifetimes of all stack temporaries.
  b()->CreateLifetimeEnd(nargs_ptr, b()->getInt64(-1));
  for (int i = 0; i < num_operands; ++i) {
    operands_stack_alloca[i].EmitLifetimeEnd();
  }
  b()->CreateLifetimeEnd(args_ptr, b()->getInt64(-1));
  result_stack_alloca.EmitLifetimeEnd();
  return absl::OkStatus();
}

// Lowers a oneDNN softmax custom call. Unlike the handlers above, the
// runtime call takes its four arguments directly rather than through a
// packed argument array.
absl::Status IrEmitter::HandleOneDnnSoftmax(HloInstruction* custom_call) {
  auto typed_custom_call = Cast<HloCustomCallInstruction>(custom_call);
  auto backend_config = typed_custom_call->backend_config<BackendConfig>();
  // Serialize the softmax config for the runtime to deserialize.
  OneDnnSoftmaxConfig softmax_config;
  softmax_config.CopyFrom(backend_config->onednn_softmax_config());
  std::string str_config;
  softmax_config.SerializeToString(&str_config);
  llvm::Value* softmax_config_val =
      b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(str_config));
  // Softmax takes a single input; emit memref descriptors for it and for
  // the result, then call the runtime directly.
  auto input = custom_call->operand(0);
  llvm_ir::IrArray input_array(GetIrArrayFor(input));
  auto input_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), input_array);
  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(custom_call));
  llvm_ir::IrArray result_array = GetIrArrayFor(custom_call);
  auto result_stack_alloca = GetAllocaAndEmitMemrefInfo(*b(), result_array);
  EmitCallToFunc(runtime::kOneDnnSoftmaxSymbolName,
                 {GetExecutableRunOptionsArgument(), input_stack_alloca.value,
                  result_stack_alloca.value, softmax_config_val},
                 b()->getVoidTy());
  input_stack_alloca.EmitLifetimeEnd();
  result_stack_alloca.EmitLifetimeEnd();
  return absl::OkStatus();
}
// Closes the oneDNN section; the matching #if is before this chunk
// (presumably INTEL_MKL && ENABLE_ONEDNN_V3, as used below — confirm).
#endif

// Dispatches a custom call: special-cases the XLA-internal targets and the
// oneDNN targets, then falls through to the generic custom-call ABI
// selected by the instruction's api_version.
absl::Status IrEmitter::HandleCustomCall(HloInstruction* custom_call) {
  if (custom_call->custom_call_target() == "PadToStatic") {
    return HandlePadToStatic(custom_call);
  }
  if (custom_call->custom_call_target() == "SliceToDynamic") {
    return HandleSliceToDynamic(custom_call);
  }
  if (custom_call->custom_call_target() == "TopK") {
    return HandleTopK(custom_call);
  }
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
  if (custom_call->custom_call_target() == "__onednn$matmul") {
    return HandleOneDnnMatMulCalls(custom_call,
                                   runtime::kOneDnnMatMulSymbolName);
  }
  if (custom_call->custom_call_target() == "__onednn$softmax") {
    return HandleOneDnnSoftmax(custom_call);
  }
  if (custom_call->custom_call_target() == "__onednn$layernorm") {
    return HandleOneDnnLayerNorm(custom_call);
  }
  if (custom_call->custom_call_target() == "__onednn$convolution") {
    return HandleOneDnnConvolution(custom_call);
  }
  if (custom_call->custom_call_target() == "__onednn$matmul_reorder") {
    return HandleOneDnnMatMulCalls(custom_call,
                                   runtime::kOneDnnMatMulReorderSymbolName);
  }
#endif
  absl::Span<HloInstruction* const> operands(custom_call->operands());
  auto typed_custom_call = Cast<HloCustomCallInstruction>(custom_call);
  // Typed-FFI calls pass one buffer per array leaf; older ABIs pass one
  // emitted value per operand.
  auto is_typed_ffi = typed_custom_call->api_version() ==
                      CustomCallApiVersion::API_VERSION_TYPED_FFI;
  std::vector<llvm::Value*> operand_values;
  operand_values.reserve(operands.size());
  for (int64_t i = 0; i < operands.size(); ++i) {
    HloInstruction* operand = operands[i];
    if (is_typed_ffi) {
      // Typed FFI: flatten each operand to its array leaves and pass a
      // buffer pointer for every leaf.
      TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
          operand->shape(), [&](const Shape& shape, const ShapeIndex& index) {
            if (!shape.IsArray()) {
              return absl::OkStatus();
            }
            TF_ASSIGN_OR_RETURN(BufferAllocation::Slice slice,
                                assignment_.GetUniqueSlice(operand, index));
            operand_values.push_back(EmitBufferPointer(slice, shape));
            return absl::OkStatus();
          }));
    } else {
      operand_values.push_back(GetEmittedValueFor(operand));
    }
  }
  // Stage the operand pointers in a stack array the callee can index.
  llvm::AllocaInst* operands_alloca =
      llvm_ir::EmitAllocaAtFunctionEntryWithCount(
          b()->getPtrTy(), b()->getInt32(operand_values.size()),
          "cc_operands_alloca", b());
  if (emit_code_for_msan_) {
    // Mark the alloca as initialized so msan does not flag the callee's
    // reads of it.
    const llvm::DataLayout& dl = module_->getDataLayout();
    llvm::Type* intptr_type = b()->getIntPtrTy(dl);
    EmitCallToFunc(
        "__msan_unpoison",
        {operands_alloca,
         llvm::ConstantInt::get(intptr_type,
                                *operands_alloca->getAllocationSize(dl))},
        b()->getVoidTy());
  }
  for (int64_t i = 0; i < operand_values.size(); ++i) {
    llvm::Value* slot_in_operands_alloca =
        InBoundsGEP(operands_alloca->getAllocatedType(), operands_alloca,
                    {b()->getInt64(i)});
    Store(operand_values[i], slot_in_operands_alloca);
  }
  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(custom_call));
  // For tuple-shaped results, materialize the tuple of element pointers in
  // the result buffer up front.
  std::vector<llvm::Value*> tuple_ptrs;
  if (custom_call->shape().IsTuple()) {
    for (int i = 0; i < ShapeUtil::TupleElementCount(custom_call->shape());
         ++i) {
      const Shape& elem_shape =
          ShapeUtil::GetTupleElementShape(custom_call->shape(), i);
      if (!is_typed_ffi) {
        TF_RET_CHECK(!elem_shape.IsTuple()) << "Nested tuples not implemented";
      }
      TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice slice,
                          assignment_.GetUniqueSlice(custom_call, {i}));
      tuple_ptrs.push_back(EmitBufferPointer(slice, elem_shape));
    }
    llvm_ir::EmitTuple(GetIrArrayFor(custom_call), tuple_ptrs, b());
  }
  auto* output_address =
      GetEmittedValueFor(custom_call);
  // Emit the call with the calling convention matching the declared ABI.
  switch (typed_custom_call->api_version()) {
    case CustomCallApiVersion::API_VERSION_ORIGINAL:
      // Legacy ABI: fn(output, operands).
      EmitCallToFunc(custom_call->custom_call_target(),
                     {output_address, operands_alloca}, b()->getVoidTy());
      break;
    case CustomCallApiVersion::API_VERSION_STATUS_RETURNING:
      // Adds an XlaCustomCallStatus out-parameter; propagate failure.
      EmitCallToFunc(custom_call->custom_call_target(),
                     {output_address, operands_alloca, GetStatusArgument()},
                     b()->getVoidTy());
      EmitEarlyReturnIfErrorStatus();
      break;
    case CustomCallApiVersion::API_VERSION_STATUS_RETURNING_UNIFIED: {
      // Also passes the opaque string and its length.
      absl::string_view opaque = typed_custom_call->opaque();
      EmitCallToFunc(custom_call->custom_call_target(),
                     {output_address, operands_alloca,
                      b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(opaque)),
                      b()->getInt64(opaque.size()), GetStatusArgument()},
                     b()->getVoidTy());
      EmitEarlyReturnIfErrorStatus();
      break;
    }
    case CustomCallApiVersion::API_VERSION_TYPED_FFI: {
      // Typed FFI: collect one result buffer pointer per array leaf of the
      // (possibly tuple-shaped) result, stage them in a stack array, and
      // dispatch through the FFI trampoline.
      std::vector<llvm::Value*> buffer_ptrs;
      if (custom_call->shape().IsTuple()) {
        buffer_ptrs.reserve(ShapeUtil::TupleElementCount(custom_call->shape()));
      }
      TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
          custom_call->shape(),
          [&](const Shape& shape, const ShapeIndex& index) {
            if (!shape.IsArray()) {
              return absl::OkStatus();
            }
            TF_ASSIGN_OR_RETURN(BufferAllocation::Slice slice,
                                assignment_.GetUniqueSlice(custom_call, index));
            buffer_ptrs.push_back(EmitBufferPointer(slice, shape));
            return absl::OkStatus();
          }));
      llvm::AllocaInst* results_alloca =
          llvm_ir::EmitAllocaAtFunctionEntryWithCount(
              b()->getPtrTy(), b()->getInt32(buffer_ptrs.size()),
              "ffi_results_alloca", b());
      if (emit_code_for_msan_) {
        // Unpoison the results array for msan builds (see operand handling).
        const llvm::DataLayout& dl = module_->getDataLayout();
        llvm::Type* intptr_type = b()->getIntPtrTy(dl);
        EmitCallToFunc(
            "__msan_unpoison",
            {results_alloca,
             llvm::ConstantInt::get(intptr_type,
                                    *results_alloca->getAllocationSize(dl))},
            b()->getVoidTy());
      }
      for (int i = 0; i < buffer_ptrs.size(); ++i) {
        llvm::Value* tuple_slot_in_results_alloca =
            InBoundsGEP(results_alloca->getAllocatedType(),
                        results_alloca, {b()->getInt64(i)});
        Store(buffer_ptrs[i], tuple_slot_in_results_alloca);
      }
      EmitCallToFfi(typed_custom_call, results_alloca, operands_alloca);
      EmitEarlyReturnIfErrorStatus();
      break;
    }
    default:
      return Internal(
          "Unknown custom-call API version enum value: %d (%s)",
          typed_custom_call->api_version(),
          CustomCallApiVersion_Name(typed_custom_call->api_version()));
  }
  return absl::OkStatus();
}

// Lowers kWhile into a header/body/exit basic-block loop. Relies on buffer
// assignment having placed the while, its init operand, and the condition
// and body parameters/root all in the same buffers, so the loop state is
// updated in place and no copies are emitted.
absl::Status IrEmitter::HandleWhile(HloInstruction* xla_while) {
  // The condition computation must produce a scalar PRED.
  HloComputation* condition = xla_while->while_condition();
  TF_RET_CHECK(ShapeUtil::IsScalar(condition->root_instruction()->shape()) &&
               condition->root_instruction()->shape().element_type() == PRED)
      << "While condition computation must return bool; got: "
      << ShapeUtil::HumanString(condition->root_instruction()->shape());
  // Verify the buffer-sharing invariant described above for every subshape.
  TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
      xla_while->shape(),
      [this, &xla_while](const Shape& /*subshape*/,
                         const ShapeIndex& index) -> absl::Status {
        auto check = [this](const HloInstruction* a, const HloInstruction* b,
                            const ShapeIndex& index) -> absl::Status {
          const BufferAllocation::Slice slice_a =
              assignment_.GetUniqueSlice(a, index).value();
          const BufferAllocation::Slice slice_b =
              assignment_.GetUniqueSlice(b, index).value();
          if (slice_a != slice_b) {
            return Internal(
                "instruction %s %s does not share slice with "
                "instruction %s %s",
                a->ToString(), slice_a.ToString(), b->ToString(),
                slice_b.ToString());
          }
          return absl::OkStatus();
        };
        TF_RETURN_IF_ERROR(check(xla_while, xla_while->operand(0), index));
        TF_RETURN_IF_ERROR(check(
            xla_while, xla_while->while_condition()->parameter_instruction(0),
            index));
        TF_RETURN_IF_ERROR(
            check(xla_while, xla_while->while_body()->parameter_instruction(0),
                  index));
        TF_RETURN_IF_ERROR(check(
            xla_while, xla_while->while_body()->root_instruction(), index));
        return absl::OkStatus();
      }));
  // Because of the sharing invariant, the while's value is simply its
  // init operand's buffer.
  const HloInstruction* init = xla_while->operand(0);
  emitted_value_[xla_while] = GetEmittedValueFor(init);
  llvm::BasicBlock* header_bb = llvm::BasicBlock::Create(
      module_->getContext(), IrName(xla_while, "header"),
      compute_function()->function());
  Br(header_bb);
  b()->SetInsertPoint(header_bb);
  // Header: evaluate the condition computation and branch on its result.
  EmitGlobalCall(*xla_while->while_condition(), IrName(xla_while, "cond"));
  llvm::Value* while_predicate = ICmpNE(
      Load(IrShapeType(
               xla_while->while_condition()->root_instruction()->shape()),
           GetBufferForGlobalCallReturnValue(*xla_while->while_condition())),
      llvm::ConstantInt::get(llvm_ir::PrimitiveTypeToIrType(PRED, module_), 0));
  llvm::BasicBlock* body_bb =
      llvm::BasicBlock::Create(module_->getContext(), IrName(xla_while, "body"),
                               compute_function()->function());
  // The exit block is created detached and appended after the body is done.
  llvm::BasicBlock* exit_bb = llvm::BasicBlock::Create(
      module_->getContext(), IrName(xla_while, "exit"));
  CondBr(while_predicate, body_bb, exit_bb);
  // Body: run the body computation, then jump back to the header.
  b()->SetInsertPoint(body_bb);
  EmitGlobalCall(*xla_while->while_body(), IrName(xla_while, "body"));
  Br(header_bb);
  llvm::Function* llvm_fn = compute_function()->function();
  llvm_fn->insert(llvm_fn->end(), exit_bb);
  b()->SetInsertPoint(exit_bb);
  return absl::OkStatus();
}

// Member wrapper: forwards to the free-function implementation with this
// emitter's module and builder.
absl::Status IrEmitter::EmitFastConcatenate(
    const HloInstruction* instr,
    absl::Span<const llvm_ir::IrArray> source_arrays,
    const llvm_ir::IrArray& target_array) {
  return ::xla::cpu::EmitFastConcatenate(instr, source_arrays, target_array,
                                         module_, *b());
}

// Emits a memcpy-based concatenate: loops over the dimensions major to the
// concat dimension and copies each operand's contiguous minor region into
// the right offset of the output.
absl::Status EmitFastConcatenate(
    const HloInstruction* instr,
    absl::Span<const llvm_ir::IrArray> source_arrays,
    const llvm_ir::IrArray& target_array, llvm::Module* module,
    llvm::IRBuilder<>& b) {
  auto* concatenate = Cast<HloConcatenateInstruction>(instr);
  const Shape& output_shape = concatenate->shape();
  int64_t concat_dim = concatenate->concatenate_dimension();
  const Layout& output_layout = output_shape.layout();
  auto output_min2maj = LayoutUtil::MinorToMajor(output_layout);
  auto concat_dim_layout_itr = absl::c_find(output_min2maj, concat_dim);
  // Split the minor-to-major order at the concat dimension: dimensions more
  // minor than it form one contiguous copy region per operand.
  std::vector<int64_t> inner_dims(output_min2maj.begin(),
                                  concat_dim_layout_itr);
  std::vector<int64_t> outer_dims(std::next(concat_dim_layout_itr),
                                  output_min2maj.end());
  // Loop only over the outer (more-major) dimensions; inner dimensions are
  // copied wholesale with memcpy below.
  llvm_ir::ForLoopNest loops(IrName(concatenate), &b);
  std::vector<llvm::Value*> target_multi_index =
      loops.AddLoopsForShapeOnDimensions(output_shape, outer_dims, "concat");
  // Unlooped (inner + concat) dimensions index at 0.
  absl::c_replace(target_multi_index, static_cast<llvm::Value*>(nullptr),
                  static_cast<llvm::Value*>(b.getInt64(0)));
  llvm_ir::IrArray::Index target_index(target_multi_index, output_shape,
                                       b.getInt64Ty());
  if (!outer_dims.empty()) {
    SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &b);
  }
  PrimitiveType primitive_type = output_shape.element_type();
  unsigned primitive_type_size =
      ShapeUtil::ByteSizeOfPrimitiveType(primitive_type);
  // Base address of the output region for this outer-loop iteration; each
  // operand is copied at an increasing byte offset from here.
  llvm::Value* target_region_begin =
      target_array.EmitArrayElementAddress(target_index, &b, "target_region");
  int64_t byte_offset_into_target_region = 0;
  // Number of elements per slice along the concat dimension.
  int64_t inner_dims_product = absl::c_accumulate(
      inner_dims, int64_t{1}, [&](int64_t product, int64_t inner_dim) {
        return product * output_shape.dimensions(inner_dim);
      });
  for (int64_t i = 0; i < source_arrays.size(); ++i) {
    const Shape& input_shape = concatenate->operand(i)->shape();
    const llvm_ir::IrArray& source_array = source_arrays[i];
    llvm_ir::IrArray::Index source_index(target_multi_index, input_shape,
                                         b.getInt64Ty());
    llvm::Value* copy_source_address =
        source_array.EmitArrayElementAddress(source_index, &b, "src_addr");
    llvm::Value* copy_target_address =
        b.CreateGEP(b.getInt8Ty(), target_region_begin,
                    b.getInt64(byte_offset_into_target_region));
    ::xla::cpu::EmitTransferElements(
        copy_target_address, copy_source_address,
        inner_dims_product * input_shape.dimensions(concat_dim),
        primitive_type, target_array, source_array, module, b);
    byte_offset_into_target_region += inner_dims_product *
                                      input_shape.dimensions(concat_dim) *
                                      primitive_type_size;
  }
  if (!outer_dims.empty()) {
    SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &b);
  }
  return absl::OkStatus();
}

// Emits a call to printf with the given format string and arguments
// (varargs declaration; useful for debugging generated code).
llvm::Value* IrEmitter::EmitPrintf(absl::string_view fmt,
                                   absl::Span<llvm::Value* const> arguments) {
  std::vector<llvm::Value*> call_args;
  call_args.push_back(b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(fmt)));
  absl::c_copy(arguments, std::back_inserter(call_args));
  return b()->CreateCall(
      b()->GetInsertBlock()->getParent()->getParent()->getOrInsertFunction(
          "printf",
          llvm::FunctionType::get(b()->getInt32Ty(), {b()->getPtrTy()},
                                  /*isVarArg=*/true)),
      call_args);
}

// Same as EmitPrintf but routed through the runtime's stderr symbol.
llvm::Value* IrEmitter::EmitPrintfToStderr(
    absl::string_view fmt, absl::Span<llvm::Value* const> arguments) {
  std::vector<llvm::Value*> call_args;
  call_args.push_back(b()->CreateGlobalStringPtr(llvm_ir::AsStringRef(fmt)));
  absl::c_copy(arguments, std::back_inserter(call_args));
  return b()->CreateCall(
      b()->GetInsertBlock()->getParent()->getParent()->getOrInsertFunction(
          runtime::kPrintfToStderrSymbolName,
          llvm::FunctionType::get(b()->getInt32Ty(), {b()->getPtrTy()},
                                  /*isVarArg=*/true)),
      call_args);
}

// Declares (or reuses) an external C function whose parameter types are
// inferred from the argument values, applies the requested attributes, and
// emits a call to it.
llvm::Value* IrEmitter::EmitCallToFunc(
    std::string func_name, const std::vector<llvm::Value*>& arguments,
    llvm::Type* return_type, bool does_not_throw, bool only_accesses_arg_memory,
    bool only_accesses_inaccessible_mem_or_arg_mem) {
  std::vector<llvm::Type*> types;
  types.reserve(arguments.size());
  absl::c_transform(arguments, std::back_inserter(types),
                    [&](llvm::Value* val) { return val->getType(); });
  llvm::FunctionType* func_type =
      llvm::FunctionType::get(return_type, types, /*isVarArg=*/false);
  auto func = llvm::dyn_cast<llvm::Function>(
      module_->getOrInsertFunction(func_name, func_type).getCallee());
  func->setCallingConv(llvm::CallingConv::C);
  if (does_not_throw) {
    func->setDoesNotThrow();
  }
  if (only_accesses_arg_memory) {
    func->setOnlyAccessesArgMemory();
  }
  if (only_accesses_inaccessible_mem_or_arg_mem) {
    func->setOnlyAccessesInaccessibleMemOrArgMem();
  }
  return b()->CreateCall(func, arguments);
}

// Returns the shape of `arg`, whether it is already a Shape or an object
// (e.g. HloInstruction*) exposing shape().
template <typename T>
static const Shape& GetShape(T&& arg) {
  if constexpr (std::is_convertible_v<absl::remove_cvref_t<decltype(arg)>,
                                      Shape>) {
    return arg;  // convertible to shape
  } else {
    return arg->shape();
  }
};

// An alloca holding an FFI encoding plus the number of entries it holds.
struct EncodedInfo {
  llvm::AllocaInst* alloca;
  int64_t size;
};

template <typename Args>
// Stores the element type (as an i32 PrimitiveType value) of every array
// leaf of every arg into a new i32 stack array; returns it with its count.
static EncodedInfo StoreEncodedTypes(std::string_view alloca_name,
                                     const Args& args, llvm::IRBuilder<>& ir) {
  // Total number of leaf shapes across all arguments.
  int64_t total_elements = 0;
  for (int64_t i = 0; i < args.size(); ++i) {
    total_elements += ShapeUtil::GetLeafCount(GetShape(args[i]));
  }
  llvm::AllocaInst* types_alloca = llvm_ir::EmitAllocaAtFunctionEntryWithCount(
      ir.getInt32Ty(), ir.getInt64(total_elements), alloca_name, &ir);
  int64_t element_id = 0;
  auto store_type = [&](const Shape& shape, const ShapeIndex& index) {
    if (shape.IsTuple()) {
      return;  // only leaves are encoded
    }
    llvm::Value* slot_in_types_alloca = ir.CreateConstInBoundsGEP1_32(
        ir.getInt32Ty(), types_alloca, element_id++);
    ir.CreateStore(ir.getInt32(shape.element_type()), slot_in_types_alloca);
  };
  for (int64_t i = 0; i < args.size(); ++i) {
    ShapeUtil::ForEachSubshape(GetShape(args[i]), store_type);
  }
  CHECK_EQ(element_id, total_elements);
  return {types_alloca, total_elements};
};

// Stores, for every array leaf of every arg, its rank followed by its
// dimension sizes into a new i64 stack array; returns it with the total
// number of i64 slots written.
template <typename Args>
static EncodedInfo StoreEncodedShapes(std::string_view alloca_name,
                                      const Args& args, llvm::IRBuilder<>& ir) {
  // One slot per dimension plus one rank slot per array leaf.
  int64_t total_dims = 0;
  int64_t total_dim_counts = 0;
  for (int64_t i = 0; i < args.size(); ++i) {
    ShapeUtil::ForEachSubshape(
        GetShape(args[i]), [&](const Shape& shape, const ShapeIndex& index) {
          if (!shape.IsArray()) {
            return;
          }
          total_dims += shape.dimensions().size();
          ++total_dim_counts;
        });
  }
  int64_t shapes_encoding_size = total_dim_counts + total_dims;
  llvm::AllocaInst* shapes_alloca = llvm_ir::EmitAllocaAtFunctionEntryWithCount(
      ir.getInt64Ty(), ir.getInt64(shapes_encoding_size), alloca_name, &ir);
  int64_t slot_id = 0;
  auto store_shape = [&](const Shape& shape, const ShapeIndex& index) {
    if (!shape.IsArray()) {
      return;
    }
    // Rank first, then each dimension size.
    llvm::Value* alloca_slot = ir.CreateConstInBoundsGEP1_64(
        ir.getInt64Ty(), shapes_alloca, slot_id++);
    ir.CreateStore(ir.getInt64(shape.dimensions().size()), alloca_slot);
    for (int64_t dim : shape.dimensions()) {
      alloca_slot = ir.CreateConstInBoundsGEP1_64(ir.getInt64Ty(),
                                                  shapes_alloca, slot_id++);
      ir.CreateStore(ir.getInt64(dim),
                     alloca_slot);
    }
  };
  for (int64_t i = 0; i < args.size(); ++i) {
    ShapeUtil::ForEachSubshape(GetShape(args[i]), store_shape);
  }
  CHECK_EQ(slot_id, shapes_encoding_size);
  return {shapes_alloca, shapes_encoding_size};
};

// Emits the call into the runtime's FFI trampoline for a typed-FFI custom
// call: encodes operand/result types and shapes on the stack and passes
// them along with the target name, opaque string, and status argument.
llvm::Value* IrEmitter::EmitCallToFfi(HloCustomCallInstruction* custom_call,
                                      llvm::AllocaInst* results_alloca,
                                      llvm::AllocaInst* operands_alloca) {
  const auto& operands = absl::MakeSpan(custom_call->operands());
  const auto& shape = custom_call->shape();
  // Treat a non-tuple result as a single-element result list.
  const auto& result_shapes =
      shape.IsTuple() ? shape.tuple_shapes() : std::vector<Shape>({shape});
  EncodedInfo operand_types_encoded =
      StoreEncodedTypes("operands_types", operands, *b());
  EncodedInfo operand_shapes_encoded =
      StoreEncodedShapes("operands_shapes", operands, *b());
  EncodedInfo result_types_encoded =
      StoreEncodedTypes("results_types", result_shapes, *b());
  EncodedInfo result_shapes_encoded =
      StoreEncodedShapes("results_shapes", result_shapes, *b());
  const absl::string_view target = custom_call->custom_call_target();
  const absl::string_view opaque = custom_call->opaque();
  const auto target_ref = llvm_ir::AsStringRef(target);
  const auto opaque_ref = llvm_ir::AsStringRef(opaque);
  std::vector<llvm::Value*> arguments = {
      GetExecutableRunOptionsArgument(),
      b()->CreateGlobalStringPtr(target_ref),
      b()->getInt64(target.size()),
      results_alloca,
      operands_alloca,
      b()->CreateGlobalStringPtr(opaque_ref),
      b()->getInt64(opaque.size()),
      GetStatusArgument(),
      operand_types_encoded.alloca,
      b()->getInt64(operand_types_encoded.size),
      operand_shapes_encoded.alloca,
      result_types_encoded.alloca,
      b()->getInt64(result_types_encoded.size),
      result_shapes_encoded.alloca,
  };
  return EmitCallToFunc(runtime::kHandleFfiCallSymbolName, arguments,
                        b()->getVoidTy(),
                        /*does_not_throw=*/false,
                        /*only_accesses_arg_memory=*/true);
}

// Member wrapper: forwards to the free-function implementation with this
// emitter's module and builder.
void IrEmitter::EmitTransferElements(llvm::Value* target, llvm::Value* source,
                                     int64_t element_count,
                                     PrimitiveType primitive_type,
                                     const llvm_ir::IrArray& target_array,
                                     const llvm_ir::IrArray& source_array) {
  ::xla::cpu::EmitTransferElements(target,
                                   source, element_count, primitive_type,
                                   target_array, source_array, module_, *b());
}

// Copies element_count elements of primitive_type from source to target:
// an aligned load/store pair for a single element, a memcpy otherwise.
// Aliasing metadata from both IrArrays is propagated onto the emitted
// instructions.
void EmitTransferElements(llvm::Value* target, llvm::Value* source,
                          int64_t element_count, PrimitiveType primitive_type,
                          const llvm_ir::IrArray& target_array,
                          const llvm_ir::IrArray& source_array,
                          llvm::Module* module, llvm::IRBuilder<>& b) {
  unsigned primitive_type_size =
      ShapeUtil::ByteSizeOfPrimitiveType(primitive_type);
  // Alignment: GCD of the element size and the backend's minimum alignment
  // for this primitive type.
  llvm::Align element_alignment(tsl::MathUtil::GCD<unsigned>(
      primitive_type_size,
      ::xla::cpu::MinimumAlignmentForPrimitiveType(primitive_type)));
  llvm::Type* primitive_llvm_type =
      llvm_ir::PrimitiveTypeToIrType(primitive_type, module);
  if (element_count == 1) {
    auto* load_instruction =
        b.CreateAlignedLoad(primitive_llvm_type, source, element_alignment);
    source_array.AnnotateLoadStoreInstructionWithMetadata(load_instruction);
    auto* store_instruction =
        b.CreateAlignedStore(load_instruction, target, element_alignment);
    target_array.AnnotateLoadStoreInstructionWithMetadata(store_instruction);
  } else {
    auto* memcpy_instruction = b.CreateMemCpy(
        target, llvm::Align(element_alignment), source,
        llvm::Align(element_alignment), element_count * primitive_type_size);
    // Attach metadata common to both arrays to the memcpy.
    std::map<int, llvm::MDNode*> merged_metadata = llvm_ir::MergeMetadata(
        &module->getContext(), source_array.metadata(),
        target_array.metadata());
    for (const auto& kind_md_pair : merged_metadata) {
      memcpy_instruction->setMetadata(kind_md_pair.first, kind_md_pair.second);
    }
  }
}

// Returns OK iff `instr` (a concatenate) can be lowered with the
// memcpy-based fast path; otherwise a FailedPrecondition explaining why.
absl::Status IrEmitter::CanDoFastConcatenate(
    const HloInstruction* instr) const {
  if (ShouldEmitParallelLoopFor(*instr)) {
    return absl::Status(
        absl::StatusCode::kFailedPrecondition,
        "Cannot generate memcpy-based concat for the parallel CPU backend");
  }
  // All operands must share the output's layout for region copies to work.
  const auto* concatenate = Cast<HloConcatenateInstruction>(instr);
  const Shape& output_shape = concatenate->shape();
  for (auto* op : concatenate->operands()) {
    if (!LayoutUtil::Equal(op->shape().layout(), output_shape.layout())) {
      return absl::Status(absl::StatusCode::kFailedPrecondition,
                          "Operand has mismatching layouts");
    }
  }
  return absl::OkStatus();
}

// Lowers kConcatenate: tries the memcpy-based fast path first and falls
// back to the generic elementwise emitter when it is not applicable.
absl::Status IrEmitter::HandleConcatenate(HloInstruction* concatenate) {
  absl::Status fast_impl_reason = CanDoFastConcatenate(concatenate);
  if (fast_impl_reason.ok()) {
    TF_RETURN_IF_ERROR(EmitTargetAddressForOp(concatenate));
    llvm_ir::IrArray target_array = GetIrArrayFor(concatenate);
    std::vector<llvm_ir::IrArray> source_arrays;
    source_arrays.reserve(concatenate->operands().size());
    for (HloInstruction* operand : concatenate->operands()) {
      source_arrays.emplace_back(GetIrArrayFor(operand));
    }
    TF_RETURN_IF_ERROR(::xla::cpu::EmitFastConcatenate(
        concatenate, source_arrays, target_array, module_, *b()));
    VLOG(1) << "Emitted fast concatenate for " << concatenate->ToString();
    return absl::OkStatus();
  }
  VLOG(1) << "Could not emit fast concatenate for " << concatenate->ToString()
          << ": " << fast_impl_reason.message();
  return DefaultAction(concatenate);
}

// Lowers kConditional: a PRED branch index becomes an if/else; an S32
// branch index becomes a switch whose default is the last branch.
absl::Status IrEmitter::HandleConditional(HloInstruction* conditional) {
  auto branch_index = conditional->operand(0);
  int num_branches = conditional->branch_count();
  TF_RET_CHECK(ShapeUtil::IsScalar(branch_index->shape()) &&
               (branch_index->shape().element_type() == PRED ||
                branch_index->shape().element_type() == S32))
      << "Branch index on a conditional must be scalar bool or int32_t; got: "
      << ShapeUtil::HumanString(branch_index->shape());
  // All branch computations must produce the conditional's shape so they
  // can share its output buffer.
  for (int b = 0; b < num_branches; ++b) {
    HloComputation* br_computation = conditional->branch_computation(b);
    TF_RET_CHECK(ShapeUtil::Equal(conditional->shape(),
                                  br_computation->root_instruction()->shape()))
        << "Shape of conditional should be same as the shape of the " << b
        << "th branch computation; got: "
        << ShapeUtil::HumanString(conditional->shape()) << " and "
        << ShapeUtil::HumanString(br_computation->root_instruction()->shape());
  }
  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(conditional));
  if (branch_index->shape().element_type() == PRED) {
    llvm::LoadInst* pred_value = Load(
        GetIrArrayFor(branch_index).GetBasePointeeType(),
        GetIrArrayFor(branch_index).GetBasePointer(), "load_predicate_value");
    // PRED case: compare against 0 and emit if/then/else, with branch 0 as
    // the true computation and branch 1 as the false computation.
    llvm::Value* pred_cond =
        ICmpNE(pred_value,
               llvm::ConstantInt::get(
                   llvm_ir::PrimitiveTypeToIrType(PRED, module_), 0),
               "boolean_predicate");
    llvm_ir::LlvmIfData if_data =
        llvm_ir::EmitIfThenElse(pred_cond, "conditional", b());
    SetToFirstInsertPoint(if_data.true_block, b());
    EmitGlobalCall(*conditional->branch_computation(0),
                   IrName(conditional, "_true"));
    SetToFirstInsertPoint(if_data.false_block, b());
    EmitGlobalCall(*conditional->branch_computation(1),
                   IrName(conditional, "_false"));
    SetToFirstInsertPoint(if_data.after_block, b());
    return absl::OkStatus();
  }
  // S32 case: lower to a switch instruction.
  llvm::LoadInst* branch_index_value = Load(
      GetIrArrayFor(branch_index).GetBasePointeeType(),
      GetIrArrayFor(branch_index).GetBasePointer(), "load_branch_index_value");
  auto case_block = b()->GetInsertBlock();
  llvm::BasicBlock* after_block;
  // Carve out the continuation block, either by splitting the current block
  // or by creating a fresh one when the block is still unterminated.
  if (case_block->getTerminator() == nullptr) {
    after_block = llvm_ir::CreateBasicBlock(nullptr, "case-after", b());
    b()->SetInsertPoint(case_block);
    b()->CreateBr(after_block);
  } else {
    after_block =
        case_block->splitBasicBlock(b()->GetInsertPoint(), "case-after");
  }
  // The (temporary) branch into after_block is replaced by the switch below.
  case_block->getTerminator()->eraseFromParent();
  // Default case: the last branch computation.
  auto default_block = llvm_ir::CreateBasicBlock(nullptr, "case-default", b());
  b()->SetInsertPoint(default_block);
  EmitGlobalCall(*conditional->branch_computation(num_branches - 1),
                 IrName(conditional, "_default"));
  b()->CreateBr(after_block);
  b()->SetInsertPoint(case_block);
  llvm::SwitchInst* case_inst =
      b()->CreateSwitch(branch_index_value, default_block, num_branches - 1);
  // Explicit cases 0..num_branches-2.
  for (int br = 0; br < num_branches - 1; ++br) {
    auto branch_block = llvm_ir::CreateBasicBlock(
        nullptr, absl::StrCat("case-branch", br), b());
    b()->SetInsertPoint(branch_block);
    EmitGlobalCall(*conditional->branch_computation(br),
                   IrName(conditional, absl::StrCat("_branch", br)));
    b()->CreateBr(after_block);
    case_inst->addCase(b()->getInt32(br), branch_block);
  }
  SetToFirstInsertPoint(after_block, b());
  return absl::OkStatus();
}

// kAfterAll carries no data; just establish its (zero-byte) address.
absl::Status IrEmitter::HandleAfterAll(HloInstruction* after_all) {
  TF_RET_CHECK(ByteSizeOf(after_all->shape()) == 0);
  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(after_all));
  return absl::OkStatus();
}

// The following ops are expected to have been rewritten/expanded by earlier
// passes; reaching them here is an error.
absl::Status IrEmitter::HandleBatchNormGrad(HloInstruction* batch_norm_grad) {
  return Unimplemented("BatchNormGrad should be rewritten for CPU.");
}

absl::Status IrEmitter::HandleBatchNormTraining(
    HloInstruction* batch_norm_training) {
  return Unimplemented("BatchNormTraining should be rewritten for CPU.");
}

absl::Status IrEmitter::HandleGetDimensionSize(HloInstruction* get_size) {
  return Unimplemented("GetDimensionSize should be rewritten for CPU.");
}

absl::Status IrEmitter::HandleSetDimensionSize(HloInstruction* set_size) {
  return Unimplemented("SetDimensionSize should be rewritten for CPU.");
}

// kAddDependency is a no-op for codegen: alias the operand's value.
absl::Status IrEmitter::HandleAddDependency(HloInstruction* add_dependency) {
  emitted_value_[add_dependency] =
      GetEmittedValueFor(add_dependency->operand(0));
  return absl::OkStatus();
}

absl::Status IrEmitter::HandleRng(HloInstruction* rng) {
  return Unimplemented("Rng should be expanded for CPU.");
}

absl::Status IrEmitter::HandleRngBitGenerator(HloInstruction* rng) {
  return Unimplemented("RngBitGenerator should be expanded for CPU.");
}

// Emits code that returns the current RNG state and advances it by the
// instruction's delta.
absl::Status IrEmitter::HandleRngGetAndUpdateState(HloInstruction* rng_state) {
  VLOG(2) << "RngGetAndUpdateState: " << rng_state->ToString();
  llvm::Value* old_state = llvm_ir::RngGetAndUpdateState(
      Cast<HloRngGetAndUpdateStateInstruction>(rng_state)->delta(), module_,
      b());
  TF_RETURN_IF_ERROR(EmitTargetAddressForOp(rng_state));
  llvm::Value* address = GetEmittedValueFor(rng_state);
  llvm::StoreInst* store = Store(old_state, address);
  store->setAlignment(llvm::Align(IrEmitter::MinimumAlignmentForPrimitiveType(
      rng_state->shape().element_type())));
  return absl::OkStatus();
}

absl::Status IrEmitter::HandleStochasticConvert(HloInstruction* instruction) {
  return
      Unimplemented("StochasticConvert should be decomposed for CPU.");
}

// Called after the whole computation has been visited: logs the root value
// and records the computation-level profile counter, if any.
absl::Status IrEmitter::FinishVisit(HloInstruction* root) {
  VLOG(2) << "FinishVisit root: " << root->ToString();
  if (root->opcode() == HloOpcode::kOutfeed) {
    VLOG(2) << "  outfeed with value: "
            << llvm_ir::DumpToString(GetEmittedValueFor(root->operand(0)));
  } else {
    VLOG(2) << "  value: " << llvm_ir::DumpToString(GetEmittedValueFor(root));
  }
  auto record_complete_computation = [&](llvm::Value* prof_counter) {
    if (prof_counter) {
      profiling_state_.RecordCompleteComputation(b(), prof_counter);
    }
  };
  record_complete_computation(GetProfileCounterFor(*root->parent()));
  return absl::OkStatus();
}

// Returns a pointer into the profile-counters array for `hlo`, or nullptr
// if the entity is not being profiled. Shared by the HloInstruction and
// HloComputation overloads below.
template <typename T>
llvm::Value* IrEmitter::GetProfileCounterCommon(
    const T& hlo,
    const absl::flat_hash_map<const T*, int64_t>& profile_index_map) {
  auto it = profile_index_map.find(&hlo);
  if (it == profile_index_map.end()) {
    return nullptr;
  }
  int64_t prof_counter_idx = it->second;
  std::string counter_name = IrName("prof_counter", hlo.name());
  return GEP(b()->getInt64Ty(), GetProfileCountersArgument(),
             b()->getInt64(prof_counter_idx), counter_name);
}

llvm::Value* IrEmitter::GetProfileCounterFor(
    const HloInstruction& instruction) {
  return GetProfileCounterCommon<HloInstruction>(instruction,
                                                 instruction_to_profile_idx_);
}

llvm::Value* IrEmitter::GetProfileCounterFor(
    const HloComputation& computation) {
  return GetProfileCounterCommon<HloComputation>(computation,
                                                 computation_to_profile_idx_);
}

// Adds (cycle_end - cycle_start) into the counter slot pointed to by
// prof_counter.
void IrEmitter::ProfilingState::UpdateProfileCounter(llvm::IRBuilder<>* b,
                                                     llvm::Value* prof_counter,
                                                     llvm::Value* cycle_end,
                                                     llvm::Value* cycle_start) {
  auto* cycle_diff = b->CreateSub(cycle_end, cycle_start);
  llvm::LoadInst* old_cycle_count = b->CreateLoad(
      llvm::cast<llvm::GetElementPtrInst>(prof_counter)->getSourceElementType(),
      prof_counter, "old_cycle_count");
  auto* new_cycle_count =
      b->CreateAdd(cycle_diff, old_cycle_count, "new_cycle_count");
  b->CreateStore(new_cycle_count, prof_counter);
}

llvm::Value*
// Reads the CPU cycle counter, using the x86 rdtscp intrinsic when enabled
// and the generic llvm.readcyclecounter intrinsic otherwise.
IrEmitter::ProfilingState::ReadCycleCounter(llvm::IRBuilder<>* b) {
  llvm::Module* module = b->GetInsertBlock()->getModule();
  if (!use_rdtscp_) {
    llvm::Function* func_llvm_readcyclecounter =
        llvm::Intrinsic::getDeclaration(module,
                                        llvm::Intrinsic::readcyclecounter);
    return b->CreateCall(func_llvm_readcyclecounter);
  }
  llvm::Function* func_llvm_x86_rdtscp =
      llvm::Intrinsic::getDeclaration(module, llvm::Intrinsic::x86_rdtscp);
  // rdtscp returns {tsc, aux}; only the timestamp (element 0) is used.
  llvm::Value* rdtscp_call = b->CreateCall(func_llvm_x86_rdtscp);
  return b->CreateExtractValue(rdtscp_call, {0});
}

// Records the cycle count at the start of `hlo`, remembering the very
// first start for whole-computation accounting.
void IrEmitter::ProfilingState::RecordCycleStart(llvm::IRBuilder<>* b,
                                                 HloInstruction* hlo) {
  auto* cycle_start = ReadCycleCounter(b);
  cycle_start->setName(IrName(hlo, "cycle_start"));
  cycle_starts_[hlo] = cycle_start;
  if (first_read_cycle_start_ == nullptr) {
    first_read_cycle_start_ = cycle_start;
  }
}

// Records the cycle count at the end of `hlo` and accumulates the delta
// into its profile counter.
void IrEmitter::ProfilingState::RecordCycleDelta(llvm::IRBuilder<>* b,
                                                 HloInstruction* hlo,
                                                 llvm::Value* prof_counter) {
  auto* cycle_end = ReadCycleCounter(b);
  cycle_end->setName(IrName(hlo, "cycle_end"));
  auto* cycle_start = cycle_starts_[hlo];
  UpdateProfileCounter(b, prof_counter, cycle_end, cycle_start);
  last_read_cycle_end_ = cycle_end;
}

// Accumulates the whole computation's cycle span (first start to last end)
// into the computation-level counter.
void IrEmitter::ProfilingState::RecordCompleteComputation(
    llvm::IRBuilder<>* b, llvm::Value* prof_counter) {
  if (last_read_cycle_end_ && first_read_cycle_start_) {
    UpdateProfileCounter(b, prof_counter, last_read_cycle_end_,
                         first_read_cycle_start_);
  }
}

// Emits a call to the tracing-start runtime hook for `hlo` and stashes the
// returned activity id for the matching EmitTracingEnd call.
void IrEmitter::TracingState::EmitTracingStart(llvm::IRBuilder<>* b,
                                               HloInstruction* hlo,
                                               llvm::Value* run_options) {
  if (!enabled_) {
    return;
  }
  llvm::Type* void_ptr_type = b->getPtrTy();
  // i64 f(run_options, hlo_name, module_name, program_id)
  llvm::FunctionType* fn_type = llvm::FunctionType::get(
      b->getInt64Ty(),
      {void_ptr_type, void_ptr_type, void_ptr_type, b->getInt64Ty()},
      /*isVarArg=*/false);
  llvm::Function* function = b->GetInsertBlock()->getParent();
  llvm::Module* module = function->getParent();
  const char* fn_name = runtime::kTracingStartSymbolName;
  llvm::FunctionCallee trace_func =
      module->getOrInsertFunction(fn_name, fn_type);
  if (auto* fn = llvm::dyn_cast<llvm::Function>(trace_func.getCallee())) {
    fn->setCallingConv(llvm::CallingConv::C);
    fn->setDoesNotThrow();
    fn->setOnlyAccessesArgMemory();
  }
  auto* hlo_name = b->CreateGlobalStringPtr(hlo->name());
  auto* hlo_module = b->CreateGlobalStringPtr(hlo->GetModule()->name());
  auto* program_id = b->getInt64(hlo->GetModule()->unique_id());
  auto* activity_id = b->CreateCall(
      trace_func, {run_options, hlo_name, hlo_module, program_id});
  activity_id->setName(IrName(hlo, "activity_id"));
  // Remembered so EmitTracingEnd can close the same activity.
  activity_ids_[hlo] = activity_id;
}

// Emits the matching tracing-end call using the activity id recorded by
// EmitTracingStart.
void IrEmitter::TracingState::EmitTracingEnd(llvm::IRBuilder<>* b,
                                             HloInstruction* hlo,
                                             llvm::Value* run_options) {
  if (!enabled_) {
    return;
  }
  // void f(run_options, activity_id)
  llvm::FunctionType* fn_type =
      llvm::FunctionType::get(b->getVoidTy(),
                              {b->getPtrTy(), b->getInt64Ty()},
                              /*isVarArg=*/false);
  llvm::Function* function = b->GetInsertBlock()->getParent();
  llvm::Module* module = function->getParent();
  const char* fn_name = runtime::kTracingEndSymbolName;
  llvm::FunctionCallee trace_func =
      module->getOrInsertFunction(fn_name, fn_type);
  if (auto* fn = llvm::dyn_cast<llvm::Function>(trace_func.getCallee())) {
    fn->setCallingConv(llvm::CallingConv::C);
    fn->setDoesNotThrow();
    fn->setOnlyAccessesArgMemory();
  }
  auto* activity_id = activity_ids_.at(hlo);
  b->CreateCall(trace_func, {run_options, activity_id});
}

namespace {
// Ops cheap enough that tracing them would be pure overhead.
bool IsHloVeryCheap(const HloInstruction* hlo) {
  return hlo->opcode() == HloOpcode::kBitcast ||
         hlo->opcode() == HloOpcode::kTuple ||
         hlo->opcode() == HloOpcode::kGetTupleElement ||
         hlo->opcode() == HloOpcode::kParameter ||
         hlo->opcode() == HloOpcode::kConstant ||
         hlo->opcode() == HloOpcode::kReplicaId;
}
}  // namespace

// Runs before each instruction is emitted: starts tracing/profiling when
// the instruction is profiled, or when traceme is enabled for non-trivial
// ops in the entry computation.
absl::Status IrEmitter::Preprocess(HloInstruction* hlo) {
  VLOG(3) << "Visiting: " << hlo->ToString();
  if (instruction_to_profile_idx_.count(hlo) ||
      (hlo_module_config_.cpu_traceme_enabled() && !IsHloVeryCheap(hlo) &&
       hlo->parent()->IsEntryComputation())) {
    tracing_state_.EmitTracingStart(b(), hlo,
GetExecutableRunOptionsArgument()); profiling_state_.RecordCycleStart(b(), hlo); } return absl::OkStatus(); } absl::Status IrEmitter::Postprocess(HloInstruction* hlo) { if (auto* prof_counter = GetProfileCounterFor(*hlo)) { profiling_state_.RecordCycleDelta(b(), hlo, prof_counter); } if (instruction_to_profile_idx_.count(hlo) || (hlo_module_config_.cpu_traceme_enabled() && !IsHloVeryCheap(hlo) && hlo->parent()->IsEntryComputation())) { tracing_state_.EmitTracingEnd(b(), hlo, GetExecutableRunOptionsArgument()); } return absl::OkStatus(); } llvm_ir::IrArray IrEmitter::GetIrArrayFor(const HloInstruction* hlo) { llvm::Value* value_for_op = GetEmittedValueFor(hlo); llvm::Type* ir_type = IrShapeType(hlo->shape()); llvm_ir::IrArray array(value_for_op, ir_type, hlo->shape()); AddAliasingInformationToIrArray(*hlo, &array); return array; } std::vector<llvm_ir::IrArray> IrEmitter::GetIrArraysForOperandsOf( const HloInstruction* hlo) { std::vector<llvm_ir::IrArray> arrays; std::transform( hlo->operands().begin(), hlo->operands().end(), std::back_inserter(arrays), [&](const HloInstruction* operand) { return GetIrArrayFor(operand); }); return arrays; } llvm::Value* IrEmitter::GetEmittedValueFor(const HloInstruction* hlo) { auto it = emitted_value_.find(hlo); if (it == emitted_value_.end()) { LOG(FATAL) << "could not find emitted value for: " << hlo->ToString(); } return it->second; } llvm::Type* IrEmitter::IrShapeType(const Shape& shape) { return llvm_ir::ShapeToIrType(shape, module_); } llvm::Value* IrEmitter::GetProfileCountersArgument() { return compute_function()->profile_counters_arg(); } llvm::Value* IrEmitter::GetStatusArgument() { return compute_function()->status_arg(); } llvm::Value* IrEmitter::GetBufferTableArgument() { return compute_function()->buffer_table_arg(); } llvm::Value* IrEmitter::GetExecutableRunOptionsArgument() { return compute_function()->exec_run_options_arg(); } llvm::BasicBlock* IrEmitter::GetReturnBlock() { return compute_function()->return_block(); 
} void IrEmitter::EmitEarlyReturnIfErrorStatus() { llvm::Value* succeeded = EmitCallToFunc(runtime::kStatusIsSuccessSymbolName, {GetStatusArgument()}, b()->getInt1Ty(), true, true); llvm_ir::EmitEarlyReturn(succeeded, b(), GetReturnBlock()); } llvm::Value* IrEmitter::EmitThreadLocalBufferPointer( const BufferAllocation::Slice& slice, const Shape& target_shape) { const BufferAllocation& allocation = *slice.allocation(); llvm::Value* tempbuf_address = [&]() -> llvm::Value* { auto param_it = computation_parameter_allocations_.find(slice.allocation()->index()); if (param_it != computation_parameter_allocations_.end()) { int64_t param_number = param_it->second; llvm::Value* params = compute_function()->parameters_arg(); llvm::Value* param_address_offset = llvm_ir::EmitBufferIndexingGEP( params, b()->getPtrTy(), param_number, b()); llvm::LoadInst* param_address_untyped = Load(b()->getPtrTy(), param_address_offset); if (!target_shape.IsOpaque()) { AttachAlignmentMetadataForLoad(param_address_untyped, target_shape); AttachDereferenceableMetadataForLoad(param_address_untyped, target_shape); } return param_address_untyped; } const auto& assigned_buffers = allocation.assigned_buffers(); CHECK_EQ(1, assigned_buffers.size()); const Shape& shape = assigned_buffers.begin()->first->shape(); std::pair<llvm::Function*, BufferAllocation::Slice> key = { compute_function()->function(), slice}; auto buf_it = thread_local_buffers_.find(key); if (buf_it == thread_local_buffers_.end()) { llvm::Value* buffer = llvm_ir::EmitAllocaAtFunctionEntry( IrShapeType(shape), absl::StrCat("thread_local", slice.ToString()), b(), MinimumAlignmentForShape(target_shape)); auto it_inserted_pair = thread_local_buffers_.insert({key, buffer}); CHECK(it_inserted_pair.second); buf_it = it_inserted_pair.first; } return buf_it->second; }(); return tempbuf_address; } llvm::Value* IrEmitter::EmitGlobalBufferPointer( const BufferAllocation::Slice& slice, const Shape& target_shape) { const BufferAllocation& 
allocation = *slice.allocation(); llvm::Value* tempbuf_address_ptr = llvm_ir::EmitBufferIndexingGEP( GetBufferTableArgument(), b()->getPtrTy(), slice.index(), b()); llvm::LoadInst* tempbuf_address_base = Load(b()->getPtrTy(), tempbuf_address_ptr); AttachInvariantLoadMetadataForLoad(tempbuf_address_base); AttachAlignmentMetadataForLoad(tempbuf_address_base, allocation.size()); AttachDereferenceableMetadataForLoad(tempbuf_address_base, allocation.size()); llvm::Value* tempbuf_address_untyped = tempbuf_address_base; if (slice.offset() > 0) { tempbuf_address_untyped = InBoundsGEP( b()->getInt8Ty(), tempbuf_address_base, b()->getInt64(slice.offset())); } return tempbuf_address_untyped; } llvm::Value* IrEmitter::EmitBufferPointer(const BufferAllocation::Slice& slice, const Shape& target_shape) { if (slice.allocation()->is_thread_local()) { return EmitThreadLocalBufferPointer(slice, target_shape); } else if (slice.allocation()->is_constant()) { return FindOrDie(constant_buffer_to_global_, slice.allocation()->index()); } else { return EmitGlobalBufferPointer(slice, target_shape); } } absl::Status IrEmitter::EmitTargetAddressForOp(const HloInstruction* op) { const Shape& target_shape = op->shape(); TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice slice, assignment_.GetUniqueTopLevelSlice(op)); llvm::Value* addr = EmitBufferPointer(slice, target_shape); addr->setName(IrName(op)); emitted_value_[op] = addr; return absl::OkStatus(); } absl::Status IrEmitter::EmitTargetElementLoop( const HloInstruction* target_op, absl::string_view desc, const llvm_ir::ElementGenerator& element_generator, std::optional<llvm_ir::IrArray> result_array_opt) { VLOG(2) << "EmitTargetElementLoop: " << target_op->ToString(); llvm_ir::IrArray target_array; if (result_array_opt.has_value()) { target_array = result_array_opt.value(); } else { TF_RETURN_IF_ERROR(EmitTargetAddressForOp(target_op)); target_array = GetIrArrayFor(target_op); } const Shape& target_shape = target_op->shape(); if 
(target_shape.IsTuple() && (target_op->opcode() == HloOpcode::kFusion || target_op->opcode() == HloOpcode::kReduce || target_op->opcode() == HloOpcode::kReduceWindow)) { TF_RET_CHECK(num_dynamic_loop_bounds_ == 0); std::vector<llvm_ir::IrArray> output_arrays; for (int64_t i = 0; i < ShapeUtil::TupleElementCount(target_shape); ++i) { TF_ASSIGN_OR_RETURN(BufferAllocation::Slice slice, assignment_.GetUniqueSlice(target_op, {i})); const Shape& element_shape = ShapeUtil::GetSubshape(target_shape, {i}); llvm::Value* op_target_address = EmitBufferPointer(slice, element_shape); llvm::Type* op_target_type = IrShapeType(element_shape); output_arrays.push_back( llvm_ir::IrArray(op_target_address, op_target_type, element_shape)); } TF_RETURN_IF_ERROR( llvm_ir::LoopEmitter(element_generator, output_arrays, b()) .EmitLoop(IrName(target_op, desc))); std::vector<llvm::Value*> tuple_operand_ptrs; tuple_operand_ptrs.reserve(output_arrays.size()); for (int64_t i = 0; i < output_arrays.size(); ++i) { tuple_operand_ptrs.push_back(output_arrays[i].GetBasePointer()); } llvm_ir::EmitTuple(target_array, tuple_operand_ptrs, b()); } else { if (ShouldEmitParallelLoopFor(*target_op)) { std::vector<std::pair<llvm::Value*, llvm::Value*>> dynamic_loop_bounds = compute_function()->GetDynamicLoopBounds(); TF_RETURN_IF_ERROR(ParallelLoopEmitter(element_generator, target_array, &dynamic_loop_bounds, b()) .EmitLoop(IrName(target_op, desc))); } else { TF_RETURN_IF_ERROR( llvm_ir::LoopEmitter(element_generator, target_array, b()) .EmitLoop(IrName(target_op, desc))); } } return absl::OkStatus(); } absl::Status IrEmitter::EmitMemcpy(const HloInstruction& source, const HloInstruction& destination) { llvm::Value* source_value = GetEmittedValueFor(&source); llvm::Value* destination_value = GetEmittedValueFor(&destination); int64_t source_size = ByteSizeOf(source.shape()); MemCpy(destination_value, llvm::Align(1), source_value, llvm::Align(1), source_size); return absl::OkStatus(); } absl::Status 
IrEmitter::ElementTypesSameAndSupported( const HloInstruction& instruction, absl::Span<const HloInstruction* const> operands, absl::Span<const PrimitiveType> supported_types) { for (auto operand : operands) { TF_RET_CHECK( ShapeUtil::SameElementType(operands[0]->shape(), operand->shape())); } TF_RET_CHECK(!operands.empty()); PrimitiveType primitive_type = operands[0]->shape().element_type(); if (!absl::c_linear_search(supported_types, primitive_type)) { return Unimplemented("unsupported operand type %s in op %s", PrimitiveType_Name(primitive_type), HloOpcodeString(instruction.opcode())); } return absl::OkStatus(); } absl::Status IrEmitter::DefaultAction(HloInstruction* hlo) { ElementalIrEmitter::HloToElementGeneratorMap operand_to_generator; for (const HloInstruction* operand : hlo->operands()) { operand_to_generator[operand] = [=](const llvm_ir::IrArray::Index& index) { return GetIrArrayFor(operand).EmitReadArrayElement(index, b()); }; } CpuElementalIrEmitter elemental_emitter(hlo_module_config_, this, module_); return EmitTargetElementLoop( hlo, "elemental_loop", elemental_emitter.MakeElementGenerator(hlo, operand_to_generator), std::nullopt); } llvm::Value* IrEmitter::EmitScalarReturningThreadLocalCall( const HloComputation& callee, absl::Span<llvm::Value* const> parameters, absl::string_view name) { std::vector<llvm::Value*> return_value = EmitThreadLocalCall(callee, parameters, name, false); CHECK_EQ(return_value.size(), 1); return return_value[0]; } std::vector<llvm::Value*> IrEmitter::EmitThreadLocalCall( const HloComputation& callee, absl::Span<llvm::Value* const> parameters, absl::string_view name, bool is_reducer, bool in_compute_function) { CHECK(absl::c_binary_search(thread_local_computations_, &callee)); const Shape& return_shape = callee.root_instruction()->shape(); bool is_scalar_return = ShapeUtil::IsScalar(return_shape); bool is_tuple_of_scalars_return = return_shape.IsTuple() && absl::c_all_of(return_shape.tuple_shapes(), [&](const Shape& shape) { 
return ShapeUtil::IsScalar(shape); }); CHECK(is_scalar_return || is_tuple_of_scalars_return); std::vector<llvm::Value*> parameter_addrs; for (llvm::Value* parameter : parameters) { CHECK(!parameter->getType()->isPointerTy()); llvm::Value* parameter_addr = llvm_ir::EmitAllocaAtFunctionEntry( parameter->getType(), "arg_addr", b()); Store(parameter, parameter_addr); parameter_addrs.push_back(parameter_addr); } llvm::Type* return_value_buffer_type = llvm_ir::ShapeToIrType(return_shape, module_); std::string retval_alloca_name = absl::StrCat(name, "_return_value_addr"); int retval_alignment = is_scalar_return ? MinimumAlignmentForPrimitiveType(return_shape.element_type()) : 0; llvm::AllocaInst* return_value_buffer = llvm_ir::EmitAllocaAtFunctionEntry( return_value_buffer_type, retval_alloca_name, b(), retval_alignment); std::vector<llvm::Value*> allocas_for_returned_scalars; if (is_scalar_return) { allocas_for_returned_scalars.push_back(return_value_buffer); } else { constexpr int max_tuple_size = 1000; CHECK_LT(return_shape.tuple_shapes_size(), max_tuple_size) << "Multivalue function can not return more than 1000 elements to avoid" << " stack smashing"; allocas_for_returned_scalars = llvm_ir::EmitTupleAllocasAtFunctionEntry(return_shape, b()); llvm_ir::IrArray tuple_array(return_value_buffer, return_value_buffer_type, return_shape); EmitTuple(tuple_array, allocas_for_returned_scalars, b()); } llvm::Value* null_ptr = llvm::Constant::getNullValue(b()->getPtrTy()); Call( FindOrDie(emitted_functions_, ComputationToEmit{&callee, allow_reassociation_ || is_reducer}), GetArrayFunctionCallArguments( parameter_addrs, b(), name, return_value_buffer, in_compute_function ? GetExecutableRunOptionsArgument() : null_ptr, null_ptr, in_compute_function ? GetStatusArgument() : null_ptr, in_compute_function ? 
GetProfileCountersArgument() : null_ptr)); if (ComputationTransitivelyContainsCustomCall(&callee)) { DCHECK(!in_compute_function) << "Custom call inside nested computations " "are not supported by Thunks runtime"; EmitEarlyReturnIfErrorStatus(); } std::vector<llvm::Value*> returned_scalars; returned_scalars.reserve(allocas_for_returned_scalars.size()); for (llvm::Value* addr : allocas_for_returned_scalars) { returned_scalars.push_back( Load(llvm::cast<llvm::AllocaInst>(addr)->getAllocatedType(), addr)); } return returned_scalars; } void IrEmitter::EmitGlobalCall(const HloComputation& callee, absl::string_view name) { CHECK(absl::c_binary_search(global_computations_, &callee)); Call(FindOrDie(emitted_functions_, ComputationToEmit{&callee, allow_reassociation_}), GetArrayFunctionCallArguments( {}, b(), name, llvm::Constant::getNullValue(b()->getPtrTy()), GetExecutableRunOptionsArgument(), GetBufferTableArgument(), GetStatusArgument(), GetProfileCountersArgument())); if (ComputationTransitivelyContainsCustomCall(&callee)) { EmitEarlyReturnIfErrorStatus(); } } llvm::Value* IrEmitter::GetBufferForGlobalCallReturnValue( const HloComputation& callee) { const HloInstruction* root_inst = callee.root_instruction(); if (root_inst->opcode() == HloOpcode::kOutfeed) { return llvm::Constant::getNullValue(b()->getPtrTy()); } const BufferAllocation::Slice root_buffer = assignment_.GetUniqueTopLevelSlice(root_inst).value(); return EmitBufferPointer(root_buffer, root_inst->shape()); } void IrEmitter::BindFusionArguments(const HloInstruction* fusion, FusedIrEmitter* fused_emitter) { for (int i = 0; i < fusion->operand_count(); i++) { const HloInstruction* operand = fusion->operand(i); fused_emitter->BindGenerator( *fusion->fused_parameter(i), [this, operand](llvm_ir::IrArray::Index index) { return GetIrArrayFor(operand).EmitReadArrayElement(index, b()); }); } } } }
#include "xla/service/cpu/ir_emitter.h" #include <cstdint> #include <memory> #include <utility> #include "llvm/IR/BasicBlock.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Function.h" #include "llvm/IR/GlobalValue.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/LLVMContext.h" #include "llvm/Support/Casting.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/service/buffer_assignment.h" #include "xla/service/cpu/ir_function.h" #include "xla/service/cpu/target_machine_features_fake.h" #include "xla/service/hlo_module_config.h" #include "xla/service/hlo_ordering.h" #include "xla/service/hlo_parser.h" #include "xla/service/logical_buffer.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" namespace xla::cpu { namespace { using IrEmitterTest = HloTestBase; static std::pair<llvm::Function*, llvm::BasicBlock*> CreateFunction( llvm::LLVMContext& context, llvm::Module* module, llvm::IRBuilder<>* b) { llvm::PointerType* ptrtype = llvm::PointerType::getUnqual(context); llvm::FunctionType* ftype = llvm::FunctionType::get(ptrtype, ptrtype, false); llvm::Function* function = llvm::dyn_cast<llvm::Function>( module->getOrInsertFunction("func2", ftype).getCallee()); llvm::BasicBlock* return_block = llvm::BasicBlock::Create(context, "", function); b->SetInsertPoint(return_block); [[maybe_unused]] llvm::ReturnInst* ret = b->CreateRet( llvm::ConstantPointerNull::get(llvm::PointerType::getUnqual(context))); return std::make_pair(function, return_block); } TEST_F(IrEmitterTest, ComputeFuncStack) { llvm::LLVMContext context; auto module = std::make_unique<llvm::Module>("test", context); const char* hlo_text = R"( HloModule m ENTRY main { ROOT %zero = f32[] constant(0) })"; TF_ASSERT_OK_AND_ASSIGN(auto hlo, ParseAndReturnUnverifiedModule(hlo_text)); const HloInstruction* zero = FindInstruction(hlo.get(), "zero"); ASSERT_NE(zero, nullptr); 
TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<BufferAssignment> buffer_assignment, BufferAssigner::Run( hlo.get(), std::make_unique<DependencyHloOrdering>(hlo.get()), backend().compiler()->BufferSizeBytesFunction(), [](LogicalBuffer::Color) { return 1; })); TargetMachineFeaturesWithFakeAlignmentLogic target_machine( [](int64_t size) { return 1; }); IrEmitter ir_emitter(nullptr, *hlo, *buffer_assignment, module.get(), {}, {}, {}, &target_machine, false); llvm::IRBuilder<>* b = ir_emitter.b(); ASSERT_NE(b, nullptr); const std::pair<llvm::Function*, llvm::BasicBlock*> fb = CreateFunction(context, module.get(), b); llvm::Function* function = fb.first; llvm::BasicBlock* return_block = fb.second; ASSERT_NE(function, nullptr); ASSERT_NE(return_block, nullptr); const auto funcname = "func1"; const auto linkagetype = llvm::GlobalValue::LinkageTypes::ExternalLinkage; const HloModuleConfig module_config; ir_emitter.PushComputeFunction(funcname, linkagetype, module_config, module.get(), 0); ASSERT_EQ(ir_emitter.compute_function()->function()->getName().str(), funcname); ir_emitter.PushComputeFunction(b, module.get(), 0, function, nullptr, return_block); ASSERT_EQ(ir_emitter.compute_function()->function(), function); ir_emitter.PopComputeFunction(); ASSERT_EQ(ir_emitter.compute_function()->function()->getName().str(), funcname); ir_emitter.PopComputeFunction(); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/ir_emitter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/ir_emitter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
94482255-ec7d-455f-8827-0a005b4924b8
cpp
tensorflow/tensorflow
execution_stream_assignment
third_party/xla/xla/service/gpu/execution_stream_assignment.cc
third_party/xla/xla/service/gpu/execution_stream_assignment_test.cc
#include "xla/service/gpu/execution_stream_assignment.h" #include <deque> #include <memory> #include <utility> #include "absl/container/flat_hash_map.h" #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/call_graph.h" #include "xla/service/gpu/runtime/thunk.h" namespace xla::gpu { ExecutionStreamAssignment::ExecutionStreamAssignment( const HloModule* module, ExecutionStreamAssignmentOptions options) { std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module); ExecutionStreamId next_stream_id = ExecutionStreamId(1); struct Pending { Pending(HloComputation* node, ExecutionStreamId stream_id) : node(node), stream_id(stream_id) {} HloComputation* node; ExecutionStreamId stream_id; }; std::deque<Pending> queue; queue.emplace_back(module->entry_computation(), ExecutionStreamId(0)); auto enqueue_called_computations = [&](const CallSite& callsite, ExecutionStreamId stream) { if (GetInstructionCallContext(callsite.instruction()->opcode()) == CallContext::kEmbedded) { return; } for (HloComputation* computation : callsite.called_computations()) { queue.emplace_back(computation, stream); } }; auto assign_async_execution_streams = [&](HloInstruction* instruction, ExecutionStreamId source_stream_id) { AsyncExecutionStreamIds streams; streams.source_stream_id = source_stream_id; streams.destination_stream_id = next_stream_id; CHECK(async_instructions_.try_emplace(instruction, streams).second); next_stream_id++; if (next_stream_id.value() > options.number_of_execution_streams) { next_stream_id = ExecutionStreamId(1); } }; while (!queue.empty()) { Pending pending = queue.front(); queue.pop_front(); for (HloInstruction* instruction : pending.node->instructions()) { if 
(instruction->IsAsynchronous()) continue; if (instruction->opcode() == HloOpcode::kCopyStart) { assign_async_execution_streams(instruction, pending.stream_id); } else { CHECK(sync_instructions_.try_emplace(instruction, pending.stream_id) .second); } } for (const CallSite& callsite : call_graph->GetNode(pending.node).callsites()) { if (callsite.instruction()->IsAsynchronous()) { CHECK_EQ(callsite.instruction()->opcode(), HloOpcode::kAsyncStart); enqueue_called_computations(callsite, next_stream_id); assign_async_execution_streams(callsite.instruction(), pending.stream_id); } else { enqueue_called_computations(callsite, pending.stream_id); } } for (HloInstruction* instruction : pending.node->instructions()) { if (!instruction->IsAsynchronous()) continue; if (instruction->opcode() == HloOpcode::kAsyncStart) { CHECK(async_instructions_.find(instruction) != async_instructions_.end()); } else { HloInstruction* async_start = Cast<HloAsyncInstruction>(instruction)->async_chain_start(); AsyncExecutionStreamIds async_start_streams = async_instructions_.at(async_start); CHECK(async_instructions_.try_emplace(instruction, async_start_streams) .second); } } } } namespace { absl::Status StreamNotFoundError(const HloInstruction* instruction) { return absl::NotFoundError(absl::StrCat( "No ExecutionStreamId found for ", instruction->ToString(), "; this may happen if the Computation is not reachable from the module's " "entrypoint, or if it's only reachable through a embedded calls.")); } } absl::StatusOr<ExecutionStreamId> ExecutionStreamAssignment::GetSyncExecutionStreamId( const HloInstruction* instruction) const { CHECK(!instruction->IsAsynchronous()); auto stream = sync_instructions_.find(instruction); if (stream == sync_instructions_.end()) { return StreamNotFoundError(instruction); } return stream->second; } absl::StatusOr<ExecutionStreamAssignment::AsyncExecutionStreamIds> ExecutionStreamAssignment::GetAsyncExecutionStreamIds( const HloInstruction* instruction) const { 
CHECK(instruction->IsAsynchronous() || instruction->opcode() == HloOpcode::kCopyStart); auto streams = async_instructions_.find(instruction); if (streams == async_instructions_.end()) { return StreamNotFoundError(instruction); } return streams->second; } }
#include "xla/service/gpu/execution_stream_assignment.h" #include <memory> #include <string_view> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/service/gpu/runtime/thunk.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" using ::tsl::testing::IsOkAndHolds; using ::tsl::testing::StatusIs; using AsyncExecutionStreamIds = ::xla::gpu::ExecutionStreamAssignment::AsyncExecutionStreamIds; namespace xla::gpu { namespace { class ExecutionStreamAssignmentTest : public HloTestBase { protected: void ExpectExecutionStreamForSyncInstructions( const ExecutionStreamAssignment& assignment, HloComputation* computation, ExecutionStreamId stream) const { for (const HloInstruction* instruction : computation->instructions()) { if (instruction->IsAsynchronous()) continue; EXPECT_THAT(assignment.GetSyncExecutionStreamId(instruction), IsOkAndHolds(stream)); } } }; TEST_F(ExecutionStreamAssignmentTest, AsyncFusion) { const char* kModuleStr = R"( HloModule m leaf1 { p0 = f32[2,2] parameter(0) ROOT add = f32[2,2] add(p0, p0) } leaf2 { p0 = f32[2,2] parameter(0) ROOT add = f32[2,2] add(p0, p0) } leaf3 { p0 = f32[2,2] parameter(0) ROOT add = f32[2,2] add(p0, p0) } ENTRY entry { p0 = f32[2,2] parameter(0) start1 = ((f32[2,2]), f32[2,2], s32[]) fusion-start(p0), kind=kLoop, calls=leaf1 start2 = ((f32[2,2]), f32[2,2], s32[]) fusion-start(p0), kind=kLoop, calls=leaf2 start3 = ((f32[2,2]), f32[2,2], s32[]) fusion-start(p0), kind=kLoop, calls=leaf3 update1 = ((f32[2,2]), f32[2,2], s32[]) fusion-update(start1) update2 = ((f32[2,2]), f32[2,2], s32[]) fusion-update(start2) update3 = ((f32[2,2]), f32[2,2], s32[]) fusion-update(start3) done1 = f32[2,2] 
fusion-done(update1) done2 = f32[2,2] fusion-done(update2) done3 = f32[2,2] fusion-done(update3) ROOT done = f32[2,2] custom-call(done1, done2, done3), custom_call_target="target" } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kModuleStr)); ExecutionStreamAssignment assignment( module.get(), ExecutionStreamAssignmentOptions{2}); ExpectExecutionStreamForSyncInstructions( assignment, FindComputation(module.get(), "entry"), ExecutionStreamId(0)); for (std::string_view instruction : {"start1", "update1", "done1"}) { EXPECT_THAT(assignment.GetAsyncExecutionStreamIds(Cast<HloAsyncInstruction>( FindInstruction(module.get(), instruction))), IsOkAndHolds(AsyncExecutionStreamIds{ ExecutionStreamId(0), ExecutionStreamId(1)})); } for (std::string_view instruction : {"start2", "update2", "done2"}) { EXPECT_THAT(assignment.GetAsyncExecutionStreamIds(Cast<HloAsyncInstruction>( FindInstruction(module.get(), instruction))), IsOkAndHolds(AsyncExecutionStreamIds{ ExecutionStreamId(0), ExecutionStreamId(2)})); } for (std::string_view instruction : {"start3", "update3", "done3"}) { EXPECT_THAT(assignment.GetAsyncExecutionStreamIds(Cast<HloAsyncInstruction>( FindInstruction(module.get(), instruction))), IsOkAndHolds(AsyncExecutionStreamIds{ ExecutionStreamId(0), ExecutionStreamId(1)})); } ExpectExecutionStreamForSyncInstructions( assignment, Cast<HloAsyncInstruction>(FindInstruction(module.get(), "start1")) ->async_wrapped_computation(), ExecutionStreamId(1)); ExpectExecutionStreamForSyncInstructions( assignment, Cast<HloAsyncInstruction>(FindInstruction(module.get(), "start2")) ->async_wrapped_computation(), ExecutionStreamId(2)); } TEST_F(ExecutionStreamAssignmentTest, CopyStartStreamIdTest) { const char* const hlo_copy_start_string = R"( HloModule Module ENTRY CopyStartAndCopyDone { p0 = f32[2,3]{1,0:S(1)} parameter(0) copy-start = (f32[2,3]{1,0:S(2)}, f32[2,3]{1,0:S(1)}, u32[]) copy-start(p0) ROOT copy-done = f32[2,3]{1,0:S(2)} 
copy-done(copy-start) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_copy_start_string)); ExecutionStreamAssignment assignment(module.get()); for (std::string_view instruction : {"copy-start"}) { EXPECT_THAT( assignment.GetAsyncExecutionStreamIds(Cast<HloCopyStartInstruction>( FindInstruction(module.get(), instruction))), IsOkAndHolds(AsyncExecutionStreamIds{ ExecutionStreamId(0), ExecutionStreamId(1)})); } } TEST_F(ExecutionStreamAssignmentTest, FusionComputations) { const char* kModuleStr = R"( HloModule m reduce { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT add = f32[] add(p0, p1) } fusion { p0 = f32[4] parameter(0) c0 = f32[] constant(0) ROOT reduce = f32[] reduce(p0, c0), dimensions={0}, to_apply=reduce } ENTRY entry { p0 = f32[4] parameter(0) ROOT done = f32[] fusion(p0), kind=kLoop, calls=fusion } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kModuleStr)); ExecutionStreamAssignment assignment(module.get()); ExpectExecutionStreamForSyncInstructions( assignment, FindComputation(module.get(), "entry"), ExecutionStreamId(0)); for (std::string_view computation : {"reduce", "fusion"}) { for (const HloInstruction* instruction : FindComputation(module.get(), computation)->instructions()) { EXPECT_THAT(assignment.GetSyncExecutionStreamId(instruction), StatusIs(absl::StatusCode::kNotFound)); } } } TEST_F(ExecutionStreamAssignmentTest, UnreachableComputation) { const char* kModuleStr = R"( HloModule m unreachable { p0 = f32[2,2] parameter(0) ROOT add = f32[2,2] add(p0, p0) } ENTRY entry { p0 = f32[2,2] parameter(0) ROOT add = f32[2,2] add(p0, p0) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kModuleStr)); ExecutionStreamAssignment assignment(module.get()); ExpectExecutionStreamForSyncInstructions( assignment, FindComputation(module.get(), "entry"), ExecutionStreamId(0)); for (const HloInstruction* instruction : 
FindComputation(module.get(), "unreachable")->instructions()) { EXPECT_THAT(assignment.GetSyncExecutionStreamId(instruction), StatusIs(absl::StatusCode::kNotFound)); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/execution_stream_assignment.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/execution_stream_assignment_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
2e67453e-51a6-4245-90a4-4e34db3316cd
cpp
tensorflow/tensorflow
hlo_algorithm_denylist
third_party/xla/xla/service/gpu/hlo_algorithm_denylist.cc
third_party/xla/xla/service/gpu/hlo_algorithm_denylist_test.cc
#include "xla/service/gpu/hlo_algorithm_denylist.h"

#include <optional>
#include <string>
#include <tuple>
#include <vector>

#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/backend_config.h"
#include "xla/service/gpu/autotuning/gpu_autotuning.pb.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/stream_executor/dnn.h"
#include "tsl/platform/env.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"

namespace xla {
namespace gpu {

// Built-in denylist of known-bad cuDNN convolution algorithms, expressed as an
// `AlgorithmDenylist` text proto. Each entry pins a specific HLO custom-call
// string plus backend config, a compute capability (`cc`), and a cuDNN version,
// and lists the algorithm ids to disable for that exact combination.
//
// This list is always applied, in addition to any user-supplied denylist file
// (see --xla_gpu_algorithm_denylist_path in GetDisabledConvAlgorithms).
//
// NOTE(review): entries come in near-duplicate groups that differ only in
// cc.minor / cudnn_version minor+patch — matching in the lookup is exact, not
// ranged, so every affected version combination needs its own entry.
constexpr char kDefaultDenylist[] = R"pb(
  entries {
    hlo: "(f32[512,512,7,7]{3,2,1,0}, u8[0]{0}) custom-call(f32[512,512,7,7]{3,2,1,0}, f32[512,512,3,3]{3,2,1,0}, f32[512]{0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
    backend_config { operation_queue_id: 0 wait_on_operation_queues: [] cudnn_conv_backend_config: { activation_mode: kNone conv_result_scale: 1 side_input_scale: 0 leakyrelu_alpha: 0 }, force_earliest_schedule: false }
    cc { major: 7 }
    cudnn_version { major: 9 }
    algos { id: 14 }
  }
  entries {
    hlo: "(f32[512,512,7,7]{3,2,1,0}, u8[0]{0}) custom-call(f32[512,512,7,7]{3,2,1,0}, f32[512,512,3,3]{3,2,1,0}, f32[512]{0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
    backend_config { operation_queue_id: 0 wait_on_operation_queues: [] cudnn_conv_backend_config: { activation_mode: kNone conv_result_scale: 1 side_input_scale: 0 leakyrelu_alpha: 0 }, force_earliest_schedule: false }
    cc { major: 7 }
    cudnn_version { major: 9 minor: 1 patch: 1 }
    algos { id: 14 }
  }
  entries {
    hlo: "(f32[27,256,32,32]{3,2,1,0}, u8[0]{0}) custom-call(f32[27,256,32,32]{3,2,1,0}, f32[256,256,3,3]{3,2,1,0}, f32[256]{0}, f32[27,256,32,32]{3,2,1,0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
    backend_config { operation_queue_id: 0 wait_on_operation_queues: [] cudnn_conv_backend_config: { activation_mode: kNone conv_result_scale: 1 side_input_scale: 1, leakyrelu_alpha: 0 }, force_earliest_schedule: false }
    cc { major: 7 }
    cudnn_version { major: 9 }
    algos { id: 14 }
  }
  entries {
    hlo: "(f32[27,256,32,32]{3,2,1,0}, u8[0]{0}) custom-call(f32[27,256,32,32]{3,2,1,0}, f32[256,256,3,3]{3,2,1,0}, f32[256]{0}, f32[27,256,32,32]{3,2,1,0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
    backend_config { operation_queue_id: 0 wait_on_operation_queues: [] cudnn_conv_backend_config: { activation_mode: kNone conv_result_scale: 1 side_input_scale: 1 leakyrelu_alpha: 0 }, force_earliest_schedule: false }
    cc { major: 7 minor: 5 }
    cudnn_version { major: 9 }
    algos { id: 14 }
  }
  entries {
    hlo: "(f32[27,256,32,32]{3,2,1,0}, u8[0]{0}) custom-call(f32[27,256,32,32]{3,2,1,0}, f32[256,256,3,3]{3,2,1,0}, f32[256]{0}, f32[27,256,32,32]{3,2,1,0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
    backend_config { operation_queue_id: 0 wait_on_operation_queues: [] cudnn_conv_backend_config: { activation_mode: kNone conv_result_scale: 1 side_input_scale: 1 leakyrelu_alpha: 0 }, force_earliest_schedule: false }
    cc { major: 7 }
    cudnn_version { major: 9 minor: 1 patch: 1 }
    algos { id: 14 }
  }
  entries {
    hlo: "(f32[27,256,32,32]{3,2,1,0}, u8[0]{0}) custom-call(f32[27,256,32,32]{3,2,1,0}, f32[256,256,3,3]{3,2,1,0}, f32[256]{0}, f32[27,256,32,32]{3,2,1,0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target=\"__cudnn$convBiasActivationForward\""
    backend_config { operation_queue_id: 0 wait_on_operation_queues: [] cudnn_conv_backend_config: { activation_mode: kNone conv_result_scale: 1 side_input_scale: 1 leakyrelu_alpha: 0 }, force_earliest_schedule: false }
    cc { major: 7 minor: 5 }
    cudnn_version { major: 9 minor: 1 patch: 1 }
    algos { id: 14 }
  }
)pb";
// Returns the list of cuDNN convolution algorithms that are denylisted for the
// given (hlo, compute capability, cuDNN version, BLAS version) combination.
//
// The denylist is built lazily, exactly once, from two sources:
//   1. the optional text-proto file named by
//      --xla_gpu_algorithm_denylist_path (crashes via TF_CHECK_OK if the
//      file exists but cannot be read/parsed), and
//   2. the built-in kDefaultDenylist above.
// Both are merged into one map, so user entries and default entries are
// treated identically at lookup time.
//
// The lookup is performed twice: once with the caller's `blas_version` and
// once with an empty BLAS version, so denylist entries that omit
// `blas_version` apply regardless of the BLAS library in use.
std::vector<stream_executor::dnn::AlgorithmDesc> GetDisabledConvAlgorithms(
    ComputeCapability cc, CudnnVersion cudnn_version,
    const std::string& blas_version, const std::string& hlo) {
  // Key: (hlo-with-backend-config, cc.major, cc.minor,
  //       cudnn.major, cudnn.minor, cudnn.patch, blas_version).
  using MapType = absl::flat_hash_map<
      std::tuple<std::string, int, int, int, int, int, std::string>,
      std::vector<stream_executor::dnn::AlgorithmDesc>>;
  // Function-local static: built on first call, intentionally leaked
  // (process-lifetime cache).
  static MapType* denylist = [] {
    auto* list = new MapType();
    AlgorithmDenylist proto;
    // Folds every (entry, algo) pair of a parsed denylist proto into `list`.
    auto process_denylist = [list](const AlgorithmDenylist& proto) {
      for (const auto& entry : proto.entries()) {
        for (const auto& algo : entry.algos()) {
          (*list)[std::make_tuple(HloStringWithGpuBackendConfig(
                                      entry.hlo(), entry.backend_config()),
                                  entry.cc().major(), entry.cc().minor(),
                                  entry.cudnn_version().major(),
                                  entry.cudnn_version().minor(),
                                  entry.cudnn_version().patch(),
                                  entry.blas_version())]
              .emplace_back(algo.id(), algo.tensor_ops(), std::nullopt);
        }
      }
    };
    // Source 1: user-provided denylist file, if any.
    std::string file_path =
        GetDebugOptionsFromFlags().xla_gpu_algorithm_denylist_path();
    if (!file_path.empty()) {
      TF_CHECK_OK(tsl::ReadTextProto(tsl::Env::Default(), file_path, &proto));
      process_denylist(proto);
    }
    // Source 2: the compiled-in default denylist. Parsing a constant, so a
    // failure here is a programming error -> CHECK.
    CHECK(tsl::protobuf::TextFormat::ParseFromString(
        std::string(kDefaultDenylist), &proto));
    process_denylist(proto);
    return list;
  }();
  std::vector<stream_executor::dnn::AlgorithmDesc> algorithms;
  auto add_matching_disabled_algorithms_to_result = [&](const auto& key) {
    auto iter = denylist->find(key);
    if (iter != denylist->end()) {
      algorithms.insert(algorithms.end(), iter->second.begin(),
                        iter->second.end());
    }
  };
  // Exact match including the BLAS version...
  auto key = std::make_tuple(hlo, cc.major(), cc.minor(), cudnn_version.major(),
                             cudnn_version.minor(), cudnn_version.patch(),
                             blas_version);
  add_matching_disabled_algorithms_to_result(key);
  // ...then a second pass with the BLAS version wildcarded (empty string).
  std::get<6>(key) = std::string{};
  add_matching_disabled_algorithms_to_result(key);
  return algorithms;
}

// Renders `hlo` followed by its serialized backend config in the same
// ", backend_config=..." form used as the map key above, so that textual
// denylist entries and runtime instructions normalize identically.
std::string HloStringWithGpuBackendConfig(const std::string& hlo,
                                          GpuBackendConfig config) {
  BackendConfigWrapper backend_config(config);
  return absl::StrCat(hlo, ", backend_config=", backend_config.GetRawString());
}

}  // namespace gpu
}  // namespace xla
#include "xla/service/gpu/hlo_algorithm_denylist.h"

#include <cstdlib>
#include <string>

#include "absl/strings/str_cat.h"
#include "xla/stream_executor/dnn.h"
#include "xla/tests/test_utils.h"
#include "tsl/platform/env.h"
#include "tsl/platform/path.h"
#include "tsl/platform/test.h"

namespace xla {
namespace gpu {
namespace {

// Test fixture that points --xla_gpu_algorithm_denylist_path at the checked-in
// test denylist (service/gpu/data/hlo_algorithm_denylist.pbtxt) by appending
// to XLA_FLAGS, and prepares a GpuBackendConfig matching the entries in that
// file.
class DenylistTest : public testing::Test {
 protected:
  DenylistTest() {
    // Preserve any flags already set in the environment; the denylist path is
    // appended rather than replacing XLA_FLAGS wholesale.
    std::string existing_xla_flags;
    const char* env = std::getenv("XLA_FLAGS");
    if (env != nullptr) {
      existing_xla_flags = absl::StrCat(env, " ");
    }
    tsl::setenv(
        "XLA_FLAGS",
        absl::StrCat(
            existing_xla_flags, "--xla_gpu_algorithm_denylist_path=",
            tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "service", "gpu",
                              "data", "hlo_algorithm_denylist.pbtxt"))
            .data(),
        /*overwrite=*/1);
    // Backend config used by every test below; must match the entries in the
    // denylist file byte-for-byte once serialized.
    config_ = ParseTextProto<GpuBackendConfig>(
                  "operation_queue_id: 0 wait_on_operation_queues: [] "
                  "cudnn_conv_backend_config: { activation_mode: kNone "
                  "conv_result_scale: 1 side_input_scale: 0 leakyrelu_alpha: 0} "
                  "force_earliest_schedule: false")
                  .value();
  }

  GpuBackendConfig config_;
};

// A (cc, cudnn, blas, hlo) tuple present in the file denylist yields all the
// disabled algorithms listed for it.
TEST_F(DenylistTest, DefaultTest) {
  ComputeCapability cc;
  cc.set_major(7);
  cc.set_minor(0);
  CudnnVersion cudnn_version;
  cudnn_version.set_major(7);
  cudnn_version.set_minor(6);
  cudnn_version.set_patch(2);
  auto list = GetDisabledConvAlgorithms(
      cc, cudnn_version, /*blas_version=*/"9000",
      HloStringWithGpuBackendConfig(
          R"((f16[256,112,112,64]{3,2,1,0}, u8[0]{0}) custom-call(f16[256,224,224,4]{3,2,1,0}, f16[7,7,4,64]{2,1,0,3}), window={size=7x7 stride=2x2 pad=3_3x3_3}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward")",
          config_));
  EXPECT_THAT(list, testing::UnorderedElementsAre(
                        stream_executor::dnn::AlgorithmDesc{0, true},
                        stream_executor::dnn::AlgorithmDesc{0, false},
                        stream_executor::dnn::AlgorithmDesc{1, true},
                        stream_executor::dnn::AlgorithmDesc{1, false},
                        stream_executor::dnn::AlgorithmDesc{42, true},
                        stream_executor::dnn::AlgorithmDesc{42, false}));
}

// An HLO string that matches no denylist entry yields no disabled algorithms.
TEST_F(DenylistTest, NegativeTest) {
  ComputeCapability cc;
  cc.set_major(7);
  cc.set_minor(0);
  CudnnVersion cudnn_version;
  cudnn_version.set_major(7);
  cudnn_version.set_minor(6);
  // BUGFIX: was a duplicate set_minor(2), which overwrote minor to 2 and left
  // patch at 0; every sibling test sets patch. Intended version is 7.6.2.
  cudnn_version.set_patch(2);
  auto list =
      GetDisabledConvAlgorithms(cc, cudnn_version, "9000", R"(invalid hlo)");
  EXPECT_THAT(list, testing::IsEmpty());
}

// Entries with no blas_version field apply regardless of the BLAS version the
// caller reports (the lookup wildcards the BLAS component on a second pass).
TEST_F(DenylistTest, NoBlasVersionSet) {
  ComputeCapability cc;
  cc.set_major(7);
  cc.set_minor(0);
  CudnnVersion cudnn_version;
  cudnn_version.set_major(7);
  cudnn_version.set_minor(6);
  cudnn_version.set_patch(2);
  auto list = GetDisabledConvAlgorithms(
      cc, cudnn_version, /*blas_version=*/"120301",
      HloStringWithGpuBackendConfig(
          R"((f16[256,112,112,64]{3,2,1,0}, u8[0]{0}) custom-call(f16[256,224,224,4]{3,2,1,0}, f16[7,7,4,64]{2,1,0,3}), window={size=7x7 stride=2x2 pad=3_3x3_3}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward")",
          config_));
  EXPECT_THAT(list, testing::UnorderedElementsAre(
                        stream_executor::dnn::AlgorithmDesc{42, true},
                        stream_executor::dnn::AlgorithmDesc{42, false}));
}

// The compiled-in kDefaultDenylist is consulted in addition to the file.
TEST_F(DenylistTest, EntryFromHardcodedList) {
  ComputeCapability cc;
  cc.set_major(7);
  cc.set_minor(0);
  CudnnVersion cudnn_version;
  cudnn_version.set_major(9);
  cudnn_version.set_minor(0);
  cudnn_version.set_patch(0);
  auto list = GetDisabledConvAlgorithms(
      cc, cudnn_version, /*blas_version=*/"9000",
      HloStringWithGpuBackendConfig(
          R"((f32[512,512,7,7]{3,2,1,0}, u8[0]{0}) custom-call(f32[512,512,7,7]{3,2,1,0}, f32[512,512,3,3]{3,2,1,0}, f32[512]{0}), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_oi01->bf01, custom_call_target="__cudnn$convBiasActivationForward")",
          config_));
  EXPECT_THAT(list, testing::ElementsAre(
                        stream_executor::dnn::AlgorithmDesc{14, false}));
}

}  // namespace
}  // namespace gpu
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/hlo_algorithm_denylist.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/hlo_algorithm_denylist_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
0147930e-0d97-4677-8292-daa6da3d7261
cpp
tensorflow/tensorflow
matmul_utils
third_party/xla/xla/service/gpu/matmul_utils.cc
third_party/xla/xla/service/gpu/matmul_utils_test.cc
#include "xla/service/gpu/matmul_utils.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <optional> #include <string> #include <tuple> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" #include "xla/autotuning.pb.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/primitive_util.h" #include "xla/service/algorithm_util.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/stream_executor/blas.h" #include "xla/stream_executor/device_memory.h" #include "xla/stream_executor/gpu/gpu_blas_lt.h" #include "xla/stream_executor/numeric_options.h" #include "xla/stream_executor/stream_executor.h" #include "xla/types.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { absl::StatusOr<std::vector<int64_t>> GetNonContractingDims( const Shape& shape, absl::Span<const int64_t> batch_dims, absl::Span<const int64_t> contracting_dims) { std::vector<int64_t> non_contracting_dims; for (int64_t dim = 0; dim < shape.rank(); ++dim) { bool is_batch = absl::c_count(batch_dims, dim) != 0; bool is_contracting = absl::c_count(contracting_dims, dim) != 0; TF_RET_CHECK(!(is_batch && is_contracting)); if (!(is_batch || is_contracting)) non_contracting_dims.push_back(dim); } TF_RET_CHECK(batch_dims.size() + contracting_dims.size() + non_contracting_dims.size() == shape.rank()); return non_contracting_dims; } const tsl::protobuf::RepeatedField<int64_t>& BatchDimensionsForOperand( const HloInstruction& dot, const int operand_number) { const DotDimensionNumbers& 
dimension_numbers = dot.dot_dimension_numbers(); if (operand_number == 0) { return dimension_numbers.lhs_batch_dimensions(); } return dimension_numbers.rhs_batch_dimensions(); } absl::StatusOr<int64_t> ContractingDimensionIndex(const HloInstruction& dot, const int operand_number) { const DotDimensionNumbers& dimension_numbers = dot.dot_dimension_numbers(); if (operand_number == 0) { TF_RET_CHECK(dimension_numbers.lhs_contracting_dimensions().size() == 1); return dimension_numbers.lhs_contracting_dimensions(0); } TF_RET_CHECK(dimension_numbers.rhs_contracting_dimensions().size() == 1); return dimension_numbers.rhs_contracting_dimensions(0); } absl::StatusOr<int64_t> NonContractingDimensionIndex(const HloInstruction& dot, const int operand_number) { TF_ASSIGN_OR_RETURN(int64_t contracting_dim, ContractingDimensionIndex(dot, operand_number)); TF_ASSIGN_OR_RETURN( std::vector<int64_t> non_contracting_dims, GetNonContractingDims(dot.operand(operand_number)->shape(), BatchDimensionsForOperand(dot, operand_number), {contracting_dim})); TF_RET_CHECK(non_contracting_dims.size() == 1); return non_contracting_dims.front(); } absl::StatusOr<Shape> GetBatchRowColumnShape( const Shape& shape, absl::Span<const int64_t> batch_dims, absl::Span<const int64_t> row_dims, absl::Span<const int64_t> col_dims) { TF_RET_CHECK(shape.has_layout()); std::vector<int64_t> minor_to_major; for (size_t i = 0; i < shape.rank();) { auto check_physically_sequential = [&](absl::Span<const int64_t> dims) -> absl::Status { for (auto it = dims.rbegin(); it != dims.rend(); ++it) { if (*it != shape.layout().minor_to_major()[i++]) return InvalidArgument("dims not physically_sequential"); } return absl::OkStatus(); }; int64_t dim = shape.layout().minor_to_major()[i]; if (!row_dims.empty() && dim == row_dims.back()) { minor_to_major.push_back(1); TF_RETURN_IF_ERROR(check_physically_sequential(row_dims)); } else if (!col_dims.empty() && dim == col_dims.back()) { minor_to_major.push_back(2); 
TF_RETURN_IF_ERROR(check_physically_sequential(col_dims)); } else if (!batch_dims.empty() && (dim == batch_dims.back())) { minor_to_major.push_back(0); TF_RETURN_IF_ERROR(check_physically_sequential(batch_dims)); } else { return InvalidArgument("dims not physically sequential"); } } if (col_dims.empty()) minor_to_major.push_back(2); if (row_dims.empty()) minor_to_major.push_back(1); if (batch_dims.empty()) minor_to_major.push_back(0); auto dim_size = [&](absl::Span<const int64_t> dims) { return absl::c_accumulate(dims, 1, [&](int64_t size, int64_t dim) { return size * shape.dimensions(dim); }); }; return ShapeUtil::MakeShapeWithDenseLayout( shape.element_type(), {dim_size(batch_dims), dim_size(row_dims), dim_size(col_dims)}, minor_to_major); } absl::StatusOr<MatrixLayout> MatrixLayout::For(const Shape& shape) { TF_RET_CHECK(shape.rank() == 3); TF_RET_CHECK(shape.has_layout()); int64_t batch_size = shape.dimensions(0); int64_t num_rows = shape.dimensions(1); int64_t num_cols = shape.dimensions(2); Order order{Order::kRowMajor}; int64_t leading_dim_stride = num_cols; int64_t batch_stride = num_rows * num_cols; absl::Span<const int64_t> minor_to_major = shape.layout().minor_to_major(); switch (64 * minor_to_major[2] + 8 * minor_to_major[1] + minor_to_major[0]) { case 012: break; case 021: order = Order::kColumnMajor; leading_dim_stride = num_rows; break; case 0102: leading_dim_stride = batch_size * num_cols; batch_stride = num_cols; break; case 0201: order = Order::kColumnMajor; leading_dim_stride = batch_size * num_rows; batch_stride = num_rows; break; default: return Unimplemented("batch in most minor dimension"); } if (batch_size == 1) { batch_stride = 0; } return MatrixLayout{se::gpu::MatrixLayout{shape.element_type(), num_rows, num_cols, order, batch_size, leading_dim_stride, batch_stride}}; } absl::StatusOr<MatrixLayout> MatrixLayout::For( const Shape& shape, absl::Span<const int64_t> batch_dims, absl::Span<const int64_t> row_dims, absl::Span<const int64_t> 
col_dims) { TF_ASSIGN_OR_RETURN( Shape batch_row_col_shape, GetBatchRowColumnShape(shape, batch_dims, row_dims, col_dims)); return MatrixLayout::For(batch_row_col_shape); } absl::StatusOr<MatrixLayout> MatrixLayout::For( const Shape& shape, size_t lhs_num_batch_dims, size_t lhs_num_row_dims, size_t rhs_num_batch_dims, size_t rhs_num_col_dims) { size_t num_batch_dims = std::max(lhs_num_batch_dims, rhs_num_batch_dims); TF_RET_CHECK(shape.rank() == num_batch_dims + lhs_num_row_dims + rhs_num_col_dims); std::vector<int64_t> dims(shape.rank()); absl::c_iota(dims, 0); auto batch_dims = absl::Span<const int64_t>(dims).first(num_batch_dims); auto row_dims = absl::Span<const int64_t>(dims).subspan(num_batch_dims, lhs_num_row_dims); auto col_dims = absl::Span<const int64_t>(dims).last(rhs_num_col_dims); return MatrixLayout::For(shape, batch_dims, row_dims, col_dims); } namespace { std::vector<int64_t> NormalizedRelativeOrder(absl::Span<const int64_t> dims) { std::vector<int64_t> indices(dims.size()); absl::c_iota(indices, 0); absl::c_sort(indices, [&](int64_t a, int64_t b) { return dims[a] < dims[b]; }); return indices; } } absl::StatusOr<bool> CanFoldTransposeOperandIntoDot(const HloInstruction& dot, int64_t operand_idx) { if (Cast<HloDotInstruction>(&dot)->sparse_operands()) { return false; } TF_RET_CHECK(dot.opcode() == HloOpcode::kDot); TF_RET_CHECK(dot.operand_count() > operand_idx); const HloInstruction& transpose = *dot.operand(operand_idx); TF_RET_CHECK(transpose.opcode() == HloOpcode::kTranspose); const DotDimensionNumbers& dot_dims = dot.dot_dimension_numbers(); auto transposed = [&](const auto& dims) { std::vector<int64_t> transposed_dims; transposed_dims.reserve(dims.size()); for (int64_t dim : dims) { transposed_dims.push_back(transpose.dimensions(dim)); } return transposed_dims; }; auto batch_dims = (operand_idx == 0) ? dot_dims.lhs_batch_dimensions() : dot_dims.rhs_batch_dimensions(); auto contracting_dims = (operand_idx == 0) ? 
dot_dims.lhs_contracting_dimensions() : dot_dims.rhs_contracting_dimensions(); TF_ASSIGN_OR_RETURN( std::vector<int64_t> non_contracting_dims, GetNonContractingDims(transpose.shape(), batch_dims, contracting_dims)); auto transposed_non_contracting_dims = transposed(non_contracting_dims); if (NormalizedRelativeOrder(non_contracting_dims) != NormalizedRelativeOrder(transposed_non_contracting_dims)) { return false; } return MatrixLayout::For(transpose.operand(0)->shape(), transposed(batch_dims), transposed(contracting_dims), transposed_non_contracting_dims) .ok(); } absl::StatusOr<GemmConfig> GemmConfig::For( const Shape& lhs_shape, absl::Span<const int64_t> lhs_batch_dims, absl::Span<const int64_t> lhs_contracting_dims, const Shape& rhs_shape, absl::Span<const int64_t> rhs_batch_dims, absl::Span<const int64_t> rhs_contracting_dims, const Shape& output_shape, double alpha_real, double alpha_imag, double beta, PrecisionConfig::Algorithm precision_algorithm, std::optional<int64_t> algorithm, int64_t compute_precision, bool grad_x, bool grad_y) { return GemmConfig::For(lhs_shape, lhs_batch_dims, lhs_contracting_dims, rhs_shape, rhs_batch_dims, rhs_contracting_dims, output_shape, nullptr, output_shape, alpha_real, alpha_imag, beta, precision_algorithm, algorithm, compute_precision, grad_x, grad_y); } absl::StatusOr<GemmConfig> GemmConfig::For( const Shape& lhs_shape, absl::Span<const int64_t> lhs_batch_dims, absl::Span<const int64_t> lhs_contracting_dims, const Shape& rhs_shape, absl::Span<const int64_t> rhs_batch_dims, absl::Span<const int64_t> rhs_contracting_dims, const Shape& c_shape, const Shape* bias_shape_ptr, const Shape& output_shape, double alpha_real, double alpha_imag, double beta, PrecisionConfig::Algorithm precision_algorithm, std::optional<int64_t> algorithm, int64_t compute_precision, bool grad_x, bool grad_y) { absl::Span<const int64_t> lhs_col_dims = lhs_contracting_dims; TF_ASSIGN_OR_RETURN( std::vector<int64_t> lhs_row_dims, 
GetNonContractingDims(lhs_shape, lhs_batch_dims, lhs_col_dims)); TF_ASSIGN_OR_RETURN( MatrixLayout lhs_layout, MatrixLayout::For(lhs_shape, lhs_batch_dims, lhs_row_dims, lhs_col_dims)); absl::Span<const int64_t> rhs_row_dims = rhs_contracting_dims; TF_ASSIGN_OR_RETURN( std::vector<int64_t> rhs_col_dims, GetNonContractingDims(rhs_shape, rhs_batch_dims, rhs_row_dims)); TF_ASSIGN_OR_RETURN( MatrixLayout rhs_layout, MatrixLayout::For(rhs_shape, rhs_batch_dims, rhs_row_dims, rhs_col_dims)); int64_t num_batch_dims = std::max(lhs_batch_dims.size(), rhs_batch_dims.size()); TF_RET_CHECK(output_shape.rank() == num_batch_dims + lhs_row_dims.size() + rhs_col_dims.size()); std::vector<int64_t> output_dims(output_shape.rank()); absl::c_iota(output_dims, 0); auto output_batch_dims = absl::Span<const int64_t>(output_dims).first(num_batch_dims); auto output_row_dims = absl::Span<const int64_t>(output_dims) .subspan(num_batch_dims, lhs_row_dims.size()); auto output_col_dims = absl::Span<const int64_t>(output_dims).last(rhs_col_dims.size()); TF_ASSIGN_OR_RETURN(MatrixLayout output_layout, MatrixLayout::For(output_shape, output_batch_dims, output_row_dims, output_col_dims)); Shape c_matrix_shape = c_shape; if (primitive_util::IsF8Type(lhs_shape.element_type()) && primitive_util::IsF8Type(output_shape.element_type()) && beta == 0.0) { #if GOOGLE_CUDA c_matrix_shape.set_element_type( bias_shape_ptr != nullptr ? 
bias_shape_ptr->element_type() : BF16); #endif } TF_ASSIGN_OR_RETURN(MatrixLayout c_layout, MatrixLayout::For(c_matrix_shape, output_batch_dims, output_row_dims, output_col_dims)); if (lhs_shape.element_type() != F8E4M3FN && lhs_shape.element_type() != F8E5M2) { TF_RET_CHECK(lhs_layout.num_cols == rhs_layout.num_rows); TF_RET_CHECK(output_layout.num_rows == lhs_layout.num_rows); TF_RET_CHECK(output_layout.num_cols == rhs_layout.num_cols); } TF_RET_CHECK(c_layout.num_rows == output_layout.num_rows); TF_RET_CHECK(c_layout.num_cols == output_layout.num_cols); TF_RET_CHECK((lhs_layout.batch_size == output_layout.batch_size) || (lhs_layout.batch_size == 1)); TF_RET_CHECK((rhs_layout.batch_size == output_layout.batch_size) || (rhs_layout.batch_size == 1)); switch (output_shape.element_type()) { case F8E4M3FN: case F8E5M2: case F8E4M3FNUZ: case F8E5M2FNUZ: case F16: case BF16: case F32: case F64: TF_RET_CHECK(alpha_imag == 0); break; case C64: case C128: break; case S32: TF_RET_CHECK(alpha_imag == 0); if (lhs_layout.dtype != PrimitiveType::S8 || rhs_layout.dtype != PrimitiveType::S8) { return Internal( "For int32 gemm output only int8 input is supported, got input: " "%s, %s", primitive_util::LowercasePrimitiveTypeName(lhs_layout.dtype), primitive_util::LowercasePrimitiveTypeName(rhs_layout.dtype)); } break; default: return Internal("Unexpected GEMM datatype: %s", primitive_util::LowercasePrimitiveTypeName( output_shape.element_type())); } return GemmConfig{lhs_layout, rhs_layout, c_layout, output_layout, {alpha_real, alpha_imag}, beta, compute_precision, precision_algorithm, algorithm, grad_x, grad_y}; } namespace { bool IsTf32Allowed(PrecisionConfig::Algorithm algorithm, int64_t compute_precision) { if (algorithm == PrecisionConfig::ALG_UNSET) { return compute_precision <= 1; } return algorithm_util::HasTf32InputType(algorithm); } } absl::StatusOr<GemmConfig> GemmConfig::For( const HloInstruction* gemm) { TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config, 
gemm->backend_config<GpuBackendConfig>()); return For(gemm, gpu_config.gemm_backend_config()); } absl::StatusOr<GemmConfig> GemmConfig::For( const HloInstruction* gemm, const GemmBackendConfig& config) { std::optional<int64_t> algorithm; if (config.algorithm_case() != GemmBackendConfig::ALGORITHM_NOT_SET) { algorithm = config.selected_algorithm(); } else { algorithm = se::blas::kDefaultAlgorithm; } const Shape& lhs_shape = gemm->operand(0)->shape(); const Shape& rhs_shape = gemm->operand(1)->shape(); const DotDimensionNumbers& dot_dims = config.dot_dimension_numbers(); const Shape& output_shape = gemm->shape().IsTuple() ? gemm->shape().tuple_shapes(0) : gemm->shape(); bool has_matrix_bias = config.beta() != 0.; Shape c_shape = has_matrix_bias ? gemm->operand(2)->shape() : output_shape; std::optional<Shape> vector_bias_shape; TF_ASSIGN_OR_RETURN( bool has_vector_bias, xla::gpu::gpublas_lt::EpilogueAddsVectorBias(config.epilogue())); if (has_vector_bias) { int vector_bias_index = has_matrix_bias ? 3 : 2; if (primitive_util::IsF8Type(lhs_shape.element_type())) { vector_bias_index += 2; } vector_bias_shape = gemm->operand(vector_bias_index)->shape(); } auto attributes = gemm->frontend_attributes().map(); bool grad_x = (attributes["grad_x"] == "true"); bool grad_y = (attributes["grad_y"] == "true"); int64_t precision = se::blas::kDefaultComputePrecision; for (auto operand_precision : config.precision_config().operand_precision()) { precision = std::max(precision, static_cast<int64_t>(operand_precision)); } const PrecisionConfig::Algorithm precision_algorithm = config.precision_config().algorithm(); return GemmConfig::For( lhs_shape, dot_dims.lhs_batch_dimensions(), dot_dims.lhs_contracting_dimensions(), rhs_shape, dot_dims.rhs_batch_dimensions(), dot_dims.rhs_contracting_dimensions(), c_shape, vector_bias_shape ? 
&vector_bias_shape.value() : nullptr, output_shape, config.alpha_real(), config.alpha_imag(), config.beta(), precision_algorithm, algorithm, precision, grad_x, grad_y); } absl::StatusOr<GemmConfig::DescriptorsTuple> GemmConfig::GetMatrixDescriptors( se::DeviceMemoryBase lhs_buf, se::DeviceMemoryBase rhs_buf, se::DeviceMemoryBase out_buf) const { auto create_matrix_desc = [](const se::gpu::MatrixLayout& layout, se::DeviceMemoryBase data) -> absl::StatusOr<se::gpu::MatrixDescriptor> { TF_ASSIGN_OR_RETURN(se::blas::DataType type, se::gpu::AsBlasDataType(layout.dtype)); return se::gpu::MatrixDescriptor{ data, layout.leading_dim_stride, layout.batch_stride, type, (layout.order == se::gpu::MatrixLayout::Order::kColumnMajor ? se::blas::Transpose::kNoTranspose : se::blas::Transpose::kTranspose)}; }; se::gpu::MatrixLayout lhs = lhs_layout, rhs = rhs_layout, out = output_layout; bool must_swap_operands = MakeOutputColumnMajor(lhs, rhs, out); if (must_swap_operands) { std::swap(lhs_buf, rhs_buf); } TF_ASSIGN_OR_RETURN(se::gpu::OutputMatrixDescriptor out_desc, create_matrix_desc(out, out_buf)); out_desc.batch_size = out.batch_size; out_desc.m = out.num_rows; out_desc.n = out.num_cols; out_desc.k = lhs.num_cols; TF_ASSIGN_OR_RETURN(out_desc.compute_type, se::gpu::GetBlasComputationType( PrecisionConfig::ALG_UNSET, lhs.dtype, out.dtype, se::blas::kDefaultComputePrecision)); TF_ASSIGN_OR_RETURN(se::gpu::MatrixDescriptor lhs_desc, create_matrix_desc(lhs, lhs_buf)); TF_ASSIGN_OR_RETURN(se::gpu::MatrixDescriptor rhs_desc, create_matrix_desc(rhs, rhs_buf)); return DescriptorsTuple{lhs_desc, rhs_desc, out_desc, must_swap_operands}; } namespace { template <typename Scale, typename Input, typename Output> absl::Status DoGemmWithAlgorithm(const se::gpu::MatrixDescriptor& lhs, const se::gpu::MatrixDescriptor& rhs, const se::gpu::OutputMatrixDescriptor& output, se::DeviceMemoryBase workspace, Scale alpha, Scale beta, se::Stream* stream, PrecisionConfig::Algorithm precision_algorithm, 
se::blas::AlgorithmType algorithm, se::blas::ComputePrecision compute_precision, const se::NumericOptions& numeric_options, se::blas::ProfileResult* profile_result, se::blas::CallContext context) { CHECK(output.transpose == se::blas::Transpose::kNoTranspose); PrimitiveType lhs_type = primitive_util::NativeToPrimitiveType<Input>(); PrimitiveType output_type = primitive_util::NativeToPrimitiveType<Output>(); TF_ASSIGN_OR_RETURN( se::blas::ComputationType computation_type, se::gpu::GetBlasComputationType(precision_algorithm, lhs_type, output_type, compute_precision)); se::DeviceMemory<Output> output_data(output.data); auto* blas = stream->parent()->AsBlas(); if (blas == nullptr) { return absl::InternalError("No Blas support for stream"); } se::blas::BlasSupport::ScopedWorkspace scoped_workspace(blas, &workspace); if (output.batch_size != 1) { return blas->BlasGemmStridedBatchedWithAlgorithm( stream, lhs.transpose, rhs.transpose, output.m, output.n, output.k, alpha, lhs.cast<Input>(), lhs.leading_dim_stride, lhs.batch_stride, rhs.cast<Input>(), rhs.leading_dim_stride, rhs.batch_stride, beta, &output_data, output.leading_dim_stride, output.batch_stride, output.batch_size, computation_type, algorithm, numeric_options, profile_result, context); } else { return blas->BlasGemmWithAlgorithm( stream, lhs.transpose, rhs.transpose, output.m, output.n, output.k, alpha, lhs.cast<Input>(), lhs.leading_dim_stride, rhs.cast<Input>(), rhs.leading_dim_stride, beta, &output_data, output.leading_dim_stride, computation_type, algorithm, numeric_options, profile_result, context); } } template <typename Scale, typename Input, typename Output> absl::Status DoGemm(const se::gpu::MatrixDescriptor& lhs, const se::gpu::MatrixDescriptor& rhs, const se::gpu::OutputMatrixDescriptor& output, se::DeviceMemoryBase workspace, Scale alpha, Scale beta, se::Stream* stream, PrecisionConfig::Algorithm precision_algorithm, std::optional<se::blas::AlgorithmType> algorithm, se::blas::ComputePrecision 
// NOTE(review): this chunk begins mid-signature of a templated DoGemm helper;
// the leading template/value parameters are declared above this view.
compute_precision, const se::NumericOptions& numeric_options,
              se::blas::ProfileResult* profile_result,
              se::blas::CallContext context) {
  // The descriptors are arranged so the output never needs a transpose.
  CHECK(output.transpose == se::blas::Transpose::kNoTranspose);
  se::DeviceMemory<Output> output_data(output.data);
  auto* blas = stream->parent()->AsBlas();
  if (blas == nullptr) {
    return absl::InternalError("No Blas support for stream");
  }
  // An explicitly selected algorithm takes the algorithm-aware path.
  if (algorithm) {
    return DoGemmWithAlgorithm<Scale, Input, Output>(
        lhs, rhs, output, workspace, alpha, beta, stream, precision_algorithm,
        *algorithm, compute_precision, numeric_options, profile_result,
        context);
  }
  // Otherwise lend the scratch workspace to the BLAS library for the duration
  // of this call (returned when scoped_workspace is destroyed).
  se::blas::BlasSupport::ScopedWorkspace scoped_workspace(blas, &workspace);

  if (output.batch_size != 1) {
    return blas->BlasGemmStridedBatched(
        stream, lhs.transpose, rhs.transpose, output.m, output.n, output.k,
        alpha, lhs.cast<Input>(), lhs.leading_dim_stride, lhs.batch_stride,
        rhs.cast<Input>(), rhs.leading_dim_stride, rhs.batch_stride, beta,
        &output_data, output.leading_dim_stride, output.batch_stride,
        output.batch_size, numeric_options, context);
  }

  return blas->BlasGemm(stream, lhs.transpose, rhs.transpose, output.m,
                        output.n, output.k, alpha, lhs.cast<Input>(),
                        lhs.leading_dim_stride, rhs.cast<Input>(),
                        rhs.leading_dim_stride, beta, &output_data,
                        output.leading_dim_stride, numeric_options, context);
}

}  // namespace

// Runs the GEMM described by `config` on `stream`, dispatching on the
// (lhs, rhs, output) element-type triple to a typed DoGemm instantiation.
//
// `deterministic_ops` feeds the BLAS numeric options; `algorithm`, when set,
// overrides the algorithm stored in `config`; `profile_result`, when non-null,
// receives timing information for autotuning.
absl::Status RunGemm(const GemmConfig& config, se::DeviceMemoryBase lhs_buffer,
                     se::DeviceMemoryBase rhs_buffer,
                     se::DeviceMemoryBase output_buffer,
                     se::DeviceMemoryBase workspace_buffer,
                     bool deterministic_ops, se::Stream* stream,
                     std::optional<se::blas::AlgorithmType> algorithm,
                     se::blas::ProfileResult* profile_result) {
  VLOG(2) << "Executing a GemmThunk";

  TF_ASSIGN_OR_RETURN(
      GemmConfig::DescriptorsTuple desc,
      config.GetMatrixDescriptors(lhs_buffer, rhs_buffer, output_buffer));

  se::NumericOptions numeric_options{
      deterministic_ops,
      IsTf32Allowed(config.precision_algorithm, config.compute_precision)};

  // The caller-supplied algorithm wins; otherwise fall back to the config's.
  if (!algorithm) algorithm = config.algorithm;

  // Map grad_x/grad_y onto a BLAS call context. If descriptor computation
  // swapped the operands, the backprop-input index swaps accordingly.
  se::blas::CallContext context = se::blas::CallContext::kNone;
  if (config.grad_x) {
    context = desc.operands_swapped ? se::blas::CallContext::kBackpropInput2
                                    : se::blas::CallContext::kBackpropInput1;
  }
  if (config.grad_y) {
    context = desc.operands_swapped ? se::blas::CallContext::kBackpropInput1
                                    : se::blas::CallContext::kBackpropInput2;
  }

  std::tuple operand_types{config.lhs_layout.dtype, config.rhs_layout.dtype,
                           config.output_layout.dtype};

  // alpha == 0 and beta == 0 means the result is identically zero; write
  // zeros directly instead of launching a GEMM.
  if (config.alpha.real() == 0.0 && config.alpha.imag() == 0.0 &&
      config.beta == 0.0) {
    return stream->MemZero(&output_buffer, output_buffer.size());
  }

// Dispatch helper: when the operand dtypes match (ATYPE, BTYPE, CTYPE), run
// DoGemm with the corresponding native types, using the real part of alpha
// as the scale.
#define TYPED_GEMM(SCALENTYPE, ATYPE, BTYPE, CTYPE)                         \
  if (operand_types == std::make_tuple(ATYPE, BTYPE, CTYPE)) {              \
    using NativeScaleType =                                                 \
        primitive_util::PrimitiveTypeToNative<SCALENTYPE>::type;            \
    using NativeAType = primitive_util::PrimitiveTypeToNative<ATYPE>::type; \
    using NativeCType = primitive_util::PrimitiveTypeToNative<CTYPE>::type; \
    return DoGemm<NativeScaleType, NativeAType, NativeCType>(               \
        desc.lhs, desc.rhs, desc.output, workspace_buffer,                  \
        static_cast<NativeScaleType>(config.alpha.real()),                  \
        static_cast<NativeScaleType>(config.beta), stream,                  \
        config.precision_algorithm, algorithm, config.compute_precision,    \
        numeric_options, profile_result, context);                          \
  }

// Same as TYPED_GEMM but keeps the full complex-valued alpha.
#define TYPED_GEMM_COMPLEX(SCALENTYPE, ATYPE, BTYPE, CTYPE)                 \
  if (operand_types == std::make_tuple(ATYPE, BTYPE, CTYPE)) {              \
    using NativeScaleType =                                                 \
        primitive_util::PrimitiveTypeToNative<SCALENTYPE>::type;            \
    using NativeAType = primitive_util::PrimitiveTypeToNative<ATYPE>::type; \
    using NativeCType = primitive_util::PrimitiveTypeToNative<CTYPE>::type; \
    return DoGemm<NativeScaleType, NativeAType, NativeCType>(               \
        desc.lhs, desc.rhs, desc.output, workspace_buffer,                  \
        static_cast<NativeScaleType>(config.alpha),                         \
        static_cast<NativeScaleType>(config.beta), stream,                  \
        config.precision_algorithm, algorithm, config.compute_precision,    \
        numeric_options, profile_result, context);                          \
  }

  // int8 x int8 -> int32 bypasses the macros and uses the algorithm-aware
  // path directly, with a default algorithm if none was chosen.
  if (config.output_layout.dtype == S32) {
    if (!algorithm) algorithm = se::blas::kDefaultGemmAlgo;
    return DoGemmWithAlgorithm<int32_t, int8_t, int32_t>(
        desc.lhs, desc.rhs, desc.output, workspace_buffer,
        static_cast<int32_t>(config.alpha.real()),
        static_cast<int32_t>(config.beta), stream, PrecisionConfig::ALG_UNSET,
        *algorithm, se::blas::kDefaultComputePrecision, numeric_options,
        profile_result, context);
  }

  TYPED_GEMM(F32, BF16, BF16, BF16)
  TYPED_GEMM(F32, F16, F16, F16)
  TYPED_GEMM(F32, S8, S8, F32)
  TYPED_GEMM(F32, BF16, BF16, F32)
  TYPED_GEMM(F32, F16, F16, F32)
  TYPED_GEMM(F32, F32, F32, F32)
  TYPED_GEMM(F64, F64, F64, F64)
  TYPED_GEMM_COMPLEX(C64, C64, C64, C64)
  TYPED_GEMM_COMPLEX(C128, C128, C128, C128)

#undef TYPED_GEMM
#undef TYPED_GEMM_COMPLEX

  return Internal(
      "Unexpected GEMM dtype: %s %s %s",
      primitive_util::LowercasePrimitiveTypeName(config.lhs_layout.dtype),
      primitive_util::LowercasePrimitiveTypeName(config.rhs_layout.dtype),
      primitive_util::LowercasePrimitiveTypeName(config.output_layout.dtype));
}

namespace gpublas_lt {

// Returns true iff `epilogue` consumes a broadcast vector-bias input.
absl::StatusOr<bool> EpilogueAddsVectorBias(
    GemmBackendConfig_Epilogue epilogue) {
  switch (epilogue) {
    case GemmBackendConfig::DEFAULT:
    case GemmBackendConfig::RELU:
    case GemmBackendConfig::GELU:
    case GemmBackendConfig::GELU_AUX:
      return false;
    case GemmBackendConfig::BIAS:
    case GemmBackendConfig::BIAS_RELU:
    case GemmBackendConfig::BIAS_GELU:
    case GemmBackendConfig::BIAS_GELU_AUX:
      return true;
    default:
      return Internal("Unknown Epilogue.");
  }
}

// Returns true iff `epilogue` produces an extra auxiliary output (the
// *_AUX variants).
absl::StatusOr<bool> EpilogueHasAuxiliaryOutput(
    GemmBackendConfig_Epilogue epilogue) {
  switch (epilogue) {
    case GemmBackendConfig::DEFAULT:
    case GemmBackendConfig::RELU:
    case GemmBackendConfig::GELU:
    case GemmBackendConfig::BIAS:
    case GemmBackendConfig::BIAS_RELU:
    case GemmBackendConfig::BIAS_GELU:
      return false;
    case GemmBackendConfig::GELU_AUX:
    case GemmBackendConfig::BIAS_GELU_AUX:
      return true;
    default:
      return Internal("Unknown Epilogue.");
  }
}

// Maps a GemmBackendConfig epilogue onto the equivalent BlasLt epilogue enum.
absl::StatusOr<se::gpu::BlasLt::Epilogue> AsBlasLtEpilogue(
    GemmBackendConfig_Epilogue epilogue) {
  switch (epilogue) {
    case GemmBackendConfig::DEFAULT:
      return se::gpu::BlasLt::Epilogue::kDefault;
    case GemmBackendConfig::RELU:
      return se::gpu::BlasLt::Epilogue::kReLU;
    case GemmBackendConfig::GELU:
      return se::gpu::BlasLt::Epilogue::kGELU;
    case GemmBackendConfig::GELU_AUX:
      return se::gpu::BlasLt::Epilogue::kGELUWithAux;
    case GemmBackendConfig::BIAS:
      return se::gpu::BlasLt::Epilogue::kBias;
    case GemmBackendConfig::BIAS_RELU:
      return se::gpu::BlasLt::Epilogue::kBiasThenReLU;
    case GemmBackendConfig::BIAS_GELU:
      return se::gpu::BlasLt::Epilogue::kBiasThenGELU;
    case GemmBackendConfig::BIAS_GELU_AUX:
      return se::gpu::BlasLt::Epilogue::kBiasThenGELUWithAux;
    default:
      return Internal("unexpected epilogue value");
  }
}

}  // namespace gpublas_lt

// Validates an autotuner proto and converts it into a TritonGemmConfig.
// All tiling parameters must be strictly positive.
absl::StatusOr<TritonGemmConfig> TritonGemmConfig::FromProto(
    const AutotuneResult::TritonGemmKey& proto) {
  TF_RET_CHECK(proto.block_m() > 0);
  TF_RET_CHECK(proto.block_n() > 0);
  TF_RET_CHECK(proto.block_k() > 0);
  TF_RET_CHECK(proto.split_k() > 0);
  TF_RET_CHECK(proto.num_stages() > 0);
  TF_RET_CHECK(proto.num_warps() > 0);
  TF_RET_CHECK(proto.num_ctas() > 0);

  return TritonGemmConfig(proto.block_m(), proto.block_n(), proto.block_k(),
                          proto.split_k(), proto.num_stages(),
                          proto.num_warps(), proto.num_ctas());
}

// Inverse of FromProto: serializes this config into the autotuner proto.
AutotuneResult::TritonGemmKey TritonGemmConfig::ToProto() const {
  AutotuneResult::TritonGemmKey key;
  key.set_block_m(block_m);
  key.set_block_n(block_n);
  key.set_block_k(block_k);
  key.set_split_k(split_k);
  key.set_num_stages(num_stages);
  key.set_num_warps(num_warps);
  key.set_num_ctas(num_ctas);
  return key;
}

// One-line human-readable rendering of the config (e.g. for logs).
std::string TritonGemmConfig::ToString() const {
  return absl::StrCat("{block_m:", block_m, ",block_n:", block_n,
                      ",block_k:", block_k, ",split_k:", split_k,
                      ",num_stages:", num_stages, ",num_warps:", num_warps,
                      ",num_ctas:", num_ctas, "}");
}

// Size heuristic: the dot is "too small" for rewriting when
// (lhs_noncontracting + rhs_noncontracting) * contracting < threshold,
// where each factor is the product of the corresponding dimension sizes.
absl::StatusOr<bool> IsMatrixMultiplicationTooSmallForRewriting(
    const HloInstruction& dot, int64_t threshold) {
  CHECK_EQ(dot.opcode(), HloOpcode::kDot);

  const Shape& lhs_shape = dot.operand(0)->shape();
  const Shape& rhs_shape = dot.operand(1)->shape();
  const DotDimensionNumbers& dot_dims = dot.dot_dimension_numbers();

  int64_t contracting_size = 1;
  for (int64_t dim : dot_dims.lhs_contracting_dimensions()) {
    contracting_size *= lhs_shape.dimensions(dim);
  }

  TF_ASSIGN_OR_RETURN(
      std::vector<int64_t> lhs_non_contracting_dims,
      GetNonContractingDims(lhs_shape, dot_dims.lhs_batch_dimensions(),
                            dot_dims.lhs_contracting_dimensions()));
  int64_t lhs_non_contracting_size = 1;
  for (int64_t dim : lhs_non_contracting_dims) {
    lhs_non_contracting_size *= lhs_shape.dimensions(dim);
  }

  TF_ASSIGN_OR_RETURN(
      std::vector<int64_t> rhs_non_contracting_dims,
      GetNonContractingDims(rhs_shape, dot_dims.rhs_batch_dimensions(),
                            dot_dims.rhs_contracting_dimensions()));
  int64_t rhs_non_contracting_size = 1;
  for (int64_t dim : rhs_non_contracting_dims) {
    rhs_non_contracting_size *= rhs_shape.dimensions(dim);
  }

  return (rhs_non_contracting_size + lhs_non_contracting_size) *
             contracting_size <
         threshold;
}

// Returns true when the classical (elemental) emitters can handle `dot`:
// its precision algorithm must be supported by the elemental IR emitter and
// the result element type must be F16, F32 or BF16.
bool IsDotSupportedByClassicalEmitters(const HloInstruction& dot) {
  if (!algorithm_util::IsSupportedByElementalIrEmitter(
          dot.precision_config().algorithm())) {
    return false;
  }

  switch (dot.shape().element_type()) {
    case F16:
    case F32:
    case BF16:
      return true;
    default:
      return false;
  }
}

}  // namespace gpu
}  // namespace xla
// Unit tests for the GPU matmul utilities: non-contracting-dimension
// computation, transpose-into-dot folding, batch/row/column shape and
// matrix-layout derivation, and the rewrite-size heuristics.
#include "xla/service/gpu/matmul_utils.h"

#include <cstdint>
#include <memory>
#include <vector>

#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace gpu {
namespace {

using ::testing::ElementsAre;
using ::tsl::testing::IsOkAndHolds;

// Batch dim {4} and contracting dims {1, 5} leave {0, 2, 3} non-contracting.
TEST(GetNonContractingDimsTest, Valid) {
  Shape shape = ParseShape("f32[1,2,3,4,5,6]").value();
  EXPECT_THAT(GetNonContractingDims(shape, {4}, {1, 5}),
              IsOkAndHolds(ElementsAre(0, 2, 3)));
}

using CanFoldTransposeOperandIntoDotTest = HloTestBase;

// A plain row/column transpose of a non-batched operand folds into the dot.
TEST_F(CanFoldTransposeOperandIntoDotTest, ArgTransposeFoldGemm) {
  const char* hlo_text = R"(
HloModule ArgTransposeFoldGemm

ENTRY AddDotsFunc {
  x = f32[3,2] parameter(0)
  y = f32[3,4] parameter(1)
  x_transposed = f32[2,3] transpose(x), dimensions={1, 0}
  ROOT dot_a = f32[2,4] dot(x_transposed, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  auto dot = module->entry_computation()->root_instruction();
  EXPECT_THAT(CanFoldTransposeOperandIntoDot(*dot, 0), IsOkAndHolds(true));
}

// A row/column swap that leaves the batch dimension in place also folds.
TEST_F(CanFoldTransposeOperandIntoDotTest, BatchedArgRowColTransposeFoldGemm) {
  const char* hlo_text = R"(
HloModule BatchedArgRowColTransposeFoldGemm

ENTRY AddDotsFunc {
  x = f32[5,3,2] parameter(0)
  y = f32[5,3,4] parameter(1)
  x_transposed = f32[5,2,3] transpose(x), dimensions={0, 2, 1}
  ROOT dot_a = f32[5,2,4] dot(x_transposed, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  auto dot = module->entry_computation()->root_instruction();
  EXPECT_THAT(CanFoldTransposeOperandIntoDot(*dot, 0), IsOkAndHolds(true));
}

// Swapping the batch dimension with a row dimension still folds.
TEST_F(CanFoldTransposeOperandIntoDotTest, BatchRowTransposeFoldGemm) {
  const char* hlo_text = R"(
HloModule BatchRowTransposeFoldCheck

ENTRY AddDotsFunc {
  x = f32[2,5,3] parameter(0)
  y = f32[5,3,4] parameter(1)
  x_transposed = f32[5,2,3] transpose(x), dimensions={1, 0, 2}
  ROOT dot_a = f32[5,2,4] dot(x_transposed, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  auto dot = module->entry_computation()->root_instruction();
  EXPECT_THAT(CanFoldTransposeOperandIntoDot(*dot, 0), IsOkAndHolds(true));
}

// Pulling the batch dimension out of the most-minor position does not fold.
TEST_F(CanFoldTransposeOperandIntoDotTest,
       BatchFromMinorDimTransposeDoesntFold) {
  const char* hlo_text = R"(
HloModule BatchFromMinorDimTransposeDoesntFold

ENTRY AddDotsFunc {
  x = f32[3,2,5] parameter(0)
  y = f32[5,3,4] parameter(1)
  x_transposed = f32[5,2,3] transpose(x), dimensions={2, 1, 0}
  ROOT dot_a = f32[5,2,4] dot(x_transposed, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  auto dot = module->entry_computation()->root_instruction();
  EXPECT_THAT(CanFoldTransposeOperandIntoDot(*dot, 0), IsOkAndHolds(false));
}

// Transposing two non-contracting dimensions of the rhs does not fold.
TEST_F(CanFoldTransposeOperandIntoDotTest,
       TransposedNonContractingDimsDontFold) {
  const char* hlo_text = R"(
HloModule TransposedNonContractingDimsDontFold

ENTRY AddDotsFunc {
  x = f32[5,3,4]{2,1,0} parameter(1)
  y = f32[5,2,6,3]{3,1,2,0} parameter(0)
  y_transposed = f32[5,6,2,3]{3,2,1,0} transpose(y), dimensions={0, 2, 1, 3}
  ROOT dot_a = f32[5,4,6,2]{3,2,1,0} dot(x, y_transposed), lhs_contracting_dims={1}, rhs_contracting_dims={3}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  auto dot = module->entry_computation()->root_instruction();
  EXPECT_THAT(CanFoldTransposeOperandIntoDot(*dot, 1), IsOkAndHolds(false));
}

// Parameters for GetBatchRowColumnShape: an input shape, the dimension
// partition (batch/row/column), and the expected collapsed 3D shape.
struct GetBatchRowColumnShapeTestParams {
  absl::string_view shape;
  std::vector<int64_t> batch_dims;
  std::vector<int64_t> row_dims;
  std::vector<int64_t> col_dims;
  absl::string_view expected_shape;
};

using GetBatchRowColumnShapeTest =
    ::testing::TestWithParam<GetBatchRowColumnShapeTestParams>;

TEST_P(GetBatchRowColumnShapeTest, ValidShape) {
  const GetBatchRowColumnShapeTestParams& params = GetParam();
  Shape shape = ParseShape(params.shape).value();
  EXPECT_THAT(GetBatchRowColumnShape(shape, params.batch_dims, params.row_dims,
                                     params.col_dims),
              IsOkAndHolds(ParseShape(params.expected_shape).value()));
}

INSTANTIATE_TEST_SUITE_P(
    GetBatchRowColumnShapeTests, GetBatchRowColumnShapeTest,
    ::testing::ValuesIn<GetBatchRowColumnShapeTestParams>({
        {"f32[3,4]{1,0}", {}, {0}, {1}, "f32[1,3,4]{2,1,0}"},
        {"f32[3,4]{0,1}", {}, {0}, {1}, "f32[1,3,4]{1,2,0}"},
        {"f32[3,4]{1,0}", {}, {1}, {0}, "f32[1,4,3]{1,2,0}"},
        {"f32[3,4,5]{2,1,0}", {0}, {1}, {2}, "f32[3,4,5]{2,1,0}"},
        {"f32[3,4,5]{2,1,0}", {2}, {1}, {0}, "f32[5,4,3]{0,1,2}"},
        {"f32[3,4,5,6,7,8]{5,2,4,1,3,0}",
         {0, 3},
         {1, 4},
         {2, 5},
         "f32[18,28,40]{2,1,0}"},
    }));

// Batch/row/column dims that physically interleave cannot be collapsed.
TEST(GetBatchRowColumnShapeTest, BatchRowsColsInterleaved) {
  Shape shape = ParseShape("f32[3,4,5,6,7,8]{5,4,3,2,1,0}").value();
  auto result = GetBatchRowColumnShape(shape, {0, 3}, {1, 4}, {2, 5});
  EXPECT_FALSE(result.ok());
}

// A physical order that splits a dimension group is rejected.
TEST(GetBatchRowColumnShapeTest, WrongPhysicalOrder) {
  Shape shape = ParseShape("f32[3,4,5,6]{3,2,0,1}").value();
  auto result = GetBatchRowColumnShape(shape, {0, 1}, {2}, {3});
  EXPECT_FALSE(result.ok());
}

using Order = MatrixLayout::Order;

// Expected MatrixLayout fields for a given shape-with-layout.
struct GetMatrixLayoutTestParams {
  absl::string_view shape;
  int64_t batch_size;
  int64_t num_rows;
  int64_t num_cols;
  Order order;
  int64_t leading_dim_stride;
  int64_t batch_stride;
};

using GetMatrixLayoutTest = ::testing::TestWithParam<GetMatrixLayoutTestParams>;

TEST_P(GetMatrixLayoutTest, ValidShape) {
  const GetMatrixLayoutTestParams& params = GetParam();
  Shape shape = ParseShape(params.shape).value();
  MatrixLayout result = MatrixLayout::For(shape).value();
  EXPECT_EQ(result.batch_size, params.batch_size);
  EXPECT_EQ(result.num_rows, params.num_rows);
  EXPECT_EQ(result.num_cols, params.num_cols);
  EXPECT_EQ(result.order, params.order);
  EXPECT_EQ(result.leading_dim_stride, params.leading_dim_stride);
  EXPECT_EQ(result.batch_stride, params.batch_stride);
}

INSTANTIATE_TEST_SUITE_P(
    GetMatrixLayoutTests, GetMatrixLayoutTest,
    ::testing::ValuesIn<GetMatrixLayoutTestParams>({
        {"f32[3,4,5]{2,1,0}", 3, 4, 5, Order::kRowMajor, 5, 20},
        {"f32[3,4,5]{1,2,0}", 3, 4, 5, Order::kColumnMajor, 4, 20},
        {"f32[3,4,5]{2,0,1}", 3, 4, 5, Order::kRowMajor, 15, 5},
        {"f32[3,4,5]{1,0,2}", 3, 4, 5, Order::kColumnMajor, 12, 4},
    }));

// The batch dimension cannot live in the most-minor physical position.
TEST(GetMatrixLayoutTest, BatchInMostMinorPhysicalDimension) {
  Shape shape = ParseShape("f32[3,4,5]{0,2,1}").value();
  EXPECT_FALSE(MatrixLayout::For(shape).ok());
}

using GetMatrixSizeRewriteThresholdTest = HloTestBase;

// (30 + 3) * 3 = 99 < 100, so this dot is below the rewrite threshold.
TEST_F(GetMatrixSizeRewriteThresholdTest, MatMulTooSmallForRewrite) {
  const char* hlo_text = R"(
HloModule DotFuncModule

ENTRY DotFunc {
  x = f32[100,30,3] parameter(0)
  y = f32[100,3,3] parameter(1)
  ROOT dot = f32[100,30,3] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  auto dot = module->entry_computation()->root_instruction();
  EXPECT_THAT(IsMatrixMultiplicationTooSmallForRewriting(*dot, 100),
              IsOkAndHolds(true));
}

// A plain f32 dot with default precision is handled by classical emitters.
TEST_F(GetMatrixSizeRewriteThresholdTest, MatMulSupportedByClassicalEmitters) {
  const char* hlo_text = R"(
HloModule DotFuncModule

ENTRY DotFunc {
  x = f32[100,30,3] parameter(0)
  y = f32[100,3,3] parameter(1)
  ROOT dot = f32[100,30,3] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  auto dot = module->entry_computation()->root_instruction();
  EXPECT_TRUE(IsDotSupportedByClassicalEmitters(*dot));
}

// An s8 x s8 -> s32 dot is not supported by the classical emitters.
TEST_F(GetMatrixSizeRewriteThresholdTest,
       MatMulUnsupportedByClassicalEmitters) {
  const char* hlo_text = R"(
HloModule DotFuncModule

ENTRY DotFunc {
  x = s8[100,30,3] parameter(0)
  y = s8[100,3,3] parameter(1)
  ROOT dot = s32[100,30,3] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  auto dot = module->entry_computation()->root_instruction();
  EXPECT_FALSE(IsDotSupportedByClassicalEmitters(*dot));
}

// (50 + 2) * 2 = 104 >= 100: a large lhs alone clears the threshold.
TEST_F(GetMatrixSizeRewriteThresholdTest, MatMulLeftLargeEnoughForRewrite) {
  const char* hlo_text = R"(
HloModule DotFuncModule

ENTRY DotFunc {
  x = f32[50,2] parameter(0)
  y = f32[2,2] parameter(1)
  ROOT dot = f32[50,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  auto dot = module->entry_computation()->root_instruction();
  EXPECT_THAT(IsMatrixMultiplicationTooSmallForRewriting(*dot, 100),
              IsOkAndHolds(false));
}

// Symmetric case: a large rhs alone clears the threshold.
TEST_F(GetMatrixSizeRewriteThresholdTest, MatMulRightLargeEnoughForRewrite) {
  const char* hlo_text = R"(
HloModule DotFuncModule

ENTRY DotFunc {
  x = f32[2,2] parameter(0)
  y = f32[2,50] parameter(1)
  ROOT dot = f32[2,50] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  auto dot = module->entry_computation()->root_instruction();
  EXPECT_THAT(IsMatrixMultiplicationTooSmallForRewriting(*dot, 100),
              IsOkAndHolds(false));
}

// (4 + 4) * 16 = 128 >= 100: the combined product clears the threshold.
TEST_F(GetMatrixSizeRewriteThresholdTest, MatMulTogetherLargeEnoughForRewrite) {
  const char* hlo_text = R"(
HloModule DotFuncModule

ENTRY DotFunc {
  x = f32[4,16] parameter(0)
  y = f32[16,4] parameter(1)
  ROOT dot = f32[4,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_text));
  auto dot = module->entry_computation()->root_instruction();
  EXPECT_THAT(IsMatrixMultiplicationTooSmallForRewriting(*dot, 100),
              IsOkAndHolds(false));
}

}  // namespace
}  // namespace gpu
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/matmul_utils.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/matmul_utils_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
5e5b458c-749f-4599-ab52-44f385a4c5f4
cpp
tensorflow/tensorflow
stream_executor_util
third_party/xla/xla/service/gpu/stream_executor_util.cc
third_party/xla/xla/service/gpu/stream_executor_util_test.cc
#include "xla/service/gpu/stream_executor_util.h" #include <cstdint> #include <iterator> #include <limits> #include <map> #include <memory> #include <optional> #include <random> #include <sstream> #include <string_view> #include <tuple> #include <type_traits> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/base/const_init.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" #include "absl/time/time.h" #include "absl/types/span.h" #include "Eigen/Core" #include "xla/autotuning.pb.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/layout.h" #include "xla/layout_util.h" #include "xla/primitive_util.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/gpu/launch_dimensions.h" #include "xla/service/hlo_module_config.h" #include "xla/shape_util.h" #include "xla/stream_executor/data_type.h" #include "xla/stream_executor/device_memory.h" #include "xla/stream_executor/dnn.h" #include "xla/stream_executor/kernel.h" #include "xla/stream_executor/kernel_spec.h" #include "xla/stream_executor/launch_dim.h" #include "xla/stream_executor/platform.h" #include "xla/stream_executor/stream.h" #include "xla/stream_executor/typed_kernel_factory.h" #include "xla/tsl/protobuf/dnn.pb.h" #include "xla/tsl/util/proto/proto_utils.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/ml_dtypes.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { absl::StatusOr<se::dnn::VersionInfo> GetDnnVersionInfo( stream_executor::StreamExecutor* stream_exec) { if (!stream_exec) { return absl::InvalidArgumentError("StreamExecutor is null"); } stream_executor::dnn::DnnSupport* dnn = stream_exec->AsDnn(); if (!dnn) { return absl::FailedPreconditionError( "DNN library initialization failed. 
Look at the errors above for more " "details."); } return dnn->GetVersion(); } se::dnn::VersionInfo GetDnnVersionInfoOrDefault( stream_executor::StreamExecutor* stream_exec, se::dnn::VersionInfo fallback_version) { return GetDnnVersionInfo(stream_exec).value_or(fallback_version); } namespace { using se::dnn::DataLayout; using se::dnn::DataLayoutString; using se::dnn::FilterLayout; using se::dnn::FilterLayoutString; int64_t FindMissingDnum(absl::Span<const int64_t> vals) { for (int i = 0; i < vals.size(); i++) { if (!absl::c_linear_search(vals, i)) { return i; } } return vals.size(); } absl::StatusOr<Layout> DataLayoutToXlaLayout( DataLayout data_layout, int64_t batch_dimension, int64_t feature_dimension, absl::Span<int64_t const> spatial_dimensions) { std::vector<int64_t> layout; switch (data_layout) { case DataLayout::kBatchDepthYX: layout.push_back(batch_dimension); layout.push_back(feature_dimension); layout.insert(layout.end(), spatial_dimensions.begin(), spatial_dimensions.end()); break; case DataLayout::kBatchDepthYX4: case DataLayout::kBatchDepthYX32: layout.push_back(batch_dimension); layout.push_back(feature_dimension); layout.insert(layout.end(), spatial_dimensions.begin(), spatial_dimensions.end()); layout.push_back(FindMissingDnum(layout)); break; case DataLayout::kBatchYXDepth: layout.push_back(batch_dimension); layout.insert(layout.end(), spatial_dimensions.begin(), spatial_dimensions.end()); layout.push_back(feature_dimension); break; default: return Internal("Invalid layout %s", DataLayoutString(data_layout)); } return LayoutUtil::MakeLayoutFromMajorToMinor(layout); } } absl::StatusOr<std::tuple<Layout, Layout, Layout>> StreamExecutorConvLayoutsToXlaLayouts(const ConvolutionDimensionNumbers& dnums, DataLayout input, FilterLayout filter, DataLayout output) { TF_ASSIGN_OR_RETURN( Layout input_layout, DataLayoutToXlaLayout(input, dnums.input_batch_dimension(), dnums.input_feature_dimension(), dnums.input_spatial_dimensions())); TF_ASSIGN_OR_RETURN( 
Layout output_layout, DataLayoutToXlaLayout(input, dnums.output_batch_dimension(), dnums.output_feature_dimension(), dnums.output_spatial_dimensions())); std::vector<int64_t> filter_layout; switch (filter) { case FilterLayout::kOutputInputYX: filter_layout.push_back(dnums.kernel_output_feature_dimension()); filter_layout.push_back(dnums.kernel_input_feature_dimension()); filter_layout.insert(filter_layout.end(), dnums.kernel_spatial_dimensions().begin(), dnums.kernel_spatial_dimensions().end()); break; case FilterLayout::kOutputInputYX4: filter_layout.push_back(dnums.kernel_output_feature_dimension()); filter_layout.push_back(dnums.kernel_input_feature_dimension()); filter_layout.insert(filter_layout.end(), dnums.kernel_spatial_dimensions().begin(), dnums.kernel_spatial_dimensions().end()); filter_layout.push_back(FindMissingDnum(filter_layout)); break; case FilterLayout::kOutputYXInput: filter_layout.push_back(dnums.kernel_output_feature_dimension()); filter_layout.insert(filter_layout.end(), dnums.kernel_spatial_dimensions().begin(), dnums.kernel_spatial_dimensions().end()); filter_layout.push_back(dnums.kernel_input_feature_dimension()); break; default: return Internal("Invalid filter layout %s for conv with dnums %s,", FilterLayoutString(filter), ConvolutionDimensionNumbersToString(dnums)); } return std::make_tuple(input_layout, LayoutUtil::MakeLayoutFromMajorToMinor(filter_layout), output_layout); } absl::StatusOr<std::tuple<DataLayout, FilterLayout, DataLayout>> XlaConvShapesToStreamExecutorLayouts(const ConvolutionDimensionNumbers& dnums, const Shape& input, const Shape& filter, const Shape& output) { CHECK(input.has_layout()); CHECK(filter.has_layout()); CHECK(output.has_layout()); Layout nchw_input, nchw_filter, nchw_output; std::tie(nchw_input, nchw_filter, nchw_output) = StreamExecutorConvLayoutsToXlaLayouts(dnums, DataLayout::kBatchDepthYX, FilterLayout::kOutputInputYX, DataLayout::kBatchDepthYX) .value(); Layout nchw_vect_input, nchw_vect_filter, 
nchw_vect_output; std::tie(nchw_vect_input, nchw_vect_filter, nchw_vect_output) = StreamExecutorConvLayoutsToXlaLayouts(dnums, DataLayout::kBatchDepthYX4, FilterLayout::kOutputInputYX4, DataLayout::kBatchDepthYX4) .value(); Layout nhwc_input, nhwc_filter, nhwc_output; std::tie(nhwc_input, nhwc_filter, nhwc_output) = StreamExecutorConvLayoutsToXlaLayouts(dnums, DataLayout::kBatchYXDepth, FilterLayout::kOutputYXInput, DataLayout::kBatchYXDepth) .value(); DataLayout input_layout; if (LayoutUtil::Equal(input.layout(), nchw_input)) { input_layout = DataLayout::kBatchDepthYX; } else if (LayoutUtil::Equal(input.layout(), nchw_vect_input)) { int64_t vect_size = input.dimensions(input.layout().minor_to_major(0)); if (vect_size == 4) { input_layout = DataLayout::kBatchDepthYX4; } else if (vect_size == 32) { input_layout = DataLayout::kBatchDepthYX32; } else { return Internal( "Invalid input shape %s for conv with dnums %s. Most-minor dim " "should be 4 or 32, but was %d.", ShapeUtil::HumanStringWithLayout(input), ConvolutionDimensionNumbersToString(dnums), vect_size); } } else if (LayoutUtil::Equal(input.layout(), nhwc_input)) { input_layout = DataLayout::kBatchYXDepth; } else { return Internal( "Invalid input layout %s for conv with dnums %s; expected one of (%s, " "%s, %s)", LayoutUtil::HumanString(input.layout()), ConvolutionDimensionNumbersToString(dnums), nchw_input.ToString(), nchw_vect_input.ToString(), nhwc_input.ToString()); } FilterLayout filter_layout; if (LayoutUtil::Equal(filter.layout(), nchw_filter)) { filter_layout = FilterLayout::kOutputInputYX; } else if (LayoutUtil::Equal(filter.layout(), nchw_vect_filter)) { int64_t vect_size = filter.dimensions(filter.layout().minor_to_major(0)); if (vect_size == 4) { filter_layout = FilterLayout::kOutputInputYX4; } else if (vect_size == 32) { filter_layout = FilterLayout::kOutputInputYX32; } else { return Internal( "Invalid filter shape %s for conv with dnums %s. 
Most-minor dim " "should be 4 or 32, but was %d.", ShapeUtil::HumanStringWithLayout(filter), ConvolutionDimensionNumbersToString(dnums), vect_size); } } else if (LayoutUtil::Equal(filter.layout(), nhwc_filter)) { filter_layout = FilterLayout::kOutputYXInput; } else { return Internal( "Invalid filter layout %s for conv with dnums %s, expected one of (%s, " "%s, %s)", LayoutUtil::HumanString(filter.layout()), ConvolutionDimensionNumbersToString(dnums), nchw_filter.ToString(), nchw_vect_filter.ToString(), nhwc_filter.ToString()); } DataLayout output_layout; if (LayoutUtil::Equal(output.layout(), nchw_output)) { output_layout = DataLayout::kBatchDepthYX; } else if (LayoutUtil::Equal(output.layout(), nchw_vect_output)) { int64_t vect_size = output.dimensions(output.layout().minor_to_major(0)); if (vect_size == 4) { output_layout = DataLayout::kBatchDepthYX4; } else if (vect_size == 32) { output_layout = DataLayout::kBatchDepthYX32; } else { return Internal( "Invalid output shape %s for conv with dnums %s. 
Most-minor dim " "should be 4 or 32, but was %d.", ShapeUtil::HumanStringWithLayout(output), ConvolutionDimensionNumbersToString(dnums), vect_size); } } else if (LayoutUtil::Equal(output.layout(), nhwc_output)) { output_layout = DataLayout::kBatchYXDepth; } else { return Internal("Invalid output layout %s for conv with dnums %s", LayoutUtil::HumanString(output.layout()), ConvolutionDimensionNumbersToString(dnums)); } return std::make_tuple(input_layout, filter_layout, output_layout); } static std::optional<int64_t> FindVectorizedDim(int64_t rank, int64_t d0, int64_t d1, absl::Span<const int64_t> ds) { for (int64_t i = 0; i < rank; i++) { if (i == d0 || i == d1 || absl::c_linear_search(ds, i)) { continue; } return i; } return std::nullopt; } std::tuple<std::optional<int64_t>, std::optional<int64_t>, std::optional<int64_t>> FindVectorizedFeatureDims(const ConvolutionDimensionNumbers& dnums, const Shape& input, const Shape& filter, const Shape& output) { return { FindVectorizedDim(input.dimensions_size(), dnums.input_batch_dimension(), dnums.input_feature_dimension(), dnums.input_spatial_dimensions()), FindVectorizedDim(filter.dimensions_size(), dnums.kernel_input_feature_dimension(), dnums.kernel_output_feature_dimension(), dnums.kernel_spatial_dimensions()), FindVectorizedDim( output.dimensions_size(), dnums.output_batch_dimension(), dnums.output_feature_dimension(), dnums.output_spatial_dimensions()), }; } absl::Mutex& GetGpuMutex(const se::StreamExecutor* stream_exec) { static absl::Mutex mu(absl::kConstInit); static auto* mutexes = new std::map<std::pair<const se::Platform*, int64_t>, absl::Mutex>(); absl::MutexLock global_lock(&mu); auto it = mutexes ->emplace(std::piecewise_construct, std::make_tuple(stream_exec->GetPlatform(), stream_exec->device_ordinal()), std::make_tuple()) .first; return it->second; } absl::StatusOr<std::unique_ptr<se::Kernel>> CreateKernel( absl::string_view kernel_name, uint64_t num_args, absl::string_view ptx, absl::Span<const uint8_t> 
cubin_data, se::StreamExecutor* stream_exec, uint32_t shared_mem_bytes) { se::MultiKernelLoaderSpec loader_spec(num_args); loader_spec.AddCudaPtxInMemory(ptx, kernel_name); if (!cubin_data.empty()) { loader_spec.AddCudaCubinInMemory(cubin_data, kernel_name); } TF_ASSIGN_OR_RETURN(std::unique_ptr<se::Kernel> kernel, stream_exec->LoadKernel(loader_spec)); se::KernelMetadata m; m.set_shared_memory_bytes(shared_mem_bytes); kernel->set_metadata(m); return kernel; } absl::Status ExecuteKernelOnStream(const se::Kernel& kernel, absl::Span<const se::DeviceMemoryBase> args, const LaunchDimensions& dims, se::Stream* stream) { TF_ASSIGN_OR_RETURN( std::unique_ptr<se::KernelArgsPackedArrayBase> kernel_args, se::PackKernelArgs(args, kernel.metadata())); return stream->Launch(dims.thread_counts_per_block(), dims.block_counts(), kernel, *kernel_args); } absl::Status ExecuteKernelOnStream(const se::Kernel& kernel, absl::Span<const se::DeviceMemoryBase> args, const LaunchDimensions& dims, const se::ClusterDim& cluster_dim, se::Stream* stream) { TF_ASSIGN_OR_RETURN( std::unique_ptr<se::KernelArgsPackedArrayBase> kernel_args, se::PackKernelArgs(args, kernel.metadata())); return stream->Launch(dims.thread_counts_per_block(), dims.block_counts(), cluster_dim, kernel, *kernel_args); } template <typename T, typename Generator> typename std::enable_if<std::is_integral<T>::value, T>::type static UniformDistribution(T lhs, T rhs, Generator* gen) = delete; template <typename T, typename Generator> typename std::enable_if<std::is_floating_point<T>::value, T>::type static UniformDistribution(T lhs, T rhs, Generator* gen) { return std::uniform_real_distribution<T>(lhs, rhs)(*gen); } namespace repeat_buffer_kernel { void* kernel(); } template <typename T> static void InitializeTypedBuffer(se::Stream* stream, se::DeviceMemoryBase buffer, int64_t* rng_state) { constexpr int host_buffer_size = 10069; static std::vector<T>* host_buffer = [&] { auto* ret = new std::vector<T>(host_buffer_size); 
std::mt19937 gen; for (auto& element : *ret) { constexpr bool kIsIntegral = std::numeric_limits<T>::is_integer; constexpr bool kIsLowRange = !kIsIntegral && std::numeric_limits<T>::max_exponent <= std::numeric_limits<Eigen::half>::max_exponent; using RandomType = typename std::conditional<std::is_same_v<T, double>, double, float>::type; auto upper_bound = RandomType(kIsLowRange ? 0.1 : 1.0); auto rand_val = UniformDistribution(RandomType(0), upper_bound, &gen); element = T(kIsIntegral ? rand_val + 0.5 : rand_val); } return ret; }(); CHECK_EQ(0, buffer.size() % sizeof(T)); int64_t elements_to_fill = buffer.size() / sizeof(T); int64_t host_index = *rng_state; CHECK_LT(host_index, host_buffer_size); *rng_state = (*rng_state + elements_to_fill) % host_buffer_size; int64_t first_size = std::min<int64_t>(host_buffer_size - host_index, elements_to_fill); TF_CHECK_OK(stream->Memcpy(&buffer, host_buffer->data() + host_index, first_size * sizeof(T))); elements_to_fill -= first_size; if (elements_to_fill == 0) { return; } int64_t second_size = std::min<int64_t>(host_index, elements_to_fill); CHECK_LE(first_size + second_size, host_buffer_size); se::DeviceMemoryBase mem = buffer.GetByteSlice(first_size * sizeof(T), second_size * sizeof(T)); TF_CHECK_OK(stream->Memcpy(&mem, host_buffer->data(), mem.size())); elements_to_fill -= second_size; if (elements_to_fill == 0) { return; } #ifdef GOOGLE_CUDA CHECK_EQ(elements_to_fill, buffer.size() / sizeof(T) - host_buffer_size); se::StreamExecutor* executor = stream->parent(); auto kernel = se::TypedKernelFactory<se::DeviceMemoryBase, int64_t, int64_t>::Create( executor, "RepeatBufferKernel", repeat_buffer_kernel::kernel()); if (!kernel.ok()) { LOG(FATAL) << "Could not create RepeatBufferKernel: " << kernel.status(); } constexpr int64_t host_buffer_bytes = host_buffer_size * sizeof(T); constexpr int threads_per_block = 256; constexpr int blocks_per_grid = (host_buffer_bytes + threads_per_block - 1) / threads_per_block; 
TF_CHECK_OK(stream->ThenLaunch(se::ThreadDim(threads_per_block, 1, 1), se::BlockDim(blocks_per_grid, 1, 1), *kernel, buffer, host_buffer_bytes, static_cast<int64_t>(buffer.size()))); #endif } void InitializeBuffer(se::Stream* stream, PrimitiveType buffer_type, int64_t* rng_state, se::DeviceMemoryBase buffer) { return primitive_util::PrimitiveTypeSwitch<void>( [&](auto primitive_type_constant) -> void { if constexpr (primitive_util::IsFloatingPointType( primitive_type_constant) || primitive_util::IsIntegralType(primitive_type_constant)) { using NativeT = typename primitive_util::PrimitiveTypeToNative< primitive_type_constant>::type; return InitializeTypedBuffer<NativeT>(stream, buffer, rng_state); } if constexpr (primitive_util::IsComplexType(primitive_type_constant)) { using NativeT = typename primitive_util::PrimitiveTypeToNative< primitive_type_constant>::type; return InitializeTypedBuffer<typename NativeT::value_type>( stream, buffer, rng_state); } if constexpr (primitive_type_constant == PRED) { return InitializeTypedBuffer<int8_t>(stream, buffer, rng_state); } LOG(FATAL) << "Unexpected type: " << primitive_util::LowercasePrimitiveTypeName(buffer_type); }, buffer_type); } absl::StatusOr<se::dnn::ConvolutionKind> GetDNNConvKindFromCudnnConvKind( CudnnConvKind kind) { switch (kind) { case CudnnConvKind::kBackwardFilter: return se::dnn::BACKWARD_FILTER; case CudnnConvKind::kBackwardInput: return se::dnn::BACKWARD_DATA; case CudnnConvKind::kForward: return se::dnn::FORWARD; case CudnnConvKind::kForwardActivation: return se::dnn::FORWARD_BIAS_ACTIVATION; case CudnnConvKind::kForwardGraph: return se::dnn::FORWARD_GRAPH; default: break; } return Internal("Unexpected convolution kind"); } absl::StatusOr<se::dnn::NormKind> GetDNNNormKindFromCudnnNormKind( CudnnNormKind kind) { switch (kind) { case CudnnNormKind::kLayerForwardInfer: return se::dnn::LAYER_FWD_INFER; case CudnnNormKind::kLayerForwardTrain: return se::dnn::LAYER_FWD_TRAIN; case 
        CudnnNormKind::kLayerBackward:
      return se::dnn::LAYER_BWD;
    default:
      return Internal("Unexpected norm kind");
  }
}

// Maps an XLA CudnnfMHAMaskKind to the StreamExecutor DNN FMHA mask kind.
absl::StatusOr<se::dnn::FMHAMaskKind> GetDNNFmhaMaskKindFromCudnnFmhaMaskKind(
    CudnnfMHAMaskKind kind) {
  switch (kind) {
    case CudnnfMHAMaskKind::kNoMask:
      return se::dnn::NO_MASK;
    case CudnnfMHAMaskKind::kPadding:
      return se::dnn::PADDING;
    case CudnnfMHAMaskKind::kCausal:
      return se::dnn::CAUSAL;
    case CudnnfMHAMaskKind::kPaddingCausal:
      return se::dnn::PADDING_CAUSAL;
    case CudnnfMHAMaskKind::kAlibi:
      return se::dnn::ALIBI;
    default:
      return Internal("Unexpected fmha mask kind");
  }
}

// Maps an XLA PrimitiveType to the StreamExecutor DNN data type; returns an
// Internal error for types the DNN library does not handle.
absl::StatusOr<se::dnn::DataType> GetDNNDataTypeFromPrimitiveType(
    PrimitiveType type) {
  switch (type) {
    case F16:
      return se::dnn::ToDataType<Eigen::half>::value;
    case F32:
      return se::dnn::ToDataType<float>::value;
    case F64:
      return se::dnn::ToDataType<double>::value;
    case S8:
      return se::dnn::ToDataType<int8_t>::value;
    case S32:
      return se::dnn::ToDataType<int32_t>::value;
    case BF16:
      return se::dnn::ToDataType<Eigen::bfloat16>::value;
    case F8E4M3FN:
      return se::dnn::ToDataType<tsl::float8_e4m3fn>::value;
    case F8E5M2:
      return se::dnn::ToDataType<tsl::float8_e5m2>::value;
    default:
      break;
  }
  return Internal("Unsupported datatype");
}

// True when either determinism debug flag requests deterministic GPU ops.
bool RequireDeterminism(const HloModuleConfig& config) {
  return config.debug_options().xla_gpu_deterministic_ops() ||
         config.debug_options().xla_gpu_exclude_nondeterministic_ops();
}

namespace {

// Keeps results that did not fail outright; WRONG_RESULT failures are
// deliberately retained (the candidate ran, only its output differed).
std::vector<AutotuneResult> KeepNonFailures(
    absl::Span<AutotuneResult const> profile_results) {
  std::vector<AutotuneResult> filtered_results;
  absl::c_copy_if(profile_results, std::back_inserter(filtered_results),
                  [](const AutotuneResult& r) {
                    return !r.has_failure() ||
                           r.failure().kind() == AutotuneResult::WRONG_RESULT;
                  });
  return filtered_results;
}

// Builds an Internal error listing every per-algorithm failure message,
// optionally naming the instruction that was being autotuned.
absl::Status AllAlgorithmsFailedInternalError(
    std::optional<std::string_view> instr_str,
    absl::Span<AutotuneResult const> profile_results) {
  std::ostringstream msg;
  if (instr_str.has_value()) {
    msg << "All algorithms tried for " << instr_str.value()
        << " failed. Falling back to default algorithm. Per-algorithm "
           "errors:";
  } else {
    msg << "All algorithms failed. Falling back to the default algorithm. "
        << "Per-algorithm errors:";
  }
  for (const auto& result : profile_results) {
    msg << "\n " << result.failure().msg();
  }
  return Internal("%s", msg.str());
}

// Error for the case where autotuning produced no candidates at all.
absl::Status NoAlgorithmSuppliedInternalError(
    std::optional<std::string_view> instr_str) {
  std::ostringstream msg;
  if (instr_str.has_value()) {
    msg << "There are no algorithm candidates for computing: \n  "
        << instr_str.value()
        << "\nThis likely means that the instruction shape is not supported by "
           "the target GPU library.";
  } else {
    msg << "There are no algorithm candidates for computing the instruction.\n"
           "This likely means that the instruction shape is not supported by "
           "the target GPU library.";
  }
  return Internal("%s", msg.str());
}

// Sorts in place, fastest run time first.
void SortAutotuningResultsByRunTime(std::vector<AutotuneResult>& results) {
  absl::c_sort(results,
               [](const AutotuneResult& lhs, const AutotuneResult& rhs) {
                 return tsl::proto_utils::FromDurationProto(lhs.run_time()) <
                        tsl::proto_utils::FromDurationProto(rhs.run_time());
               });
}

// Returns the leading span of results whose run time is within a fixed
// measurement-error margin (4us) of the fastest; input must already be
// sorted by run time.
absl::Span<AutotuneResult const> TopResultsWithinMeasurementError(
    std::vector<AutotuneResult>& results_sorted_by_runtime) {
  constexpr absl::Duration kMeasurementError = absl::Microseconds(4);
  absl::Duration min_time = tsl::proto_utils::FromDurationProto(
      results_sorted_by_runtime.front().run_time());
  absl::Duration limit_time = min_time + kMeasurementError;
  auto limit_time_it = absl::c_find_if(
      results_sorted_by_runtime, [limit_time](const AutotuneResult& x) {
        return tsl::proto_utils::FromDurationProto(x.run_time()) > limit_time;
      });
  return absl::MakeSpan(&*results_sorted_by_runtime.begin(), &*limit_time_it);
}

}  // namespace

// Picks the best autotuning result: drops hard failures, honors determinism
// flags (first surviving result wins), then among the results within the
// measurement-error window of the fastest, prefers the one using the least
// scratch memory.
absl::StatusOr<AutotuneResult> PickBestResult(
    absl::Span<AutotuneResult const> profile_results,
    std::optional<std::string_view> instr_str,
    HloModuleConfig hlo_module_config) {
  if (profile_results.empty()) {
    return NoAlgorithmSuppliedInternalError(instr_str);
  }
  std::vector<AutotuneResult> filtered_results =
      KeepNonFailures(profile_results);
  if (filtered_results.empty()) {
    return AllAlgorithmsFailedInternalError(instr_str, profile_results);
  }
  if (RequireDeterminism(hlo_module_config)) {
    // Deterministic mode: do not rank by (noisy) timings.
    return *filtered_results.begin();
  }
  SortAutotuningResultsByRunTime(filtered_results);
  auto top_within_error = TopResultsWithinMeasurementError(filtered_results);
  return *absl::c_min_element(top_within_error,
                              [](const AutotuneResult& lhs,
                                 const AutotuneResult& rhs) {
                                return lhs.scratch_bytes() <
                                       rhs.scratch_bytes();
                              });
}

}  // namespace gpu
}  // namespace xla
#include "xla/service/gpu/stream_executor_util.h" #include <cstdint> #include <vector> #include <gtest/gtest.h> #include "absl/status/statusor.h" #include "absl/time/time.h" #include "xla/autotuning.pb.h" #include "xla/service/hlo_module_config.h" #include "xla/tsl/util/proto/proto_utils.h" namespace xla::gpu { namespace { struct Result { int64_t run_time_ns; int64_t scratch_bytes; bool operator==(const Result& other) const { return other.run_time_ns == run_time_ns && other.scratch_bytes == scratch_bytes; }; explicit operator AutotuneResult() const { AutotuneResult result; *result.mutable_run_time() = tsl::proto_utils::ToDurationProto(absl::Nanoseconds(run_time_ns)); result.set_scratch_bytes(scratch_bytes); return result; } }; static Result ATRToResult(AutotuneResult atr) { return Result{.run_time_ns = absl::ToInt64Nanoseconds( tsl::proto_utils::FromDurationProto(atr.run_time())), .scratch_bytes = atr.scratch_bytes()}; } std::vector<AutotuneResult> Results(const std::vector<Result>& stats) { std::vector<AutotuneResult> results; for (const auto& s : stats) results.push_back(AutotuneResult(s)); return results; } TEST(StreamExecutorTest, PickBestResult) { absl::StatusOr<AutotuneResult> atr; atr = PickBestResult(Results({{9000, 0}, {1000, 0}, {16000, 0}}), "", {}); EXPECT_EQ(ATRToResult(atr.value()), Result({1000, 0})); atr = PickBestResult(Results({{4700, 0}, {4600, 0}, {4500, 0}}), "", {}); EXPECT_EQ(ATRToResult(atr.value()), Result({4500, 0})); atr = PickBestResult(Results({{4700, 0}, {4600, 2}, {4500, 1}}), "", {}); EXPECT_EQ(ATRToResult(atr.value()), Result({4700, 0})); atr = PickBestResult(Results({{5000, 1}, {6000, 0}, {7500, 0}}), "", {}); EXPECT_EQ(ATRToResult(atr.value()), Result({6000, 0})); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/stream_executor_util.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/stream_executor_util_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
adb8539d-0c2b-41c4-b736-da697e10d787
cpp
tensorflow/tensorflow
gpu_compiler
third_party/xla/xla/service/gpu/gpu_compiler.cc
third_party/xla/xla/service/gpu/gpu_compiler_test.cc
#include "xla/service/gpu/gpu_compiler.h" #include <algorithm> #include <array> #include <cstdint> #include <functional> #include <memory> #include <new> #include <optional> #include <string> #include <string_view> #include <utility> #include <variant> #include <vector> #include "absl/base/call_once.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "absl/types/variant.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/StringRef.h" #include "llvm/AsmParser/Parser.h" #include "llvm/Bitcode/BitcodeReader.h" #include "llvm/Bitcode/BitcodeWriter.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DiagnosticInfo.h" #include "llvm/IR/DiagnosticPrinter.h" #include "llvm/IR/GlobalValue.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/IR/Verifier.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Error.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/Cloning.h" #include "llvm/Transforms/Utils/SplitModule.h" #include "mlir/IR/Diagnostics.h" #include "mlir/IR/DialectRegistry.h" #include "mlir/Support/LLVM.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_module_group.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/ir/hlo_schedule.h" #include "xla/hlo/pass/hlo_pass_fix.h" #include "xla/hlo/pass/hlo_pass_pipeline.h" #include "xla/maybe_owning.h" #include "xla/service/algebraic_simplifier.h" #include "xla/service/all_gather_broadcast_reorder.h" #include "xla/service/all_gather_combiner.h" #include "xla/service/all_reduce_combiner.h" #include 
"xla/service/all_reduce_contiguous.h" #include "xla/service/all_reduce_folder.h" #include "xla/service/all_reduce_promotion.h" #include "xla/service/all_reduce_reassociate.h" #include "xla/service/async_collective_creator.h" #include "xla/service/batched_gather_scatter_normalizer.h" #include "xla/service/batchnorm_expander.h" #include "xla/service/bitcast_dtypes_expander.h" #include "xla/service/broadcast_canonicalizer.h" #include "xla/service/buffer_assignment.h" #include "xla/service/call_inliner.h" #include "xla/service/collective_permute_decomposer.h" #include "xla/service/collective_pipeliner.h" #include "xla/service/collective_quantizer.h" #include "xla/service/collectives_schedule_linearizer.h" #include "xla/service/comparison_expander.h" #include "xla/service/compiler.h" #include "xla/service/conditional_canonicalizer.h" #include "xla/service/conditional_simplifier.h" #include "xla/service/convert_memory_placement_to_internal_annotations.h" #include "xla/service/convert_mover.h" #include "xla/service/convolution_4d_expander.h" #include "xla/service/convolution_pred_expander.h" #include "xla/service/copy_insertion.h" #include "xla/service/cpu_gpu_shape_verifier.h" #include "xla/service/dot_decomposer.h" #include "xla/service/dot_merger.h" #include "xla/service/dump.h" #include "xla/service/dynamic_dimension_inference.h" #include "xla/service/dynamic_dimension_simplifier.h" #include "xla/service/dynamic_index_splitter.h" #include "xla/service/dynamic_padder.h" #include "xla/service/eigh_expander.h" #include "xla/service/executable.h" #include "xla/service/export_hlo.h" #include "xla/service/flatten_call_graph.h" #include "xla/service/float_normalization.h" #include "xla/service/float_support.h" #include "xla/service/gather_expander.h" #include "xla/service/gather_simplifier.h" #include "xla/service/gpu/autotuning/autotuner_util.h" #include "xla/service/gpu/autotuning/custom_kernel_fusion_autotuner.h" #include "xla/service/gpu/compile_module_to_llvm_ir.h" 
#include "xla/service/gpu/conv_layout_normalization.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/gpu/execution_stream_assignment.h" #include "xla/service/gpu/fusion_pipeline.h" #include "xla/service/gpu/fusions/triton/triton_support.h" #include "xla/service/gpu/gpu_executable.h" #include "xla/service/gpu/gpu_float_support.h" #include "xla/service/gpu/gpu_hlo_schedule.h" #include "xla/service/gpu/gpu_latency_hiding_scheduler.h" #include "xla/service/gpu/gpu_p2p_pipeliner.h" #include "xla/service/gpu/gpu_spmd_pipeline.h" #include "xla/service/gpu/hlo_fusion_stats.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/ir_emitter_context.h" #include "xla/service/gpu/ir_emitter_unnested.h" #include "xla/service/gpu/kernel_reuse_cache.h" #include "xla/service/gpu/matmul_utils.h" #include "xla/service/gpu/metrics.h" #include "xla/service/gpu/model/gpu_cost_model_stats_collection.h" #include "xla/service/gpu/model/gpu_hlo_cost_analysis.h" #include "xla/service/gpu/prepare_hlo_for_ir_emitting_pipeline.h" #include "xla/service/gpu/reduction_utils.h" #include "xla/service/gpu/runtime_intrinsics.h" #include "xla/service/gpu/stream_executor_util.h" #include "xla/service/gpu/transforms/algebraic_simplifier.h" #include "xla/service/gpu/transforms/algorithm_checker.h" #include "xla/service/gpu/transforms/all_gather_optimizer.h" #include "xla/service/gpu/transforms/all_reduce_blueconnect.h" #include "xla/service/gpu/transforms/all_reduce_splitter.h" #include "xla/service/gpu/transforms/async_collective_annotator.h" #include "xla/service/gpu/transforms/async_wrapper.h" #include "xla/service/gpu/transforms/collective_permute_cycle_decomposer.h" #include "xla/service/gpu/transforms/collective_permute_valid_iteration_annotator.h" #include "xla/service/gpu/transforms/command_buffer_scheduling.h" #include "xla/service/gpu/transforms/conv_rewriter.h" #include "xla/service/gpu/transforms/convert_async_collectives_to_sync.h" #include 
"xla/service/gpu/transforms/cudnn_custom_call_converter.h" #include "xla/service/gpu/transforms/custom_kernel_fusion_rewriter.h" #include "xla/service/gpu/transforms/dot_dimension_sorter.h" #include "xla/service/gpu/transforms/dot_operand_converter.h" #include "xla/service/gpu/transforms/double_buffer_loop_unrolling.h" #include "xla/service/gpu/transforms/dynamic_slice_fusion_rewriter.h" #include "xla/service/gpu/transforms/fusion_block_level_rewriter.h" #include "xla/service/gpu/transforms/fusion_wrapper.h" #include "xla/service/gpu/transforms/gemm_broadcast_folding_rewriter.h" #include "xla/service/gpu/transforms/gemm_fusion.h" #include "xla/service/gpu/transforms/gemm_rewriter.h" #include "xla/service/gpu/transforms/gemv_rewriter.h" #include "xla/service/gpu/transforms/layout_assignment.h" #include "xla/service/gpu/transforms/move_copy_to_users.h" #include "xla/service/gpu/transforms/pipelined_p2p_rewriter.h" #include "xla/service/gpu/transforms/reduce_scatter_creator.h" #include "xla/service/gpu/transforms/reduction_degenerate_dim_remover.h" #include "xla/service/gpu/transforms/reduction_dimension_grouper.h" #include "xla/service/gpu/transforms/reduction_layout_normalizer.h" #include "xla/service/gpu/transforms/reduction_splitter.h" #include "xla/service/gpu/transforms/rename_fusions.h" #include "xla/service/gpu/transforms/sanitize_constant_names.h" #include "xla/service/gpu/transforms/scatter_expander.h" #include "xla/service/gpu/transforms/scatter_slice_simplifier.h" #include "xla/service/gpu/transforms/softmax_rewriter_triton.h" #include "xla/service/gpu/transforms/stream_attribute_annotator.h" #include "xla/service/gpu/transforms/stream_attribute_async_wrapper.h" #include "xla/service/gpu/transforms/topk_specializer.h" #include "xla/service/gpu/transforms/topk_splitter.h" #include "xla/service/gpu/transforms/transpose_dimension_grouper.h" #include "xla/service/gpu/transforms/tree_reduction_rewriter.h" #include 
"xla/service/gpu/transforms/triton_fusion_numerics_verifier.h" #include "xla/service/gpu/transforms/windowed_einsum_handler.h" #include "xla/service/hlo.pb.h" #include "xla/service/hlo_computation_deduplicator.h" #include "xla/service/hlo_constant_folding.h" #include "xla/service/hlo_cost_analysis.h" #include "xla/service/hlo_cse.h" #include "xla/service/hlo_dataflow_analysis.h" #include "xla/service/hlo_dce.h" #include "xla/service/hlo_module_config.h" #include "xla/service/hlo_rematerialization.h" #include "xla/service/hlo_verifier.h" #include "xla/service/host_memory_transfer_asyncifier.h" #include "xla/service/host_offload_legalize.h" #include "xla/service/host_offloader.h" #include "xla/service/layout_assignment.h" #include "xla/service/layout_normalization.h" #include "xla/service/llvm_ir/llvm_util.h" #include "xla/service/logistic_expander.h" #include "xla/service/operand_upcaster.h" #include "xla/service/optimization_barrier_expander.h" #include "xla/service/optimize_input_output_buffer_alias.h" #include "xla/service/qr_expander.h" #include "xla/service/real_imag_expander.h" #include "xla/service/reduce_decomposer.h" #include "xla/service/reduce_scatter_combiner.h" #include "xla/service/reduce_scatter_reassociate.h" #include "xla/service/reduce_window_rewriter.h" #include "xla/service/reshape_decomposer.h" #include "xla/service/reshape_mover.h" #include "xla/service/result_caster.h" #include "xla/service/rng_bit_generator_expander.h" #include "xla/service/rng_expander.h" #include "xla/service/scatter_expander.h" #include "xla/service/scatter_simplifier.h" #include "xla/service/sharding_remover.h" #include "xla/service/simplify_fp_conversions.h" #include "xla/service/slice_sinker.h" #include "xla/service/slow_operation_alarm.h" #include "xla/service/sort_simplifier.h" #include "xla/service/stable_sort_expander.h" #include "xla/service/stochastic_convert_decomposer.h" #include "xla/service/sub_byte_normalization.h" #include "xla/service/topk_rewriter.h" 
#include "xla/service/transpose_folding.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/service/while_loop_all_reduce_code_motion.h"
#include "xla/service/while_loop_constant_sinking.h"
#include "xla/service/while_loop_simplifier.h"
#include "xla/service/while_loop_trip_count_annotator.h"
#include "xla/service/zero_sized_hlo_elimination.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_description.pb.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/semantic_version.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/blocking_counter.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/profiler/lib/scoped_annotation.h"
#include "tsl/profiler/lib/traceme.h"
#ifdef PLATFORM_GOOGLE
#include "xla/hlo/experimental/auto_sharding/auto_sharding.h"
#endif

namespace xla {
namespace gpu {
namespace {

using MaybeOwningThreadPool = MaybeOwning<tsl::thread::ThreadPool>;

// Returns a thread pool per `parallelism`:
//   0       -> use `default_thread_pool`, or create one with
//              `default_parallelism` threads when none is supplied and
//              default_parallelism > 1;
//   1       -> no thread pool (run serially);
//   n > 1   -> create an owned pool with n threads.
// The supplied default pool must not be the calling thread's own pool
// (CurrentThreadId() == -1 is checked).
MaybeOwningThreadPool CreateMaybeOwningThreadPool(
    int parallelism, tsl::thread::ThreadPool* default_thread_pool,
    int default_parallelism) {
  CHECK_GE(parallelism, 0);
  CHECK_GE(default_parallelism, 1);
  CHECK(default_thread_pool == nullptr ||
        default_thread_pool->CurrentThreadId() == -1);
  auto create_thread_pool = [&](int num_threads) {
    CHECK_GE(num_threads, 1);
    return std::make_unique<tsl::thread::ThreadPool>(tsl::Env::Default(), "",
                                                     num_threads);
  };
  switch (parallelism) {
    case 0:
      if (default_thread_pool == nullptr && default_parallelism > 1) {
        return MaybeOwningThreadPool(create_thread_pool(default_parallelism));
      }
      return MaybeOwningThreadPool(default_thread_pool);
    case 1:
      // Parallelism of 1 means serial execution: no pool at all.
      return MaybeOwningThreadPool(nullptr);
    default:
      return MaybeOwningThreadPool(create_thread_pool(parallelism));
  }
}

// Builds an autotuning config: device-backed when a StreamExecutor is
// available, deviceless (description-only) otherwise.
absl::StatusOr<AutotuneConfig> GetAutotuneConfig(
    se::StreamExecutor* stream_exec, const DebugOptions& debug_options,
    const GpuCompiler::CompileOptions& options,
    const Compiler::TargetConfig& gpu_target_config) {
  if (stream_exec) {
    return AutotuneConfig{DeviceConfig{stream_exec, options.device_allocator},
                          debug_options};
  }
  return AutotuneConfig{DevicelessConfig{gpu_target_config.device_description},
                        debug_options};
}

// Convenience accessor for the device's GPU compute capability.
se::GpuComputeCapability GetGpuVersion(const se::StreamExecutor* stream_exec) {
  return stream_exec->GetDeviceDescription().gpu_compute_capability();
}

// AOT compilation result that carries the optimized HLO module, buffer
// assignment, compiled text/binary and cuDNN graphs in a single proto, and
// can be serialized/deserialized and re-loaded into a GpuExecutable.
class GpuThunkAotCompilationResult : public AotCompilationResult {
 public:
  // Packs the given module/assignment/compilation artifacts into a proto;
  // clones the module so the result owns an independent copy.
  static absl::StatusOr<std::unique_ptr<GpuThunkAotCompilationResult>>
  FromModule(const HloModule* hlo_module,
             const BufferAssignment* buffer_assignment,
             std::string_view asm_text, absl::Span<const uint8_t> binary,
             const BinaryMap& dnn_compiled_graphs) {
    CompilationResultProto proto;
    *proto.mutable_hlo_module_with_config() = hlo_module->ToProtoWithConfig();
    *proto.mutable_buffer_assignment() = buffer_assignment->ToProto();
    proto.set_asm_text(std::string(asm_text));
    proto.set_binary(binary.data(), binary.size());
    proto.mutable_dnn_compiled_graphs()->insert(dnn_compiled_graphs.cbegin(),
                                                dnn_compiled_graphs.cend());
    return std::unique_ptr<GpuThunkAotCompilationResult>(
        new GpuThunkAotCompilationResult(hlo_module->Clone(),
                                         std::move(proto)));
  }

  // Deserializes a result previously produced by SerializeAsString().
  static absl::StatusOr<std::unique_ptr<GpuThunkAotCompilationResult>>
  FromString(const std::string& serialized) {
    CompilationResultProto proto;
    if (!proto.ParseFromString(serialized)) {
      return Internal(
          "Failed to parse serialized GpuThunkAotCompilationResult.");
    }
    TF_ASSIGN_OR_RETURN(
        std::unique_ptr<HloModule> module,
        HloModule::CreateFromProtoWithConfig(proto.hlo_module_with_config()));
    return std::unique_ptr<GpuThunkAotCompilationResult>(
        new GpuThunkAotCompilationResult(std::move(module), std::move(proto)));
  }

  absl::StatusOr<std::string> SerializeAsString() const override {
    return proto_.SerializeAsString();
  }

  // Re-creates a runnable GpuExecutable from the stored proto (defined
  // out-of-line below).
  absl::StatusOr<std::unique_ptr<Executable>> LoadExecutable(
      Compiler* compiler,
      const se::StreamExecutor* stream_exec) const override;

  const HloModule* optimized_module() const override { return module_.get(); }

  std::unique_ptr<HloModule> consume_optimized_module() override {
    return std::move(module_);
  }

 private:
  GpuThunkAotCompilationResult(std::unique_ptr<HloModule> module,
                               CompilationResultProto proto)
      : module_(std::move(module)), proto_(std::move(proto)) {}

  std::unique_ptr<HloModule> module_;   // optimized HLO module
  CompilationResultProto proto_;        // serialized compilation artifacts
};

}  // namespace

// Rebuilds a GpuExecutable from the serialized proto: restores the HLO
// module and buffer assignment, re-runs thunk IR emission for the entry
// computation, and assembles a GpuExecutable from the stored asm/binary.
absl::StatusOr<std::unique_ptr<Executable>>
GpuThunkAotCompilationResult::LoadExecutable(
    Compiler* compiler, const se::StreamExecutor* stream_exec) const {
  TF_ASSIGN_OR_RETURN(
      std::unique_ptr<HloModule> hlo_module,
      HloModule::CreateFromProtoWithConfig(proto_.hlo_module_with_config()));
  TF_ASSIGN_OR_RETURN(
      std::unique_ptr<BufferAssignment> buffer_assignment,
      BufferAssignment::FromProto(proto_.buffer_assignment(), hlo_module.get(),
                                  compiler->BufferSizeBytesFunction(),
                                  nullptr));
  ExecutionStreamAssignment execution_stream_assignment(hlo_module.get());
  std::vector<uint8_t> binary(proto_.binary().begin(), proto_.binary().end());
  TF_ASSIGN_OR_RETURN(
      se::Platform * platform,
      se::PlatformManager::PlatformWithId(compiler->PlatformId()));
  std::string platform_name = platform->Name();
  const se::DeviceDescription& gpu_device_info =
      stream_exec->GetDeviceDescription();
  mlir::DialectRegistry registry;
  auto mlir_context = std::make_unique<mlir::MLIRContext>(registry);
  llvm::LLVMContext llvm_context;
  auto* gpu_compiler = dynamic_cast<GpuCompiler*>(compiler);
  if (gpu_compiler == nullptr) {
    return Internal("Compiler is not a GpuCompiler.");
  }
  auto llvm_module = std::make_unique<llvm::Module>("", llvm_context);
  llvm_module->setTargetTriple(gpu_compiler->target_triple());
  llvm_module->setDataLayout(gpu_compiler->data_layout());
  IrEmitterContext ir_emitter_context(
      hlo_module.get(), buffer_assignment.get(), &execution_stream_assignment,
      platform_name, gpu_device_info, mlir_context.get(), llvm_module.get(),
      nullptr, false);
  // Optionally pre-load the kernel cache used by parallel LLVM compilation.
  absl::string_view cache_file_path =
      hlo_module->config().debug_options().xla_gpu_kernel_cache_file();
  if (!cache_file_path.empty() &&
      hlo_module->config()
          .debug_options()
          .xla_gpu_enable_llvm_module_compilation_parallelism()) {
    TF_RETURN_IF_ERROR(LoadCache(ir_emitter_context, cache_file_path));
  }
  auto ir_emitter = IrEmitterUnnested::Create(&ir_emitter_context);
  TF_RETURN_IF_ERROR(
      ir_emitter->EmitHloComputation(hlo_module->entry_computation()));
  std::vector<GpuExecutable::ConstantInfo> constants =
      std::move(ir_emitter_context.constants());
  TF_ASSIGN_OR_RETURN(auto output_info,
                      GetOutputInfo(*hlo_module, *buffer_assignment));
  const Shape& output_shape = hlo_module->result_shape();
  int64_t debug_buffer_assignment_show_max =
      hlo_module->config()
          .debug_options()
          .xla_debug_buffer_assignment_show_max();
  TF_ASSIGN_OR_RETURN(
      std::unique_ptr<GpuExecutable> executable,
      GpuExecutable::Create(GpuExecutable::Params{
          proto_.asm_text(), binary,
          BinaryMap(proto_.dnn_compiled_graphs().cbegin(),
                    proto_.dnn_compiled_graphs().cend()),
          gpu_device_info.gpu_compute_capability(),
          ir_emitter->ConsumeThunkSequence(), std::move(constants),
          std::move(output_info), std::move(hlo_module->name()),
          std::move(output_shape), std::nullopt,
          std::move(buffer_assignment), debug_buffer_assignment_show_max,
          std::move(hlo_module), true}));
  return executable;
}

// Records the target triple/data layout and derives the pointer size from
// the data layout string.
GpuCompiler::GpuCompiler(se::Platform::Id platform_id,
                         const char* target_triple, const char* data_layout)
    : platform_id_(platform_id),
      target_triple_(target_triple),
      data_layout_(data_layout),
      pointer_size_(llvm::DataLayout(data_layout)
                        .getPointerSize(0)) {}

namespace {

// Installs an HloVerifier invariant checker on `pipeline`; with
// debug_only=true it only runs in debug builds of the pipeline.
void AddHloVerifier(HloPassPipeline* pipeline,
                    bool verify_unique_channel_ids = false,
                    HloVerifierOpts&& opts = {}, bool debug_only = false) {
  opts.verify_unique_channel_ids = verify_unique_channel_ids;
  std::unique_ptr<TargetVerifierMetadata> verifier_metadata =
      std::make_unique<CpuGpuVerifierMetadata>(std::move(opts));
  if (debug_only) {
    pipeline->AddInvariantCheckerDebug<HloVerifier>(
        std::move(verifier_metadata), "hlo verifier (debug)");
  } else {
    pipeline->AddInvariantChecker<HloVerifier>(std::move(verifier_metadata),
                                               "hlo verifier");
  }
}

// Warns when optimization passes are about to run on an already-scheduled
// module (unless all HLO passes are disabled).
void CheckNotScheduled(HloModule* hlo_module) {
  if (hlo_module->has_schedule() &&
      !hlo_module->config().debug_options().xla_disable_all_hlo_passes()) {
    LOG(WARNING) << "\nThe current HLO module " << hlo_module->name()
                 << " is scheduled and optimized. \n"
                 << "It is not expected to run optimization passes again.\n"
                    "Use a test method like RunAndCompareNoHloPasses() or "
                 << "the xla_disable_all_hlo_passes flag.";
  }
}

// Logs the module's debug options at VLOG level 1.
void LogDebugOptions(HloModule* hlo_module) {
  XLA_VLOG_LINES(
      1, absl::StrFormat("GpuCompilationEnvironment of hlo_module %s:\n%s",
                         hlo_module->name(),
                         hlo_module->config().debug_options().DebugString()));
}

// Derives the layout-insensitive algebraic-simplifier options from the
// compiler defaults plus module debug options; ROCm disables conv operand
// swapping.
AlgebraicSimplifierOptions LayoutInsensitiveAlgebraicSimplifierOptions(
    const HloModuleConfig& hlo_module_config,
    const Compiler::TargetConfig& gpu_target_config,
    AlgebraicSimplifierOptions opts_from_compiler) {
  AlgebraicSimplifierOptions layout_insensitive_algsimp_opts =
      opts_from_compiler;
  layout_insensitive_algsimp_opts.set_conv_is_lowerable_callback(
      ConvRewriter::ConvIsLowerable);
  layout_insensitive_algsimp_opts.set_enable_dot_strength_reduction(
      hlo_module_config.debug_options()
          .xla_gpu_enable_dot_strength_reduction());
  layout_insensitive_algsimp_opts.set_supports_non_canonical_dots(false);
  // NaN propagation for min/max is disabled under fast-min-max.
  layout_insensitive_algsimp_opts.set_minmax_propagate_nan(
      !hlo_module_config.debug_options().xla_gpu_enable_fast_min_max());
  layout_insensitive_algsimp_opts
      .set_unconditionally_simplify_reduce_of_transpose_or_reshape(true);
  if (gpu_target_config.platform_name == "ROCM") {
    layout_insensitive_algsimp_opts.set_enable_conv_operand_swap(false);
  }
  layout_insensitive_algsimp_opts
      .set_enable_unconditional_reduce_of_concat_replacement(false);
  return layout_insensitive_algsimp_opts;
}

// Normalization/canonicalization passes that must run before SPMD
// partitioning.
absl::Status RunPreSPMDPartitionerPasses(HloModule* hlo_module) {
  HloPassPipeline pre_spmd_pipeline("pre-spmd-partitioner");
  pre_spmd_pipeline.AddPass<BatchedGatherScatterNormalizer>();
  pre_spmd_pipeline.AddPass<CuDnnCustomCallConverter>();
  pre_spmd_pipeline.AddPass<ConvertMemoryPlacementToInternalAnnotations>();
  pre_spmd_pipeline.AddPass<CallInliner>();
  pre_spmd_pipeline.AddPass<ZeroSizedHloElimination>();
  pre_spmd_pipeline.AddPass<ConditionalCanonicalizer>();
  pre_spmd_pipeline.AddPass<TopkDecomposer>([&](const HloInstruction* instr) {
    return instr->opcode() == HloOpcode::kTopK;
  });
  pre_spmd_pipeline.AddPass<TopkRewriter>(
      [](const HloSortInstruction*, int64_t) { return true; });
  return pre_spmd_pipeline.Run(hlo_module).status();
}

// Runs SPMD partitioning (with optional auto-sharding, Google-internal only)
// when num_partitions > 1; otherwise strips sharding annotations.
absl::Status RunSPMDPasses(
    HloModule* hlo_module, const Compiler::TargetConfig& gpu_target_config,
    const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts) {
  bool auto_sharding = hlo_module->config().use_auto_spmd_partitioning();
#ifndef PLATFORM_GOOGLE
  if (auto_sharding) {
    LOG(ERROR) << "GPU autosharding is not yet available in open source.";
  }
#endif
  const int64_t num_partitions = hlo_module->config().num_partitions();
  if (num_partitions > 1) {
    if (!hlo_module->config().use_spmd_partitioning()) {
      return InvalidArgument(
          "num_partitions=%d but SPMD partitioning not enabled.",
          num_partitions);
    }
    HloPassPipeline spmd_pipeline("spmd-partitioner");
    AddSPMDPasses(
        hlo_module, layout_insensitive_algsimp_opts,
        gpu_target_config.device_description.gpu_compute_capability(),
        spmd_pipeline,
#ifdef PLATFORM_GOOGLE
        [&](HloPassPipeline& pipeline) {
          if (auto_sharding) {
            AutoShardingOption option;
            option.enable = true;
            if
                (!hlo_module->config()
                     .auto_spmd_partitioning_mesh_shape()
                     .empty()) {
              option.device_mesh_shape =
                  hlo_module->config().auto_spmd_partitioning_mesh_shape();
            } else {
              // Default mesh: all cores along one axis.
              option.device_mesh_shape = {
                  gpu_target_config.device_description.core_count(), 1};
            }
            if (!hlo_module->config()
                     .auto_spmd_partitioning_mesh_ids()
                     .empty()) {
              option.device_mesh_ids =
                  hlo_module->config().auto_spmd_partitioning_mesh_ids();
            }
            // Budgets are specified in GB in the debug options.
            option.memory_budget_per_device =
                hlo_module->config()
                    .debug_options()
                    .xla_gpu_auto_spmd_partitioning_memory_budget_gb() *
                1024 * 1024 * 1024;
            option.memory_budget_ratio =
                hlo_module->config()
                    .debug_options()
                    .xla_gpu_auto_spmd_partitioning_memory_budget_ratio();
            spmd_pipeline.AddPass<AutoSharding>(option);
          }
        });
#else
        std::nullopt);
#endif
    if (hlo_module->config()
            .debug_options()
            .xla_gpu_unsafe_pipelined_loop_annotator()) {
      spmd_pipeline.AddPass<WhileLoopTripCountAnnotator>();
      spmd_pipeline.AddPass<CollectivePermuteValidIterationAnnotator>();
    }
    return spmd_pipeline.Run(hlo_module).status();
  } else {
    // Single partition: sharding annotations are meaningless; remove them.
    HloPassPipeline sharding_removal_pipeline("sharding-removal");
    sharding_removal_pipeline.AddPass<ShardingRemover>();
    sharding_removal_pipeline.AddPass<HloDCE>();
    return sharding_removal_pipeline.Run(hlo_module).status();
  }
}

// The main pre-layout "optimization" pipeline: expanders, canonicalizers,
// dynamic-shape handling and fixed-point simplification.
absl::Status RunOptimizationPasses(
    HloModule* hlo_module, const Compiler::TargetConfig& gpu_target_config,
    const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts) {
  const DebugOptions& debug_options = hlo_module->config().debug_options();
  HloPassPipeline pipeline("optimization");
  AddHloVerifier(&pipeline,
                 !debug_options.xla_experimental_ignore_channel_id());
  if (debug_options.xla_gpu_multi_streamed_windowed_einsum()) {
    pipeline.AddPass<WindowedEinsumHandler>();
  }
  pipeline.AddPass<TopKSplitter>();
  pipeline.AddPass<TopkSpecializer>();
  pipeline.AddPass<TopkDecomposer>();
  // Upcast everything on pre-Volta CUDA devices; elsewhere only non-matmul
  // instructions.
  HloPredicate upcaster_filter = [&](const HloInstruction* instr) {
    const auto* cuda_cc = std::get_if<se::CudaComputeCapability>(
        &gpu_target_config.device_description.gpu_compute_capability());
    if (cuda_cc != nullptr &&
        !cuda_cc->IsAtLeast(se::CudaComputeCapability::VOLTA)) {
      return true;
    }
    return !gpu::IsMatrixMultiplication(*instr);
  };
  pipeline.AddPass<DotDimensionSorter>();
  pipeline.AddPass<DotDecomposer>();
  pipeline.AddPass<ResultCaster>(upcaster_filter);
  pipeline.AddPass<OperandUpcaster>(upcaster_filter);
  pipeline.AddPass<DotOperandConverter>();
  pipeline.AddPass<SubByteNormalization>(
      SubByteNormalization::SET_ELEMENT_SIZE);
  pipeline.AddPass<RngExpander>();
  pipeline.AddPass<RngBitGeneratorExpander>(RandomAlgorithm::RNG_PHILOX);
  pipeline.AddPass<ComparisonExpander>(std::array{std::make_pair(BF16, F32)});
  pipeline.AddPass<ZeroSizedHloElimination>();
  if (RequireDeterminism(hlo_module->config())) {
    // Expand scatters whose results depend on update ordering.
    pipeline.AddPass<ScatterExpander>(
        ScatterExpander::kEliminateIndeterministicScatters);
  }
  pipeline.AddPass<GpuScatterExpander>();
  pipeline.AddPass<QrExpander>();
  pipeline.AddPass<EighExpander>();
  pipeline.AddPass<DynamicIndexSplitter>();
  pipeline.AddPass<CallInliner>();
  pipeline.AddPass<StochasticConvertDecomposer>();
  pipeline.AddPass<Convolution4DExpander>();
  pipeline.AddPass<ConvolutionPredExpander>();
  pipeline.AddPass<StableSortExpander>();
  pipeline.AddPass<BatchNormExpander>(true, true, true);
  pipeline.AddPass<LogisticExpander>();
  pipeline.AddPass<ConditionalCanonicalizer>();
  pipeline.AddPass<DynamicDimensionSimplifier>();
  if (debug_options.xla_reduce_window_rewrite_base_length() != 0) {
    pipeline.AddPass<HloPassFix<ReduceWindowRewriter>>(
        debug_options.xla_reduce_window_rewrite_base_length());
  }
  DynamicPadderOptions dynamic_padder_options;
  switch (debug_options.xla_gpu_shape_checks()) {
    case DebugOptions::IGNORE:
      dynamic_padder_options.shape_check_mode =
          DynamicDimensionInference::ShapeCheckMode::kIgnore;
      break;
    case DebugOptions::RUNTIME: {
      dynamic_padder_options.shape_check_mode =
          DynamicDimensionInference::ShapeCheckMode::kRuntime;
      // Runtime shape checks are emitted as side-effecting assert custom
      // calls.
      dynamic_padder_options.assertion_generator =
[&](HloInstruction* inst) { auto created = Cast<HloCustomCallInstruction>( inst->parent()->AddInstruction(HloInstruction::CreateCustomCall( ShapeUtil::MakeTokenShape(), {inst}, kXlaGpuAssertCustomCallTag, "Buffers have different size at runtime", API_VERSION_STATUS_RETURNING))); created->set_custom_call_has_side_effect(true); }; break; } case DebugOptions::COMPILE_TIME: dynamic_padder_options.shape_check_mode = DynamicDimensionInference::ShapeCheckMode::kCompileTime; break; default: LOG(FATAL) << "Unreachable"; } pipeline.AddPass<DynamicPadder>(dynamic_padder_options); se::GpuComputeCapability gpu_version = gpu_target_config.device_description.gpu_compute_capability(); [&, &pipeline = pipeline.AddPass<HloPassFix<HloPassPipeline>>("simplification")] { AddHloVerifier(&pipeline, !debug_options.xla_experimental_ignore_channel_id(), HloVerifierOpts{}, true); pipeline.AddPass<ZeroSizedHloElimination>(); pipeline.AddPass<GatherSimplifier>(); pipeline.AddPass<GatherExpander>(GatherExpander::kEliminateSimpleGathers); pipeline.AddPass<ScatterSimplifier>(); pipeline.AddPass<ScatterExpander>( ScatterExpander::kEliminateSimpleScatters); pipeline.AddPass<ScatterSliceSimplifier>(); pipeline.AddPass<GpuAlgebraicSimplifier>(layout_insensitive_algsimp_opts, gpu_version); pipeline.AddPass<BitcastDtypesExpander>(); pipeline.AddPass<DotDimensionSorter>(); pipeline.AddPass<DotDecomposer>(); pipeline.AddPass<DotMerger>( int64_t{ debug_options.xla_gpu_dot_merger_threshold_mb()} << 20); pipeline.AddPass<SortSimplifier>(); pipeline.AddPass<TupleSimplifier>(); pipeline.AddPass<WhileLoopConstantSinking>(); pipeline.AddPass<WhileLoopSimplifier>(); pipeline.AddPass<SliceSinker>(); ReshapeMoverOptions reshape_mover_options; reshape_mover_options.reshape_of_1d_broadcast_is_cheap = true; pipeline.AddPass<ReshapeMover>(reshape_mover_options); pipeline.AddPass<HloConstantFolding>(); pipeline.AddPass<ConditionalSimplifier>(); pipeline.AddPass<RealImagExpander>(); 
pipeline.AddPass<TransposeFolding>(CanFoldTransposeOperandIntoDot); pipeline.AddPass<HloCSE>(false); pipeline.AddPass<HloDCE>(); }(); [&, &pipeline = pipeline.AddPass<HloPassFix<HloPassPipeline>>("simplification-2")] { pipeline.AddPass<ConvertMover>(); pipeline.AddPass<GpuAlgebraicSimplifier>(layout_insensitive_algsimp_opts, gpu_version); }(); pipeline.AddPass<HloComputationDeduplicator>( false); return pipeline.Run(hlo_module).status(); } absl::Status AddCollectivePipelinerPasses( const DebugOptions& debug_options, HloPassPipeline& collectives_pipeline) { if (debug_options.xla_gpu_enable_pipelined_collectives() || debug_options.xla_gpu_enable_pipelined_all_reduce()) { CollectivePipeliner::Config config{ 0, INT64_MAX, true, false, true, CollectivePipeliner::PipeliningDirection::kForward, HloPredicateIsOp<HloOpcode::kAllReduce>, HloPredicateTrue, HloPredicateFalse}; collectives_pipeline.AddPass<CollectivePipeliner>(config); } if (debug_options.xla_gpu_enable_pipelined_collectives() || debug_options.xla_gpu_enable_pipelined_all_gather()) { CollectivePipeliner::Config config{ 0, INT64_MAX, true, false, true, CollectivePipeliner::PipeliningDirection::kBackward, HloPredicateIsOp<HloOpcode::kAllGather>, HloPredicateTrue, HloPredicateFalse, HloPredicateFalse, false, std::nullopt, std::nullopt, true, }; collectives_pipeline.AddPass<CollectivePipeliner>(config); } if (debug_options.xla_gpu_enable_pipelined_collectives() || debug_options.xla_gpu_enable_pipelined_reduce_scatter()) { CollectivePipeliner::Config config{ 0, INT64_MAX, true, false, true, CollectivePipeliner::PipeliningDirection::kForward, HloPredicateIsOp<HloOpcode::kReduceScatter>, HloPredicateTrue, HloPredicateFalse}; collectives_pipeline.AddPass<CollectivePipeliner>(config); } return absl::OkStatus(); } absl::Status RunPostLayoutCollectivePipelinerPasses(HloModule* hlo_module) { const DebugOptions& debug_options = hlo_module->config().debug_options(); HloPassPipeline 
collectives_pipeline("collective-pipeliner-optimizations");
if (debug_options.xla_gpu_run_post_layout_collective_pipeliner()) {
  TF_RETURN_IF_ERROR(
      AddCollectivePipelinerPasses(debug_options, collectives_pipeline));
  // The pipeliner relies on known trip counts and a flat call graph.
  collectives_pipeline.AddPass<WhileLoopTripCountAnnotator>();
  collectives_pipeline.AddPass<FlattenCallGraph>();
}
return collectives_pipeline.Run(hlo_module).status();
}

// Pre-layout collective optimizations: folding/reassociating all-reduces,
// creating reduce-scatters, decomposing collective-permutes, and (unless
// deferred post-layout) collective pipelining.
absl::Status RunCollectiveOptimizationPasses(
    HloModule* hlo_module,
    const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts,
    se::GpuComputeCapability gpu_version) {
  const DebugOptions& debug_options = hlo_module->config().debug_options();
  HloPassPipeline collectives_pipeline("collective-optimizations");
  collectives_pipeline.AddPass<AllReduceFolder>();
  collectives_pipeline.AddPass<AllReduceSplitter>();
  collectives_pipeline.AddPass<AllGatherOptimizer>();
  collectives_pipeline.AddPass<AllReduceReassociate>(
      debug_options.xla_gpu_enable_reassociation_for_converted_ar());
  collectives_pipeline.AddPass<ReduceScatterReassociate>();
  collectives_pipeline.AddPass<WhileLoopAllReduceCodeMotion>(
      debug_options
          .xla_gpu_enable_while_loop_reduce_scatter_code_motion());
  if (!debug_options.xla_gpu_run_post_layout_collective_pipeliner()) {
    // Pipelining happens here unless deferred to the post-layout pass
    // (RunPostLayoutCollectivePipelinerPasses).
    TF_RETURN_IF_ERROR(
        AddCollectivePipelinerPasses(debug_options, collectives_pipeline));
  }
  collectives_pipeline.AddPass<ReduceScatterCreator>();
  // Cycle decomposition first, then general decomposition, both using the
  // same size threshold.
  collectives_pipeline.AddPass<CollectivePermuteCycleDecomposer>(
      hlo_module->config()
          .debug_options()
          .xla_gpu_collective_permute_decomposer_threshold());
  collectives_pipeline.AddPass<CollectivePermuteDecomposer>(
      hlo_module->config()
          .debug_options()
          .xla_gpu_collective_permute_decomposer_threshold());
  if (hlo_module->config()
          .debug_options()
          .xla_gpu_enable_pipelined_collectives() ||
      hlo_module->config().debug_options().xla_gpu_enable_pipelined_p2p()) {
    AddP2PPipeliner(collectives_pipeline);
  }
  collectives_pipeline.AddPass<GpuAlgebraicSimplifier>(
      layout_insensitive_algsimp_opts, gpu_version);
  collectives_pipeline.AddPass<AllGatherBroadcastReorder>();
  // Promote 16-bit integer all-reduce element types to 32-bit.
  const std::pair<PrimitiveType, PrimitiveType> ar_promoted_types[] = {
      {U16, U32}, {S16, S32}};
  collectives_pipeline.AddPass<AllReducePromotion>(ar_promoted_types);
  collectives_pipeline.AddPass<HloDCE>();
  collectives_pipeline.AddPass<CollectiveQuantizer>();
  collectives_pipeline.AddPass<HloDCE>();
  collectives_pipeline.AddPass<WhileLoopTripCountAnnotator>();
  return collectives_pipeline.Run(hlo_module).status();
}

// Assigns layouts for the entry computation and normalizes sub-byte types;
// also legalizes host-offloaded computations.
absl::Status RunLayoutAssignmentPasses(HloModule* hlo_module,
                                       se::GpuComputeCapability gpu_version,
                                       se::dnn::VersionInfo dnn_version) {
  HloPassPipeline pipeline("layout assignment");
  // Layout assignment requires a flat call graph.
  pipeline.AddPass<FlattenCallGraph>();
  ChannelLayoutConstraints layout_constraints;
  pipeline.AddPass<GpuLayoutAssignment>(
      hlo_module->mutable_entry_computation_layout(), gpu_version, dnn_version,
      &layout_constraints);
  pipeline.AddPass<SubByteNormalization>(
      SubByteNormalization::SET_ELEMENT_SIZE);
  pipeline.AddPass<OptimizeInputOutputBufferAlias>(true);
  pipeline.AddPass<HostOffloadLegalize>(
      static_cast<int64_t>(stream_executor::MemoryType::kHost), true);
  return pipeline.Run(hlo_module).status();
}

// Runs the main fusion pipeline, optional cost-model statistics collection,
// then horizontal fusion.
absl::Status RunFusionPasses(HloModule* hlo_module,
                             const Compiler::TargetConfig& gpu_target_config,
                             tsl::thread::ThreadPool* thread_pool,
                             HloCostAnalysis::ShapeSizeFunction shape_size_fn) {
  const se::DeviceDescription& gpu_device_info =
      gpu_target_config.device_description;
  TF_RETURN_IF_ERROR(FusionPipeline(hlo_module->config().debug_options(),
                                    shape_size_fn, thread_pool,
                                    gpu_device_info)
                         .Run(hlo_module)
                         .status());
  if (hlo_module->config()
          .debug_options()
          .xla_gpu_collect_cost_model_stats()) {
    GpuHloCostAnalysis::Options cost_analysis_options{
        shape_size_fn,
        {},
        {},
        true};
    HloPassPipeline post_fusion_analysis("post_fusion_analysis");
    post_fusion_analysis.AddPass<GpuCostModelStatsCollection>(
        gpu_device_info, cost_analysis_options);
    TF_RETURN_IF_ERROR(post_fusion_analysis.Run(hlo_module).status());
  }
  TF_RETURN_IF_ERROR(
HorizontalFusionPipeline(gpu_device_info).Run(hlo_module).status());
// Dump per-module fusion statistics at higher verbosity.
if (VLOG_IS_ON(2)) {
  HloFusionStatsVisitor stats;
  TF_RETURN_IF_ERROR(hlo_module->entry_computation()->Accept(&stats));
  VLOG(2) << stats.ToString();
}
return absl::OkStatus();
}

// Chooses the while-loop unroll strategy from several (overlapping) flags
// and, if one is selected, registers the double-buffering pass sequence.
// Precedence: FULL_UNROLL overrides double-buffering; AUTO applies only when
// the heuristic flag is on and double-buffering is off.
void AddDoubleBufferingPasses(const DebugOptions& opts,
                              HloPassPipeline& pipeline) {
  std::optional<DoubleBufferLoopUnrolling::UnrollStrategy> unroll_strategy =
      std::nullopt;
  if (opts.xla_gpu_enable_while_loop_double_buffering()) {
    unroll_strategy = DoubleBufferLoopUnrolling::UnrollStrategy::kDoubleBuffer;
  }
  if (opts.xla_gpu_enable_while_loop_unrolling() ==
      DebugOptions::WHILE_LOOP_UNROLLING_DOUBLE_BUFFER) {
    unroll_strategy = DoubleBufferLoopUnrolling::UnrollStrategy::kDoubleBuffer;
  }
  if (opts.xla_gpu_enable_while_loop_unrolling() ==
      DebugOptions::WHILE_LOOP_UNROLLING_FULL_UNROLL) {
    LOG_IF(WARNING, unroll_strategy != std::nullopt)
        << "Overriding double buffering set via "
           "`xla_gpu_enable_while_loop_double_buffering` flag.";
    unroll_strategy = DoubleBufferLoopUnrolling::UnrollStrategy::kFullUnroll;
  }
  if (opts.xla_gpu_enable_while_loop_unrolling() ==
          DebugOptions::WHILE_LOOP_UNROLLING_AUTO_UNROLL &&
      opts.xla_gpu_enable_heuristic_pass_configuration() &&
      !opts.xla_gpu_enable_while_loop_double_buffering()) {
    unroll_strategy = DoubleBufferLoopUnrolling::UnrollStrategy::kAuto;
  }
  if (unroll_strategy != std::nullopt) {
    pipeline.AddPass<WhileLoopSimplifier>();
    pipeline.AddPass<DoubleBufferLoopUnrolling>(*unroll_strategy);
    pipeline.AddPass<TupleSimplifier>();
    pipeline.AddPass<HloDCE>();
  }
}

// Post-fusion optimizations: collective combining, caller-provided custom
// kernel replacements, BlueConnect all-reduce decomposition, and loop
// double-buffering.
absl::Status RunPostFusionPasses(
    HloModule* hlo_module,
    std::function<absl::Status(HloPassPipeline*, const DebugOptions&)>
        add_custom_kernel_replacement_passes) {
  const DebugOptions& opts = hlo_module->config().debug_options();
  HloPassPipeline pipeline("post-fusion optimization");
  pipeline.AddPass<RenameFusions>();
  // Combiners merge small collectives up to flag-defined byte thresholds
  // (second argument caps the number of combined operands at 256).
  pipeline.AddPass<AllGatherCombiner>(
      opts.xla_gpu_all_gather_combine_threshold_bytes(), 256,
      opts.xla_gpu_enable_all_gather_combine_by_dim());
  pipeline.AddPass<AllReduceCombiner>(
      opts.xla_gpu_all_reduce_combine_threshold_bytes(), 256);
  pipeline.AddPass<ReduceScatterCombiner>(
      opts.xla_gpu_reduce_scatter_combine_threshold_bytes(), 256,
      opts.xla_gpu_enable_reduce_scatter_combine_by_dim());
  pipeline.AddPass<AllReduceContiguous>();
  TF_RETURN_IF_ERROR(add_custom_kernel_replacement_passes(&pipeline, opts));
  int32_t blueconnect_num_devices_per_host =
      hlo_module->config()
          .debug_options()
          .xla_gpu_all_reduce_blueconnect_num_devices_per_host();
  if (blueconnect_num_devices_per_host > 0) {
    pipeline.AddPass<AllReduceBlueConnect>(blueconnect_num_devices_per_host);
  }
  AddDoubleBufferingPasses(opts, pipeline);
  return pipeline.Run(hlo_module).status();
}

// Converts collectives to their async form, except op kinds the user
// disabled via xla_gpu_disable_async_collectives.
absl::Status RunPostFusionCollectiveOptimizationPasses(HloModule* hlo_module) {
  HloPassPipeline pipeline("post-fusion-collectives optimization");
  AsyncCollectiveCreator::CollectiveCreatorConfig config;
  config.convert_all_reduce = HloPredicateTrue;
  config.convert_collective_broadcast = HloPredicateTrue;
  config.convert_collective_permute = HloPredicateTrue;
  config.convert_all_gather = HloPredicateTrue;
  config.convert_reduce_scatter = HloPredicateTrue;
  config.convert_all_to_all = HloPredicateTrue;
  pipeline.AddPass<AsyncCollectiveCreator>(std::move(config));
  absl::flat_hash_set<DebugOptions::CollectiveOpType> disabled_async_ops;
  for (auto collective_op_type : hlo_module->config()
                                     .debug_options()
                                     .xla_gpu_disable_async_collectives()) {
    disabled_async_ops.insert(
        static_cast<DebugOptions::CollectiveOpType>(collective_op_type));
  }
  // Predicate: keep an already-async op asynchronous only if its kind was
  // not explicitly disabled.
  auto convert_to_async = [&disabled_async_ops](const HloInstruction* inst) {
    switch (inst->opcode()) {
      case HloOpcode::kAllReduceStart:
        return !disabled_async_ops.contains(DebugOptions::ALLREDUCE);
      case HloOpcode::kCollectivePermuteStart:
        return !disabled_async_ops.contains(DebugOptions::COLLECTIVEPERMUTE);
      case HloOpcode::kAllGatherStart:
        return !disabled_async_ops.contains(DebugOptions::ALLGATHER);
      // Async-wrapped collectives: inspect the wrapped opcode to decide.
      case HloOpcode::kAsyncStart: {
        auto async_inst = Cast<HloAsyncInstruction>(inst);
        switch (async_inst->async_wrapped_opcode()) {
          case HloOpcode::kCollectiveBroadcast:
            return !disabled_async_ops.contains(
                DebugOptions::COLLECTIVEBROADCAST);
          case HloOpcode::kReduceScatter:
            return !disabled_async_ops.contains(DebugOptions::REDUCESCATTER);
          case HloOpcode::kAllToAll:
            return !disabled_async_ops.contains(DebugOptions::ALLTOALL);
          default:
            return false;
        }
      }
      default:
        return false;
    }
  };
  pipeline.AddPass<AsyncCollectiveAnnotator>(convert_to_async);
  return pipeline.Run(hlo_module).status();
}

// Layout-sensitive simplification after fusion, plus stream annotation when
// multi-streamed windowed einsum is enabled.
absl::Status RunPostFusionSimplificationPasses(
    HloModule* hlo_module,
    const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts,
    se::GpuComputeCapability gpu_version) {
  HloPassPipeline pipeline("post-fusion-simplification-pipeline optimization");
  // Copy the layout-insensitive options and flip on layout sensitivity.
  AlgebraicSimplifierOptions options = layout_insensitive_algsimp_opts;
  options.set_is_layout_sensitive(true);
  pipeline.AddPass<GpuAlgebraicSimplifier>(options, gpu_version);
  pipeline.AddPass<HloComputationDeduplicator>(
      true);
  if (hlo_module->config()
          .debug_options()
          .xla_gpu_multi_streamed_windowed_einsum()) {
    pipeline.AddPass<StreamAttributeAnnotator>();
    pipeline.AddPass<StreamAttributeAsyncWrapper>();
  }
  return pipeline.Run(hlo_module).status();
}

// Optionally verifies Triton fusion numerics against a reference; a no-op
// pipeline unless the corresponding debug flag is set.
absl::Status RunPostFusionVerificationPasses(
    HloModule* hlo_module, se::StreamExecutor* stream_exec,
    const GpuCompiler::CompileOptions& options,
    const Compiler::TargetConfig& gpu_target_config) {
  HloPassPipeline pipeline("post-fusion-verification-pipeline optimization");
  if (hlo_module->config()
          .debug_options()
          .xla_gpu_verify_triton_fusion_numerics()) {
    TF_ASSIGN_OR_RETURN(
        AutotuneConfig autotune_config,
        GetAutotuneConfig(stream_exec, hlo_module->config().debug_options(),
                          options, gpu_target_config));
    pipeline.AddPass<TritonFusionNumericsVerifier>(autotune_config);
  }
  return pipeline.Run(hlo_module).status();
}

// Normalizes layouts (copies moved to users, broadcasts canonicalized) with
// a layout-sensitive simplifier run to a fixed point in between.
absl::Status RunLayoutNormalizationPasses(
    HloModule* hlo_module, const se::GpuComputeCapability& gpu_version) {
  HloPassPipeline layout_normalization_pipeline("layout normalization");
  const DebugOptions& debug_options = hlo_module->config().debug_options();
  AlgebraicSimplifierOptions opts =
      GpuCompiler::GetAlgebraicSimplifierOptions(hlo_module->config());
  opts.set_supports_non_canonical_dots(false);
  opts.set_is_layout_sensitive(true);
  opts.set_enable_conv_operand_swap(false);
  opts.set_minmax_propagate_nan(!debug_options.xla_gpu_enable_fast_min_max());
  opts.set_enable_unconditional_reduce_of_concat_replacement(false);
  layout_normalization_pipeline.AddPass<ReshapeDecomposer>();
  layout_normalization_pipeline.AddPass<HloPassFix<MoveCopyToUsers>>();
  layout_normalization_pipeline.AddPass<LayoutNormalization>(
      &NormalizeLayoutForGpuCustomCalls);
  layout_normalization_pipeline.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(
      opts, gpu_version);
  layout_normalization_pipeline.AddPass<BroadcastCanonicalizer>();
  layout_normalization_pipeline.AddPass<ScatterSimplifier>();
  return layout_normalization_pipeline.Run(hlo_module).status();
}

// When xla_gpu_async_dot is on, wraps cuBLAS GEMMs and Triton-fused
// computations so they can run asynchronously.
absl::Status RunAsyncDotPasses(HloModule* hlo_module) {
  HloPassPipeline pipeline("async-wrapper");
  const DebugOptions& debug_options = hlo_module->config().debug_options();
  if (debug_options.xla_gpu_async_dot()) {
    pipeline.AddPass<AsyncWrapper>([](HloInstruction* instruction) {
      if (IsCublasGemm(*instruction)) {
        return true;
      }
      if (instruction->called_computations().size() == 1 &&
          IsTritonFusedComputation(
              *instruction->called_computations().front())) {
        return true;
      }
      return false;
    });
  }
  return pipeline.Run(hlo_module).status();
}

// Rewrites dynamic-slice patterns into fusions when the feature flag is on;
// needs the platform name to pick the right rewrite target.
absl::Status RunDynamicSliceFusionPasses(HloModule* hlo_module,
                                         se::Platform::Id platform_id) {
  if (hlo_module->config()
          .debug_options()
          .xla_gpu_enable_dynamic_slice_fusion()) {
    HloPassPipeline pipeline("dynamic-slice");
    TF_ASSIGN_OR_RETURN(se::Platform * platform,
                        se::PlatformManager::PlatformWithId(platform_id));
    pipeline.AddPass<DynamicSliceFusionRewriter>(platform->Name());
    TF_RETURN_IF_ERROR(pipeline.Run(hlo_module).status());
  }
  return absl::OkStatus();
}

}  // namespace

// Serializes collective schedules when the subclass deems it necessary
// (queried lazily per module via RequiresCollectiveScheduleLinearizer).
absl::Status GpuCompiler::RunCollectiveScheduleLinearizerPasses(
    HloModule* hlo_module, se::StreamExecutor* stream_exec) {
  HloPassPipeline pipeline("collective-schedule-linearizer");
  pipeline.AddPass<CollectivesScheduleLinearizer>(
      [this, stream_exec](const HloModule* module) {
        return RequiresCollectiveScheduleLinearizer(module, stream_exec);
      });
  return pipeline.Run(hlo_module).status();
}

// Top-level HLO optimization driver: runs SPMD, general optimization,
// collective, layout, fusion, and post-fusion phases in order.
absl::Status GpuCompiler::OptimizeHloModule(
    HloModule* hlo_module, se::StreamExecutor* stream_exec,
    const CompileOptions& options, const TargetConfig& gpu_target_config) {
  tsl::profiler::TraceMe traceme("GpuCompiler::OptimizeHloModule");
  CheckNotScheduled(hlo_module);
  LogDebugOptions(hlo_module);
  MaybeOwningThreadPool thread_pool = CreateMaybeOwningThreadPool(
      hlo_module->config()
          .debug_options()
          .xla_gpu_force_compilation_parallelism(),
      options.thread_pool, tsl::port::MaxParallelism());
  AlgebraicSimplifierOptions layout_insensitive_algsimp_opts =
      LayoutInsensitiveAlgebraicSimplifierOptions(
          hlo_module->config(), gpu_target_config,
          GetAlgebraicSimplifierOptions(hlo_module->config()));
  TF_RETURN_IF_ERROR(RunPreSPMDPartitionerPasses(hlo_module));
  TF_RETURN_IF_ERROR(RunSPMDPasses(hlo_module, gpu_target_config,
                                   layout_insensitive_algsimp_opts));
  TF_RETURN_IF_ERROR(RunOptimizationPasses(hlo_module, gpu_target_config,
                                           layout_insensitive_algsimp_opts));
  se::GpuComputeCapability gpu_version =
      gpu_target_config.device_description.gpu_compute_capability();
  TF_RETURN_IF_ERROR(RunCollectiveOptimizationPasses(
      hlo_module, layout_insensitive_algsimp_opts, gpu_version));
  se::dnn::VersionInfo dnn_version = gpu_target_config.dnn_version_info;
  if (stream_exec != nullptr) {
    // A live executor is more authoritative than the static target config.
    gpu_version = GetGpuVersion(stream_exec);
    TF_ASSIGN_OR_RETURN(dnn_version, GetDnnVersionInfo(stream_exec));
  }
  TF_RETURN_IF_ERROR(OptimizeHloConvolutionCanonicalization(
      hlo_module, gpu_version, dnn_version, options.device_allocator,
      gpu_target_config.device_description.runtime_version()));
  TF_RETURN_IF_ERROR(
      RunLayoutAssignmentPasses(hlo_module, gpu_version, dnn_version));
  TF_RETURN_IF_ERROR(RunLayoutNormalizationPasses(hlo_module, gpu_version));
  TF_RETURN_IF_ERROR(OptimizeHloPostLayoutAssignment(
      hlo_module, stream_exec, options, gpu_target_config,
      thread_pool.get_mutable()));
  TF_RETURN_IF_ERROR(RunPostLayoutCollectivePipelinerPasses(hlo_module));
  TF_RETURN_IF_ERROR(RunDynamicSliceFusionPasses(hlo_module, PlatformId()));
  TF_RETURN_IF_ERROR(RunFusionPasses(hlo_module, gpu_target_config,
                                     thread_pool.get_mutable(),
                                     ShapeSizeBytesFunction()));
  TF_RETURN_IF_ERROR(RunPostFusionPasses(
      hlo_module,
      [this](HloPassPipeline* pipeline, const DebugOptions& debug_options) {
        return AddCustomKernelReplacementPasses(pipeline, debug_options);
      }));
  TF_RETURN_IF_ERROR(RunPostFusionCollectiveOptimizationPasses(hlo_module));
  TF_RETURN_IF_ERROR(RunPostFusionSimplificationPasses(
      hlo_module, layout_insensitive_algsimp_opts, gpu_version));
  TF_RETURN_IF_ERROR(RunPostFusionVerificationPasses(
      hlo_module, stream_exec, options, gpu_target_config));
  TF_RETURN_IF_ERROR(
      RunCollectiveScheduleLinearizerPasses(hlo_module, stream_exec));
  TF_RETURN_IF_ERROR(RunAsyncDotPasses(hlo_module));
  return absl::OkStatus();
}

// Base GPU algebraic-simplifier options; subclasses and callers layer
// layout/conv-specific settings on top of this.
AlgebraicSimplifierOptions GpuCompiler::GetAlgebraicSimplifierOptions(
    const HloModuleConfig& config) {
  AlgebraicSimplifierOptions opts;
  opts.set_enable_dot_strength_reduction(
      config.debug_options().xla_gpu_enable_dot_strength_reduction());
  return opts;
}

// Final module preparation before IR emission (delegates to the shared
// pipeline with the backend's buffer-sharing predicate).
absl::Status GpuCompiler::PrepareHloModuleForIrEmitting(HloModule* hlo_module) {
  return PrepareHloModuleForIrEmittingPipeline(*hlo_module, GetCanShareBuffer())
      .Run(hlo_module)
      .status();
}

namespace {

// Adds the two GemmRewriter passes (FP8 first, then non-FP8). Bias fusion is
// disabled under xla_gpu_async_dot.
void AddGemmRewriterPasses(HloPassPipeline& pipeline,
                           const DebugOptions& debug_options,
                           const se::GpuComputeCapability gpu_version,
                           const se::SemanticVersion& toolkit_version) {
  GemmRewriterOptions::BiasMode bias_mode =
      GemmRewriterOptions::BiasMode::kBias;
  if (debug_options.xla_gpu_async_dot()) {
    bias_mode = GemmRewriterOptions::BiasMode::kNoBias;
  }
  pipeline.AddPass<GemmRewriter>(
      gpu_version, toolkit_version,
      GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only, bias_mode});
  pipeline.AddPass<GemmRewriter>(
      gpu_version, toolkit_version,
      GemmRewriterOptions{GemmRewriterOptions::DType::kNonFp8Only, bias_mode});
}

}  // namespace

// Post-layout-assignment optimization: float normalization, GEMM/Triton
// rewrites, autotuning, and a layout-sensitive verification/simplification
// tail.
absl::Status GpuCompiler::OptimizeHloPostLayoutAssignment(
    HloModule* hlo_module, se::StreamExecutor* stream_exec,
    const CompileOptions& options, const TargetConfig& gpu_target_config,
    tsl::thread::ThreadPool* thread_pool) {
  const DebugOptions& debug_options = hlo_module->config().debug_options();
  const se::GpuComputeCapability gpu_version =
      gpu_target_config.device_description.gpu_compute_capability();
  // Layout-sensitive simplifier options built via an immediately-invoked
  // lambda so the result can stay const.
  const AlgebraicSimplifierOptions simplifier_options = [&] {
    AlgebraicSimplifierOptions opts =
        GetAlgebraicSimplifierOptions(hlo_module->config());
    opts.set_supports_non_canonical_dots(false);
    opts.set_is_layout_sensitive(true);
    opts.set_enable_conv_operand_swap(false);
    opts.set_minmax_propagate_nan(
        !debug_options.xla_gpu_enable_fast_min_max());
    opts.set_enable_unconditional_reduce_of_concat_replacement(false);
    return opts;
  }();
  TF_ASSIGN_OR_RETURN(AutotuneConfig autotune_config,
                      GetAutotuneConfig(stream_exec, debug_options, options,
                                        gpu_target_config));
  // Float support descriptors for every narrow type that may need
  // normalization to a wider compute type.
  const GpuFloatSupport bf16_support(gpu_version, BF16);
  const GpuFloatSupport f8e5m2_support(gpu_version, F8E5M2, F16);
  const GpuFloatSupport f8e4m3_support(gpu_version, F8E4M3, F16);
  const GpuFloatSupport f8e4m3fn_support(gpu_version, F8E4M3FN, F16);
  const FloatSupport f8e4m3b11fnuz_support(F8E4M3B11FNUZ, F16);
  const GpuFloatSupport f8e5m2fnuz_support(gpu_version, F8E5M2FNUZ, F16);
  const GpuFloatSupport f8e4m3fnuz_support(gpu_version, F8E4M3FNUZ, F16);
  const GpuFloatSupport f8e3m4_support(gpu_version, F8E3M4, F16);
  // Helper reused at several points below to insert the whole float
  // normalization sub-pipeline.
  auto add_float_normalization = [&](HloPassPipeline& pipeline) {
    auto& sub_pipeline =
        pipeline.AddPass<HloPassPipeline>("float_normalization");
    sub_pipeline.AddPass<FloatNormalization>(&bf16_support);
    sub_pipeline.AddPass<FloatNormalization>(&f8e5m2_support);
    sub_pipeline.AddPass<FloatNormalization>(&f8e4m3_support);
    sub_pipeline.AddPass<FloatNormalization>(&f8e4m3fn_support);
    sub_pipeline.AddPass<FloatNormalization>(&f8e4m3b11fnuz_support);
    sub_pipeline.AddPass<FloatNormalization>(&f8e5m2fnuz_support);
    sub_pipeline.AddPass<FloatNormalization>(&f8e4m3fnuz_support);
    sub_pipeline.AddPass<FloatNormalization>(&f8e3m4_support);
    if (debug_options.xla_allow_excess_precision()) {
      // Remove convert pairs that normalization may have introduced.
      sub_pipeline.AddPass<SimplifyFPConversions>();
    }
  };
  {
    // "hlo normalization": GEMM/Triton rewrites plus reduction and layout
    // normalization, run as its own pipeline before the main one below.
    HloPassPipeline pipeline("hlo normalization");
    pipeline.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(simplifier_options,
                                                         gpu_version);
    pipeline.AddPass<TransposeFolding>(CanFoldTransposeOperandIntoDot,
                                       TransposeFolding::NeverFoldTranspose);
    pipeline.AddPass<ReshapeDecomposer>();
    pipeline.AddPass<ReduceDecomposer>([&](const HloInstruction* r) {
      return IsReductionFromOrToContiguousDimensions(*r);
    });
    if (debug_options.xla_gpu_enable_custom_fusions()) {
      pipeline.AddPass<SimplifyFPConversions>();
      pipeline.AddPass<CustomKernelFusionRewriter>(
          &gpu_target_config.device_description);
      pipeline.AddPass<CustomKernelFusionAutotuner>(autotune_config);
    }
    // NOTE: intentionally shadows the outer const gpu_version with a local
    // mutable copy used by std::get_if below.
    se::GpuComputeCapability gpu_version =
        gpu_target_config.device_description.gpu_compute_capability();
    pipeline.AddPass<AlgorithmChecker>(gpu_version);
    const auto* cuda_cc =
        std::get_if<se::CudaComputeCapability>(&gpu_version);
    const auto* rocm_cc =
        std::get_if<se::RocmComputeCapability>(&gpu_version);
    if (debug_options.xla_gpu_enable_triton_gemm() &&
        (cuda_cc != nullptr &&
         cuda_cc->IsAtLeast(se::CudaComputeCapability::AMPERE))) {
      // Ampere+: route GEMMs through Triton fusion.
      pipeline.AddPass<GemvRewriter>();
      pipeline.AddPass<GemmFusion>(gpu_version);
    } else if (cuda_cc != nullptr &&
               cuda_cc->major == se::CudaComputeCapability::VOLTA) {
      // Volta: fall back to custom kernel fusions.
      pipeline.AddPass<SimplifyFPConversions>();
      pipeline.AddPass<CustomKernelFusionRewriter>(
          &gpu_target_config.device_description);
      pipeline.AddPass<CustomKernelFusionAutotuner>(autotune_config);
    }
    AddGemmRewriterPasses(
        pipeline, debug_options, gpu_version,
        gpu_target_config.device_description.runtime_version());
    pipeline.AddPass<GemmBroadcastFoldingRewriter>();
    pipeline.AddPass<LayoutNormalization>(&NormalizeLayoutForGpuCustomCalls);
    // Layout normalization can expose new simplification opportunities.
    pipeline.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(simplifier_options,
                                                         gpu_version);
    pipeline.AddPass<ScatterSimplifier>();
    pipeline.AddPass<BroadcastCanonicalizer>();
    pipeline.AddPass<TransposeDimensionGrouper>();
    pipeline.AddPass<ReductionDegenerateDimRemover>();
    pipeline.AddPass<ReductionLayoutNormalizer>();
    if (debug_options
            .xla_gpu_experimental_enable_triton_softmax_priority_fusion() &&
        ((cuda_cc != nullptr &&
          cuda_cc->IsAtLeast(se::CudaComputeCapability::AMPERE)) ||
         rocm_cc != nullptr)) {
      // Triton softmax fusion needs normalized floats and a cleaned-up
      // graph before pattern matching.
      add_float_normalization(pipeline);
      pipeline.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(simplifier_options,
                                                           gpu_version);
      pipeline.AddPass<HloCSE>(true);
      pipeline.AddPass<HloConstantFolding>();
      pipeline.AddPass<HloDCE>();
      pipeline.AddPass<SoftmaxRewriterTriton>(
          gpu_target_config.device_description, ShapeSizeBytesFunction(),
          true);
    }
    pipeline.AddPass<ReductionDimensionGrouper>();
    bool ignore_small_reduce_dims =
        !debug_options.xla_gpu_enable_priority_fusion();
    pipeline.AddPass<HloPassFix<ReductionSplitter>>(ignore_small_reduce_dims);
    pipeline.AddPass<HloPassFix<TreeReductionRewriter>>(gpu_version);
    pipeline.AddPass<SubByteNormalization>(
        SubByteNormalization::SET_ELEMENT_SIZE);
    TF_RETURN_IF_ERROR(pipeline.Run(hlo_module).status());
  }
  // Main post-layout pipeline: autotuning, host offloading, and a final
  // layout-sensitive simplification/CSE sweep.
  HloPassPipeline pipeline("post-layout_assignment");
  AddHloVerifier(&pipeline,
                 !debug_options.xla_experimental_ignore_channel_id(),
                 HloVerifierOpts{}
                     .MakeLayoutSensitive()
                     .WithInstructionCanChangeLayout(
                         LayoutAssignment::InstructionCanChangeLayout)
                     .VerifyBroadcastDimensionsOrder()
                     .VerifyReshapeIsBitcast(),
                 true);
  add_float_normalization(pipeline);
  TF_RETURN_IF_ERROR(AddGemmFusionAutotuningPasses(
      &pipeline, hlo_module, autotune_config, thread_pool,
      options.key_value_store,
      gpu_target_config.device_description.runtime_version()));
  // Inline computations produced by the autotuner before rewriting GEMMs.
  pipeline.AddPass<CallInliner>();
  AddGemmRewriterPasses(
      pipeline, debug_options, gpu_version,
      gpu_target_config.device_description.runtime_version());
  pipeline.AddPass<GemmBroadcastFoldingRewriter>();
  pipeline.AddPass<HostOffloader>(
      static_cast<int64_t>(stream_executor::MemoryType::kHost));
  TF_RETURN_IF_ERROR(
      AddConvAndGemmAutotuningPasses(&pipeline, gpu_version, options,
                                     hlo_module, autotune_config,
                                     thread_pool));
  // Normalize again: autotuning/rewrites may have introduced narrow types.
  add_float_normalization(pipeline);
  pipeline.AddPass<TupleSimplifier>();
  pipeline.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(simplifier_options,
                                                       gpu_version);
  if (debug_options.xla_allow_excess_precision()) {
    pipeline.AddPass<SimplifyFPConversions>();
  }
  pipeline.AddPass<HloCSE>(true);
  pipeline.AddPass<HostMemoryTransferAsyncifier>(
      static_cast<int64_t>(stream_executor::MemoryType::kHost));
#ifdef NDEBUG
  // Opt builds: run one strict end-of-phase verification.
  HloVerifierOpts opts =
      HloVerifierOpts{}
          .MakeLayoutSensitive()
          .WithInstructionCanChangeLayout(
              LayoutAssignment::InstructionCanChangeLayout)
          .VerifyBroadcastDimensionsOrder()
          .VerifyReshapeIsBitcast();
  opts.verify_unique_channel_ids =
      !debug_options.xla_experimental_ignore_channel_id();
  pipeline.AddPass<HloVerifier>(
      std::make_unique<DefaultVerifierMetadata>(std::move(opts)),
      "end-of-post-layout_assignment");
#endif
  TF_RETURN_IF_ERROR(pipeline.Run(hlo_module).status());
  return absl::OkStatus();
}

// Resolves the compilation target: explicit CompileOptions config, then a
// target-config file, then the attached StreamExecutor; errors otherwise.
absl::StatusOr<Compiler::TargetConfig> GpuCompiler::GetTargetConfig(
    const Compiler::CompileOptions& options, const DebugOptions& debug_opts,
    se::StreamExecutor* executor) {
  if (options.target_config.has_value()) {
    return *options.target_config;
  }
  if (!debug_opts.xla_gpu_target_config_filename().empty()) {
    std::string gpu_target_config_string;
    TF_RETURN_IF_ERROR(tsl::ReadFileToString(
        tsl::Env::Default(), debug_opts.xla_gpu_target_config_filename(),
        &gpu_target_config_string));
    stream_executor::GpuTargetConfigProto gpu_target_config_proto;
    // (condition continues on the next source line)
    if
(!tsl::protobuf::TextFormat::ParseFromString(gpu_target_config_string,
                                             &gpu_target_config_proto)) {
      return absl::FailedPreconditionError(
          "Failed to parse GpuTargetConfigProto");
    }
    return Compiler::TargetConfig{gpu_target_config_proto};
  }
  if (executor) {
    Compiler::TargetConfig target_config = Compiler::TargetConfig{executor};
    int64_t device_memory_size =
        target_config.device_description.device_memory_size();
    // -1 memory size marks a simulation device whose self-reported config
    // cannot be trusted.
    if (device_memory_size == -1) {
      return absl::FailedPreconditionError(
          "When running on an NVIDIA simulation device, you must use "
          "--xla_gpu_target_config_filename to pass in target information. "
          "The target config from StreamExecutor is inaccurate.");
    }
    return target_config;
  }
  return absl::InternalError(
      "Either GPU has to be attached, or --xla_gpu_target_config_filename "
      "has to be specified to specify the target to compile for.");
}

// Entry point for the HLO-pass phase of compilation: resolves the target,
// optimizes the module, and serializes/uploads autotune results and symbols.
absl::StatusOr<std::unique_ptr<HloModule>> GpuCompiler::RunHloPasses(
    std::unique_ptr<HloModule> module, se::StreamExecutor* stream_exec,
    const CompileOptions& options) {
  const DebugOptions debug_opts = module->config().debug_options();
  TF_RETURN_IF_ERROR(LoadAutotuneResultsFromFile(debug_opts));
  // Deviceless compilation: the target comes from options or a file, so the
  // executor must not be consulted during optimization.
  bool is_deviceless = options.target_config.has_value() ||
                       !debug_opts.xla_gpu_target_config_filename().empty();
  TF_ASSIGN_OR_RETURN(TargetConfig gpu_target_config,
                      GetTargetConfig(options, debug_opts, stream_exec));
  const std::optional<std::string> unoptimized_fingerprint =
      MaybeUploadUnoptimizedGpuSymbols(module.get(),
                                       gpu_target_config.ToProto());
  XLA_SCOPED_LOGGING_TIMER_IF(
      absl::StrCat("GpuCompiler::RunHloPasses for ", module->name()),
      !options.is_autotuning_compilation);
  uint64_t start_usecs = tsl::Env::Default()->NowMicros();
  tsl::profiler::TraceMe activity(
      [&] { return absl::StrCat("HLO Transforms:", module->name()); },
      tsl::profiler::TraceMeLevel::kInfo);
  TF_RETURN_IF_ERROR(OptimizeHloModule(module.get(),
                                       is_deviceless ? nullptr : stream_exec,
                                       options, gpu_target_config));
  TF_RETURN_IF_ERROR(PrepareHloModuleForIrEmitting(module.get()));
  if (module->config()
          .debug_options()
          .xla_gpu_experimental_enable_fusion_block_level_rewriter()) {
    HloPassPipeline pipeline("fusion-block-level-rewriter-pipeline");
    pipeline.AddPass<FusionBlockLevelRewriter>(
        gpu_target_config.device_description, ShapeSizeBytesFunction());
    TF_RETURN_IF_ERROR(pipeline.Run(module.get()).status());
  }
  uint64_t end_usecs = tsl::Env::Default()->NowMicros();
  RecordHloPassesDuration(end_usecs - start_usecs);
  DumpHloModuleMetadataIfEnabled({module.get()});
  AutotuneResults autotune_results;
  TF_ASSIGN_OR_RETURN(
      AutotuneConfig autotune_config,
      GetAutotuneConfig(stream_exec, debug_opts, options, gpu_target_config));
  if (!is_deviceless) {
    // Only persist autotune results obtained against a real device.
    TF_RETURN_IF_ERROR(
        AutotunerUtil::SerializeAutotuneResults(&autotune_results));
    TF_RETURN_IF_ERROR(SerializeAutotuneResultsToFile(debug_opts));
  }
  const std::optional<std::string> optimized_fingerprint =
      MaybeUploadOptimizedGpuSymbols(module.get(), autotune_results);
  if (unoptimized_fingerprint.has_value() &&
      optimized_fingerprint.has_value()) {
    MaybeUploadGpuSymbolMapping(*unoptimized_fingerprint,
                                *optimized_fingerprint);
  }
  if (DumpingEnabledForHloModule(*module)) {
    TF_ASSIGN_OR_RETURN(
        std::string autotune_results,
        AutotunerUtil::SerializeAutotuneResults(true));
    DumpToFileInDirOrStdout(*module, "", "autotune_results.pbtxt",
                            autotune_results);
  }
  return std::move(module);
}

namespace {

// Re-runs copy insertion after scheduling; the schedule is saved, cleared
// while copies are added, then updated and restored.
absl::Status RunPostSchedulingCopyInsertion(
    HloModule* module,
    const HloDataflowAnalysis::CanShareBuffer& can_share_buffer) {
  // -1 means "no limit" for region-based live range analysis.
  constexpr int64_t kRegionBasedLiveRangeAnalysisLimit = -1;
  const int64_t kUseRegionBasedLiveRangeAnalysis =
      module->config()
              .debug_options()
              .xla_gpu_copy_insertion_use_region_analysis() ?
kRegionBasedLiveRangeAnalysisLimit : 0; CopyInsertion copy_insertion(can_share_buffer, kUseRegionBasedLiveRangeAnalysis); TF_RETURN_IF_ERROR(copy_insertion.RemoveUnnecessaryCopies(module)); HloSchedule saved_schedule = module->schedule(); module->clear_schedule(); TF_RETURN_IF_ERROR( copy_insertion.CopyInsertion::AddSpecialCaseCopies(module)); TF_RETURN_IF_ERROR(HloDCE().Run(module).status()); TF_RETURN_IF_ERROR(saved_schedule.Update()); TF_RETURN_IF_ERROR(module->set_schedule(std::move(saved_schedule))); return absl::OkStatus(); } } using OutputInfoMap = absl::flat_hash_map<ShapeIndex, GpuExecutable::OutputInfo>; static void NullDiagnosticHandler(const llvm::DiagnosticInfo* diag_info, void* context) { std::string error_string; llvm::raw_string_ostream string_printer(error_string); llvm::DiagnosticPrinterRawOStream diagnostic_printer(string_printer); diag_info->print(diagnostic_printer); VLOG(5) << error_string; } namespace { std::unique_ptr<llvm::Module> CopyToContext(const llvm::Module& module, llvm::LLVMContext& context) { llvm::SmallString<0> bitcode; llvm::raw_svector_ostream bitcode_ostream(bitcode); llvm::WriteBitcodeToFile(module, bitcode_ostream); llvm::Expected<std::unique_ptr<llvm::Module>> new_module = llvm::parseBitcodeFile( llvm::MemoryBufferRef(llvm::StringRef(bitcode.data(), bitcode.size()), "split_module"), context); CHECK(new_module) << "Failed to parse bitcode " << llvm::toString(new_module.takeError()); return std::move(new_module.get()); } } absl::StatusOr<GpuCompiler::BackendCompileResult> GpuCompiler::CompileSingleModule(const HloModuleConfig& module_config, se::GpuComputeCapability gpu_version, const HloModule* debug_module, llvm::Module* llvm_module, bool relocatable, const CompileOptions& options, std::optional<int> shard_number) { { XLA_SCOPED_LOGGING_TIMER_IF( absl::StrCat( "GpuCompiler::RunBackend - Running LLVM verifier for ", (debug_module != nullptr ? 
debug_module->name() : "(unknown)")),
        VLOG_IS_ON(4) && !options.is_autotuning_compilation);
    // Install a diagnostic handler that only logs at VLOG(5) so LLVM
    // diagnostics do not spam the default log.
    llvm_module->getContext().setDiagnosticHandlerCallBack(
        NullDiagnosticHandler, nullptr);
    // Verify the IR produced by lowering before handing it to the backend;
    // a failure here points at the HLO -> LLVM IR lowering, not user code.
    std::string err;
    llvm::raw_string_ostream err_stream(err);
    TF_RET_CHECK(!llvm::verifyModule(*llvm_module, &err_stream))
        << "Invalid LLVM IR before optimizations:\n"
        << err_stream.str()
        << "\nThis probably indicates a bug in the HLO -> LLVM IR "
           "lowering. Rerun with --xla_dump_to to get the IR"
        << (debug_module
                ? absl::StrCat(" and looks for files with name containing: *",
                               FilenameFor(*debug_module, "", ""), "*")
                : ".");
  }
  // Compile the (single) LLVM module to target assembly/binary.
  TF_ASSIGN_OR_RETURN(
      BackendCompileResult result,
      CompileTargetBinary(module_config, llvm_module, gpu_version, relocatable,
                          debug_module, options));
  const bool should_dump = DumpingEnabledForHloModule(
      debug_module ? debug_module->name() : "", module_config.debug_options());
  if (should_dump) {
    if (debug_module) {
      // Optimized IR dump; shard_number distinguishes split sub-modules.
      llvm_ir::DumpIrIfEnabled(
          *debug_module, *llvm_module, true,
          shard_number.has_value() ? std::to_string(*shard_number) : "");
    } else {
      LOG(ERROR) << "Dumping is not implemented since the file name cannot be "
                    "inferred. Please implement (potentially MLIR) module -> "
                    "filename heuristic.";
    }
  }
  if (user_post_optimization_hook_) {
    user_post_optimization_hook_(*llvm_module);
  }
  if (should_dump) {
    // Also dump the backend assembly text (labelled "ptx").
    absl::string_view ptx = result.asm_text;
    if (debug_module) {
      DumpToFileInDirOrStdout(
          *debug_module, "",
          shard_number.has_value() ? (std::to_string(*shard_number) + ".ptx")
                                   : "ptx",
          ptx);
    } else {
      LOG(ERROR) << "Dumping is not implemented since the file name cannot be "
                    "inferred. Please implement (potentially MLIR) module -> "
                    "filename heuristic.";
    }
  }
  return result;
}

namespace {

// Counts external, defined (non-declaration) functions in `module`; used to
// decide how many sub-modules to split the LLVM module into.
int CountFunctions(const llvm::Module& module) {
  int num_functions = 0;
  for (const llvm::Function& func : module.functions()) {
    if (!func.isDeclaration() &&
        func.getLinkage() ==
            llvm::GlobalValue::LinkageTypes::ExternalLinkage) {
      ++num_functions;
    }
  }
  return num_functions;
}

// Returns the name of the single external defined function in `module`, or ""
// if the module contains zero or more than one such function. A non-empty
// name marks a sub-module as cacheable under that kernel name.
std::string SingleFunctionName(const llvm::Module& module) {
  std::string name;
  for (const llvm::Function& func : module.functions()) {
    if (!func.isDeclaration() &&
        func.getLinkage() ==
            llvm::GlobalValue::LinkageTypes::ExternalLinkage) {
      if (name.empty()) {
        name = func.getName().str();
      } else {
        // Second external function found -> not a single-function module.
        return "";
      }
    }
  }
  return name;
}

}

// Splits the LLVM module into sub-modules, compiles them (possibly in
// parallel), then links the resulting binaries; optionally reuses/updates an
// on-disk kernel cache.
absl::StatusOr<GpuCompiler::BackendCompileResult> GpuCompiler::CompileAndLink(
    const HloModuleConfig& module_config,
    CompileModuleResults& compile_module_results,
    se::GpuComputeCapability gpu_version, se::StreamExecutor* stream_exec,
    const CompileOptions& options, const HloModule* debug_module) {
  llvm::Module* llvm_module = &*compile_module_results.llvm_module;
  bool force_module_split =
      module_config.debug_options().xla_llvm_force_inline_before_split();
  if (force_module_split) {
    // Inline every call site of every used function so splitting cannot
    // separate a caller from its callee.
    for (llvm::Function& func : llvm_module->functions()) {
      if (func.getNumUses() > 0 && !func.isDeclaration()) {
        VLOG(4) << absl::StrFormat("Inlining function %s with %d users.\n",
                                   func.getName().str(), func.getNumUses());
        // Collect call sites first: inlining mutates the use list.
        std::vector<llvm::CallInst*> calls_to_inline;
        for (auto* user : func.users()) {
          if (auto* call = llvm::dyn_cast<llvm::CallInst>(user)) {
            calls_to_inline.push_back(call);
          }
        }
        for (auto* call_to_inline : calls_to_inline) {
          llvm::InlineFunctionInfo inline_function_info;
          if (!llvm::InlineFunction(*call_to_inline, inline_function_info)
                   .isSuccess()) {
            return absl::InternalError("Can not inline function " +
                                       func.getName().str());
          };
        }
      }
    }
  }
  // Remember initializers of external constants so they can be re-attached to
  // the split sub-modules (where they appear as declarations).
  llvm::DenseMap<llvm::StringRef, llvm::Constant*> const_initializer_map;
  llvm::Module& module_with_constants =
FailedPrecondition("File path can not be resolved: %s", cache_path);
    }
    // Merge this compilation's results with any pre-existing on-disk cache.
    const CompilationCacheProto& current_cache =
        compile_module_results.kernel_compilation_cache;
    const bool cache_file_exists =
        tsl::Env::Default()->FileExists(resolved_path).ok();
    if (cache_file_exists) {
      int loaded_kernel_count = 0;
      for (const auto& [name, entry] : current_cache.entries()) {
        // Kernels present in the current LLVM module were just compiled;
        // their cache entries are not expected to carry a binary yet.
        if (llvm_module->getFunction(name) != nullptr) {
          VLOG(5) << "Using the just compiled kernel for " << name;
          TF_RET_CHECK(entry.binary().empty())
              << name
              << " is a just compiled kernel and is not expected to have a "
                 "binary yet.";
          continue;
        }
        // Otherwise link the cached binary instead of recompiling.
        const uint8_t* binary =
            reinterpret_cast<const uint8_t*>(entry.binary().data());
        binaries_to_link.push_back(
            std::vector<uint8_t>(binary, binary + entry.binary().size()));
        VLOG(5) << "Using " << name
                << " from cache: " << entry.binary().size();
        ++loaded_kernel_count;
      }
      VLOG(2) << "Using " << loaded_kernel_count << " / "
              << current_cache.entries_size() << " cached kernels.";
    }
    if (!binaries_to_cache.empty()) {
      TF_RETURN_IF_ERROR(
          UpdateDiskKernelCache(resolved_path, cache_file_exists,
                                current_cache, binaries_to_cache));
    }
  }
  // Link all sub-module binaries (and cached ones) into one device binary.
  auto maybe_backend_result =
      LinkModules(gpu_version, stream_exec, std::move(binaries_to_link),
                  module_config.debug_options());
  if (!maybe_backend_result.ok()) {
    LOG(ERROR) << "The CUDA linking API did not work. Please use XLA_FLAGS="
                  "--xla_gpu_enable_llvm_module_compilation_parallelism=false "
                  "to bypass it, but expect to get longer compilation time due "
                  "to the lack of multi-threading. Original error: "
               << maybe_backend_result.status();
    return maybe_backend_result.status();
  }
  VLOG(4) << "Binary size after linking [B]: " << maybe_backend_result->size();
  // The cache proto has served its purpose; drop it to save memory.
  compile_module_results.kernel_compilation_cache.Clear();
  return BackendCompileResult{ptx_snippets, std::move(*maybe_backend_result)};
}

// Schedules the module, lowers it to LLVM IR, and compiles it to a device
// binary — either as one module or split-and-linked in parallel.
absl::StatusOr<GpuCompiler::CompileResultWithMetadata>
GpuCompiler::CompileToBackendResult(
    HloModule* module, llvm::LLVMContext* llvm_context,
    se::StreamExecutor* executor, const CompileOptions& options,
    const se::DeviceDescription& gpu_device_info) {
  tsl::profiler::TraceMe traceme("GpuCompiler::CompileToBackendResult");
  TF_RETURN_IF_ERROR(RunPreSchedulingPasses(module, executor));
  TF_ASSIGN_OR_RETURN(
      ScheduleMetadata schedule_metadata,
      ScheduleGpuModule(module, pointer_size_, gpu_device_info));
  TF_RETURN_IF_ERROR(RunPostSchedulingPipelines(
      module, schedule_metadata.scheduler_mem_limit, gpu_device_info));
  TF_ASSIGN_OR_RETURN(se::Platform * platform,
                      se::PlatformManager::PlatformWithId(PlatformId()));
  // Module splitting requires a device to link on, plus the flag enabling
  // parallel LLVM compilation.
  bool can_use_link_modules = (executor != nullptr);
  if (can_use_link_modules) {
    TF_ASSIGN_OR_RETURN(can_use_link_modules,
                        CanUseLinkModules(module->config()));
  }
  const bool split_modules =
      can_use_link_modules &&
      module->config()
          .debug_options()
          .xla_gpu_enable_llvm_module_compilation_parallelism();
  const bool use_cache =
      split_modules &&
      !module->config().debug_options().xla_gpu_kernel_cache_file().empty();
  TF_ASSIGN_OR_RETURN(
      CompileModuleResults compile_module_results,
      CompileModuleToLlvmIr(module, llvm_context, target_triple_, data_layout_,
                            platform->Name(), platform->id(), gpu_device_info,
                            GetCanShareBuffer(), BufferSizeBytesFunction(),
                            use_cache));
  if (user_pre_optimization_hook_) {
    user_pre_optimization_hook_(*compile_module_results.llvm_module);
    if (compile_module_results.llvm_module_constants != nullptr) {
      user_pre_optimization_hook_(
          *compile_module_results.llvm_module_constants);
    }
  }
  llvm_ir::DumpIrIfEnabled(*module, *compile_module_results.llvm_module,
false);
  if (compile_module_results.llvm_module_constants != nullptr) {
    llvm_ir::DumpIrIfEnabled(*module,
                             *compile_module_results.llvm_module_constants,
                             false, "constants");
  }
  BackendCompileResult backend_result;
  if (split_modules) {
    // Parallel path: split the LLVM module, compile shards, link binaries.
    TF_ASSIGN_OR_RETURN(
        backend_result,
        CompileAndLink(module->config(), compile_module_results,
                       gpu_device_info.gpu_compute_capability(), executor,
                       options, module));
  } else {
    // Single-module path; a separate constants module only exists when
    // splitting, so it must be absent here.
    CHECK(compile_module_results.llvm_module_constants == nullptr);
    TF_ASSIGN_OR_RETURN(
        backend_result,
        CompileSingleModule(module->config(),
                            gpu_device_info.gpu_compute_capability(), module,
                            &*compile_module_results.llvm_module, false,
                            options, std::nullopt));
  }
  RecordXlaDeviceBinarySize(backend_result.binary.size());
  if (DumpingEnabledForHloModule(*module)) {
    DumpToFileInDirOrStdout(
        *module, "", "thunk_sequence.txt",
        compile_module_results.executable->ToString(0));
  }
  return CompileResultWithMetadata{std::move(backend_result),
                                   std::move(compile_module_results)};
}

// Compiles an already-optimized HLO module into a GpuExecutable.
absl::StatusOr<std::unique_ptr<Executable>> GpuCompiler::RunBackend(
    std::unique_ptr<HloModule> module, se::StreamExecutor* stream_exec,
    const CompileOptions& options) {
  tsl::profiler::ScopedAnnotation backend_annotation{[&] {
    return absl::StrFormat("XlaCompileBackend:#module=%s,program_id=%d#",
                           module->name(), module->unique_id());
  }};
  BinaryMap dnn_compiled_graphs;
  if (stream_exec) {
    TF_RETURN_IF_ERROR(RunCudnnCompilerPasses(module.get(), stream_exec,
                                              &dnn_compiled_graphs));
  }
  const DebugOptions& debug_opts = module->config().debug_options();
  TF_ASSIGN_OR_RETURN(TargetConfig gpu_target_config,
                      GetTargetConfig(options, debug_opts, stream_exec));
  if (DumpingEnabledForHloModule(*module)) {
    std::string textproto;
    tsl::protobuf::TextFormat::PrintToString(gpu_target_config.ToProto(),
                                             &textproto);
    DumpToFileInDirOrStdout(*module, "", "gpu_target_config.pbtxt",
                            textproto);
  }
  if (!options.is_autotuning_compilation) {
    VLOG(1) << "Starting to compile HLO module " << module->name();
  }
  XLA_SCOPED_LOGGING_TIMER_IF(
    absl::StrCat("GpuCompiler::RunBackend for ", module->name()),
    !options.is_autotuning_compilation);
  // Alarm that logs if this compilation takes unusually long.
  std::string slow_compilation_msg =
      absl::StrCat("Compiling module ", module->name());
  auto slow_compile_alarm = SlowCompilationAlarm(slow_compilation_msg);
  if (options.is_autotuning_compilation) {
    if (module->config().debug_options().xla_embed_ir_in_executable()) {
      LOG(WARNING) << "Doing autotuning compilations with "
                      "xla_embed_ir_in_executable wastes memory!";
    }
  }
  llvm::LLVMContext llvm_context;
  const se::DeviceDescription& gpu_device_info =
      gpu_target_config.device_description;
  // Optional cost reporting; full --xla_hlo_profile is not supported on GPU.
  if (module->config().hlo_profiling_enabled() || VLOG_IS_ON(1)) {
    HloCostAnalysis::Options cost_analysis_options{ShapeSizeBytesFunction()};
    cost_analysis_options.set_bytes_per_second(
        gpu_device_info.memory_bandwidth());
    GpuHloCostAnalysis cost_analysis(cost_analysis_options, gpu_device_info);
    TF_RETURN_IF_ERROR(module->entry_computation()->Accept(&cost_analysis));
    if (!options.is_autotuning_compilation) {
      VLOG(1) << "HLO memory read+written: "
              << tsl::strings::HumanReadableNumBytes(
                     cost_analysis.bytes_accessed());
    }
    if (module->config().hlo_profiling_enabled()) {
      LOG(ERROR) << "--xla_hlo_profile for GPU is unsupported.";
    }
  }
  TF_ASSIGN_OR_RETURN(
      CompileResultWithMetadata res,
      CompileToBackendResult(module.get(), &llvm_context, stream_exec,
                             options, gpu_device_info));
  if (DumpingEnabledForHloModule(*module)) {
    DumpToFileInDirOrStdout(
        *module, "", "thunk_sequence.txt",
        res.compile_module_results.executable->ToString(0));
  }
  bool embed_ir_in_executable =
      module->config().debug_options().xla_embed_ir_in_executable();
  int64_t debug_buffer_assignment_show_max =
      module->config().debug_options().xla_debug_buffer_assignment_show_max();
  tsl::profiler::ScopedAnnotation annotation([&] {
    return absl::StrFormat("XlaCreateGpuExecutable:#module=%s#",
                           module->name());
  });
  TF_ASSIGN_OR_RETURN(
      auto gpu_executable,
      GpuExecutable::Create(GpuExecutable::Params{
          // For autotuning compilations with a binary, drop the asm text to
          // save memory.
          (options.is_autotuning_compilation &&
!res.backend_result.binary.empty())
              ? std::string()
              : std::move(res.backend_result.asm_text),
          std::move(res.backend_result.binary),
          std::move(dnn_compiled_graphs),
          gpu_device_info.gpu_compute_capability(),
          std::move(res.compile_module_results.executable),
          std::move(res.compile_module_results.constants),
          std::move(res.compile_module_results.output_info),
          std::move(res.compile_module_results.module_name),
          std::move(res.compile_module_results.output_shape),
          // Only pass explicit allocations when not using the original ones.
          (res.compile_module_results.use_original_allocations
               ? std::optional<std::vector<BufferAllocation>>()
               : std::move(res.compile_module_results.allocations)),
          std::move(res.compile_module_results.buffer_assignment),
          debug_buffer_assignment_show_max,
          // Autotuning executables do not keep the HLO module alive.
          options.is_autotuning_compilation ? std::unique_ptr<HloModule>()
                                            : std::move(module),
          !options.is_autotuning_compilation}));
  if (embed_ir_in_executable) {
    std::string ir_module_string_before_opt =
        llvm_ir::DumpToString(res.compile_module_results.llvm_module.get());
    gpu_executable->set_ir_module_string(ir_module_string_before_opt);
    DCHECK_NE("", ir_module_string_before_opt);
  }
  IncrementCompiledProgramsCount();
  if (!options.is_autotuning_compilation && gpu_executable->has_module()) {
    // Attach buffer-assignment debug info for tooling/debugging.
    auto hlo_proto = std::make_unique<HloProto>();
    *hlo_proto->mutable_buffer_assignment() =
        gpu_executable->buffer_assignment()->ToProto();
    gpu_executable->set_hlo_proto(std::move(hlo_proto));
    gpu_executable->set_debug_info(
        gpu_executable->buffer_assignment()->GetStats().ToString());
  }
  return static_cast<std::unique_ptr<Executable>>(std::move(gpu_executable));
}

// AOT entry point: optimizes each module if needed, compiles it, and wraps
// the result in a serializable AotCompilationResult.
absl::StatusOr<std::vector<std::unique_ptr<AotCompilationResult>>>
GpuCompiler::CompileAheadOfTime(std::unique_ptr<HloModuleGroup> module_group,
                                const AotCompilationOptions& options) {
  CHECK_EQ(options.PlatformId(), PlatformId());
  std::vector<std::unique_ptr<HloModule>> modules =
      module_group->ConsumeModules();
  std::vector<std::unique_ptr<HloModule>> optimized_modules;
  optimized_modules.reserve(modules.size());
  // A module that already has a schedule is assumed to be pre-optimized and
  // is passed through unchanged.
  for
  (std::unique_ptr<HloModule>& module : modules) {
    if (!module->has_schedule()) {
      tsl::profiler::ScopedAnnotation annotation{[&] {
        return absl::StrFormat("XlaCompile:#module=%s,program_id=%d#",
                               module->name(), module->unique_id());
      }};
      CompileOptions compile_options;
      compile_options.device_allocator = options.device_allocator();
      compile_options.target_config = options.target_config();
      TF_ASSIGN_OR_RETURN(
          std::unique_ptr<HloModule> optimized_module,
          RunHloPasses(std::move(module), options.executor(),
                       compile_options));
      optimized_modules.push_back(std::move(optimized_module));
    } else {
      optimized_modules.push_back(std::move(module));
    }
  }
  modules = std::move(optimized_modules);
  std::vector<std::unique_ptr<AotCompilationResult>> results;
  // Device info comes from the explicit target config when present,
  // otherwise from the attached executor (one of the two must exist).
  const std::optional<Compiler::TargetConfig>& target_config =
      options.target_config();
  CHECK(target_config.has_value() || options.executor() != nullptr);
  const se::DeviceDescription& gpu_device_info =
      target_config.has_value() ? target_config->device_description
                                : options.executor()->GetDeviceDescription();
  for (const std::unique_ptr<HloModule>& module : modules) {
    llvm::LLVMContext llvm_context;
    TF_ASSIGN_OR_RETURN(
        CompileResultWithMetadata res,
        CompileToBackendResult(module.get(), &llvm_context,
                               options.executor(),
                               {options.device_allocator()},
                               gpu_device_info));
    TF_ASSIGN_OR_RETURN(
        results.emplace_back(),
        GpuThunkAotCompilationResult::FromModule(
            module.get(), res.compile_module_results.buffer_assignment.get(),
            res.backend_result.asm_text, res.backend_result.binary,
            res.backend_result.dnn_compiled_graphs));
  }
  return std::move(results);
}

// Shape-size callback capturing this compiler's pointer size.
HloCostAnalysis::ShapeSizeFunction GpuCompiler::ShapeSizeBytesFunction()
    const {
  return [pointer_size = pointer_size_](const Shape& shape) {
    return GetSizeOfShape(shape, pointer_size);
  };
}

// Exports a compiled executable as a serializable AOT result.
absl::StatusOr<std::unique_ptr<AotCompilationResult>> GpuCompiler::Export(
    Executable* executable) const {
  auto* gpu_executable = tensorflow::down_cast<GpuExecutable*>(executable);
  if (!gpu_executable) return
Internal("GpuExecutable is null");
  return GpuThunkAotCompilationResult::FromModule(
      &gpu_executable->module(), gpu_executable->buffer_assignment(),
      gpu_executable->text(), gpu_executable->binary(),
      gpu_executable->dnn_compiled_graphs());
}

// Wraps non-fused instructions into fusions before scheduling.
absl::Status GpuCompiler::RunPreSchedulingPasses(
    HloModule* module, se::StreamExecutor* stream_exec) {
  HloPassPipeline pipeline("pre-scheduling-passes");
  pipeline.AddPass<FusionWrapper>();
  return pipeline.Run(module).status();
}

// Builds cost-analysis options; when host-memory offloading is enabled, also
// derives flop rates from the device description.
HloCostAnalysis::Options CreateHloAnalysisOpts(
    const HloModule& module, const se::DeviceDescription& gpu_device_info,
    ShapeSizeFn shape_size_fn) {
  HloCostAnalysis::Options hlo_cost_analysis_options;
  hlo_cost_analysis_options.shape_size = shape_size_fn;
  // NOTE(review): this local offloading_config is built but not returned;
  // CreateRematOpts below constructs its own copy — confirm intentional.
  std::optional<HloRematerialization::HostMemoryOffloadConfig>
      offloading_config = std::nullopt;
  if (module.config()
          .debug_options()
          .xla_gpu_enable_host_memory_offloading()) {
    constexpr float kGiga = 1e+9;
    // FMA counts as two floating-point operations.
    constexpr float kFma = 2;
    float flops_per_sec = gpu_device_info.core_count() *
                          gpu_device_info.fpus_per_core() *
                          gpu_device_info.clock_rate_ghz() * kGiga * kFma;
    int64_t host_memory_space_color =
        static_cast<int64_t>(se::MemoryType::kHost);
    hlo_cost_analysis_options.set_flops_per_second(flops_per_sec);
    hlo_cost_analysis_options.set_transcendentals_per_second(flops_per_sec);
    offloading_config =
        std::make_optional<HloRematerialization::HostMemoryOffloadConfig>(
            host_memory_space_color, gpu_device_info.memory_bandwidth(),
            gpu_device_info.memory_bandwidth());
  }
  return hlo_cost_analysis_options;
}

// Builds rematerialization options, including a host-offloading config when
// the corresponding debug flag is set.
HloRematerialization::Options CreateRematOpts(
    const HloModule& module, const se::DeviceDescription& gpu_device_info,
    HloCostAnalysis& hlo_cost_analysis, int64_t scheduler_mem_limit) {
  bool enable_offloading =
      module.config().debug_options().xla_gpu_enable_host_memory_offloading();
  std::optional<HloRematerialization::HostMemoryOffloadConfig>
      offloading_config = std::nullopt;
  if (enable_offloading) {
    int64_t host_memory_space_color =
    static_cast<int64_t>(se::MemoryType::kHost);
    offloading_config =
        std::make_optional<HloRematerialization::HostMemoryOffloadConfig>(
            host_memory_space_color, gpu_device_info.memory_bandwidth(),
            gpu_device_info.memory_bandwidth());
  }
  HloRematerialization::RematerializationModeConfig
      rematerialization_mode_config(true, true, enable_offloading);
  HloRematerialization::Options options(
      hlo_cost_analysis, rematerialization_mode_config, scheduler_mem_limit,
      1, 1, 0, nullptr, offloading_config);
  return options;
}

// Post-scheduling pipeline: copy insertion, async->sync conversion,
// rematerialization, fusion wrapping, and command-buffer scheduling.
absl::Status GpuCompiler::RunPostSchedulingPipelines(
    HloModule* module, int64_t scheduler_mem_limit,
    const se::DeviceDescription& gpu_device_info) const {
  TF_RETURN_IF_ERROR(
      RunPostSchedulingCopyInsertion(module, GetCanShareBuffer()));
  HloPassPipeline main_pipeline("post-scheduling-passes");
  // Ops treated as no-ops when deciding whether an async collective can be
  // converted back to a synchronous one.
  HloPredicate is_nop =
      HloPredicateIsOp<HloOpcode::kParameter, HloOpcode::kConstant,
                       HloOpcode::kBitcast, HloOpcode::kGetTupleElement>;
  {
    HloPassPipeline& pipeline =
        main_pipeline.AddPass<HloPassPipeline>("async-to-sync-converter");
    if (module->config()
            .debug_options()
            .xla_gpu_enable_pipelined_collectives() ||
        module->config().debug_options().xla_gpu_enable_pipelined_p2p()) {
      pipeline.AddPass<PipelinedP2PRewriter>();
    }
    pipeline.AddPass<GpuConvertAsyncCollectivesToSync>(is_nop);
  }
  HloRematerialization::RematerializationSizes sizes;
  HloCostAnalysis::Options hlo_cost_analysis_opts = CreateHloAnalysisOpts(
      *module, gpu_device_info, ShapeSizeBytesFunction());
  HloCostAnalysis hlo_cost_analysis(hlo_cost_analysis_opts);
  HloRematerialization::Options remat_opts = CreateRematOpts(
      *module, gpu_device_info, hlo_cost_analysis, scheduler_mem_limit);
  {
    HloPassPipeline& pipeline =
        main_pipeline.AddPass<HloPassPipeline>("remat-pipeline");
    pipeline.AddPass<HloRematerialization>(remat_opts, sizes);
    pipeline.AddPass<StreamAttributeAnnotator>();
    pipeline.AddPass<OptimizationBarrierExpander>();
  }
  {
    HloPassPipeline& pipeline =
        main_pipeline.AddPass<HloPassPipeline>("fusion-wrapper");
    pipeline.AddPass<FusionWrapper>();
  }
  {
    HloPassPipeline& pipeline =
        main_pipeline.AddPass<HloPassPipeline>("command-buffer-scheduling");
    pipeline.AddPass<CommandBufferScheduling>(gpu_device_info);
    pipeline.AddPass<SanitizeConstantNames>();
  }
  if (module->config()
          .debug_options()
          .xla_gpu_enable_pgle_accuracy_checker()) {
    // PGLE accuracy checking relies on instruction names staying stable.
    AddHloVerifier(
        &main_pipeline,
        module->config()
            .debug_options()
            .xla_experimental_ignore_channel_id(),
        HloVerifierOpts{}.VerifyInstructionNameUnchanged());
  }
  return main_pipeline.Run(module).status();
}

// Loads autotune results from the file named by the debug options, at most
// once per process (guarded by absl::call_once).
absl::Status GpuCompiler::LoadAutotuneResultsFromFile(
    const DebugOptions& debug_options) {
  if (absl::string_view file_path =
          debug_options.xla_gpu_load_autotune_results_from();
      !file_path.empty()) {
    static absl::once_flag once;
    absl::Status status = absl::OkStatus();
    absl::call_once(once, [&file_path, &status] {
      status = AutotunerUtil::LoadAutotuneResultsFromFile(file_path);
    });
    TF_RETURN_IF_ERROR(status);
  }
  return absl::OkStatus();
}

// Dumps autotune results to the file named by the debug options, if any.
absl::Status GpuCompiler::SerializeAutotuneResultsToFile(
    const DebugOptions& debug_options) {
  if (absl::string_view file_path =
          debug_options.xla_gpu_dump_autotune_results_to();
      !file_path.empty()) {
    TF_RETURN_IF_ERROR(
        AutotunerUtil::SerializeAutotuneResultsToFile(file_path));
  }
  return absl::OkStatus();
}

// Deserializes an AOT compilation result (instance method forwards to the
// static implementation).
absl::StatusOr<std::unique_ptr<AotCompilationResult>>
GpuCompiler::LoadAotCompilationResult(
    const std::string& serialized_aot_result) {
  return LoadAotCompilationResultStatic(serialized_aot_result);
}

absl::StatusOr<std::unique_ptr<AotCompilationResult>>
GpuCompiler::LoadAotCompilationResultStatic(
    const std::string& serialized_aot_result) {
  return GpuThunkAotCompilationResult::FromString(serialized_aot_result);
}

}  // namespace gpu
}  // namespace xla
// Tests for GpuCompiler (gpu_compiler_test.cc).
#include "xla/service/gpu/gpu_compiler.h"

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/autotune_results.pb.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_module_group.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/compiler.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/service/gpu/gpu_hlo_schedule.h"
#include "xla/service/gpu/metrics.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/xla_debug_info_manager.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"

namespace xla {
namespace gpu {
namespace {

namespace m = ::xla::match;

using ::testing::IsEmpty;
using ::testing::Not;
using ::testing::TempDir;

// Test fixture exposing helpers to schedule a module through the compiler's
// post-scheduling pipelines and to query the device's compute capability.
class GpuCompilerTest : public HloTestBase {
 public:
  // Schedules `module` (pointer size 4, 4 MiB memory limit) and runs the
  // GpuCompiler post-scheduling pipelines on it.
  absl::Status Schedule(HloModule* module) {
    auto compiler = backend().compiler();
    const se::DeviceDescription& gpu_device_info =
        backend().default_stream_executor()->GetDeviceDescription();
    TF_RETURN_IF_ERROR(
        ScheduleGpuModule(module, 4, gpu_device_info).status());
    return tensorflow::down_cast<GpuCompiler*>(compiler)
        ->RunPostSchedulingPipelines(module, 4 * 1024 * 1024,
                                     gpu_device_info);
  }

  const stream_executor::GpuComputeCapability& GpuComputeComp() {
    return backend()
        .default_stream_executor()
        ->GetDeviceDescription()
        .gpu_compute_capability();
  }
};

// RunBackend should bump the compiled-programs counter exactly once.
TEST_F(GpuCompilerTest, CompiledProgramsCount) {
  const char* hlo_text = R"( HloModule test ENTRY main { p = f32[10]{0} parameter(0) ROOT neg = f32[10]{0} negate(p) } )";
  auto module = ParseAndReturnVerifiedModule(hlo_text).value();
  ResetCompiledProgramsCountForTesting();
  std::unique_ptr<Executable> executable =
      backend()
          .compiler()
          ->RunBackend(std::move(module), backend().default_stream_executor(),
                       {nullptr, nullptr, {}, false})
          .value();
  EXPECT_EQ(GetCompiledProgramsCount(), 1);
}

// Non-autotuning compilations must register with XlaDebugInfoManager.
TEST_F(GpuCompilerTest, GenerateDebugInfoForNonAutotuningCompilations) {
  const char* hlo_text = R"( HloModule test ENTRY main { p = f32[10]{0} parameter(0) ROOT neg = f32[10]{0} negate(p) } )";
  auto module = ParseAndReturnVerifiedModule(hlo_text).value();
  std::unique_ptr<Executable> executable =
      backend()
          .compiler()
          ->RunBackend(std::move(module), backend().default_stream_executor(),
                       {nullptr, nullptr, {}, false})
          .value();
  EXPECT_TRUE(XlaDebugInfoManager::Get()->TracksModule(
      executable->module().unique_id()));
}

// Autotuning compilations (last option = true) must NOT register debug info.
TEST_F(GpuCompilerTest, DoesNotGenerateDebugInfoForAutotuningCompilations) {
  const char* hlo_text = R"( HloModule test ENTRY main { p = f32[10]{0} parameter(0) ROOT neg = f32[10]{0} negate(p) } )";
  auto module = ParseAndReturnVerifiedModule(hlo_text).value();
  // Capture the id before the module is consumed by RunBackend.
  int module_id = module->unique_id();
  std::unique_ptr<Executable> executable =
      backend()
          .compiler()
          ->RunBackend(std::move(module),
                       backend().default_stream_executor(),
                       {nullptr, nullptr, {}, true})
          .value();
  EXPECT_FALSE(XlaDebugInfoManager::Get()->TracksModule(module_id));
}

// A constant feeding a 4-way tuple should end up as one fusion whose results
// are distributed via get-tuple-element.
TEST_F(GpuCompilerTest, CopyInsertionFusion) {
  const char* hlo_text = R"( HloModule cluster ENTRY main { cst = f32[1]{0} constant({0}) ROOT tuple_out = (f32[1]{0}, f32[1]{0}, f32[1]{0}, f32[1]{0}) tuple(cst, cst, cst, cst) } )";
  EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{0, 0}));
  auto module = ParseAndReturnVerifiedModule(hlo_text).value();
  std::unique_ptr<HloModule> compiled_module =
      backend()
          .compiler()
          ->RunHloPasses(module->Clone(), backend().default_stream_executor(),
                         nullptr)
          .value();
  VLOG(2) << compiled_module->ToString();
  size_t total_fusion_instrs = 0;
  for (const HloInstruction* instr :
       compiled_module->entry_computation()->instructions()) {
    if (instr->opcode() == HloOpcode::kFusion) {
      ++total_fusion_instrs;
    }
  }
  EXPECT_EQ(total_fusion_instrs, 1);
  const HloInstruction* entry_root =
      compiled_module->entry_computation()->root_instruction();
  EXPECT_THAT(
      entry_root,
      GmockMatch(m::Tuple(
          m::GetTupleElement(m::Fusion()), m::GetTupleElement(m::Fusion()),
          m::GetTupleElement(m::Fusion()),
          m::GetTupleElement(m::Fusion()))));
}

// A pre-scheduled module should run even with all HLO passes disabled.
TEST_F(GpuCompilerTest, CanRunScheduledModules) {
  HloModuleConfig config;
  DebugOptions debug_options = GetDebugOptionsForTest();
  debug_options.set_xla_disable_all_hlo_passes(true);
  config.set_debug_options(debug_options);
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloModule> module,
      ParseAndReturnVerifiedModule(R"( HloModule m, is_scheduled=true w { p = s8[] parameter(0) ROOT n = s8[] negate(p) } ENTRY e { p = s8[] parameter(0) ROOT _ = s8[] fusion(p), kind=kLoop, calls=w })", config));
  EXPECT_TRUE(Run(std::move(module), true));
}

// NOTE: this test is truncated at the end of the visible chunk; it continues
// beyond this file view.
TEST_F(GpuCompilerTest, NonFusedInstructionsAreWrapped) {
  HloModuleConfig config;
  DebugOptions debug_options = GetDebugOptionsForTest();
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloModule> module,
      ParseAndReturnVerifiedModule(R"( HloModule m ENTRY e { p = f32[2,4,4] 
parameter(0) ROOT _ = f32[2,4,4]{2,1,0} transpose(p), dimensions={0,2,1} })", config)); config.set_debug_options(debug_options); std::unique_ptr<Executable> executable = backend() .compiler() ->RunBackend(std::move(module), backend().default_stream_executor(), {nullptr, nullptr, {}, false}) .value(); HloModule& compiled_module = executable->module(); const HloInstruction* entry_root = compiled_module.entry_computation()->root_instruction(); EXPECT_THAT(entry_root, GmockMatch(m::Fusion())); } class PersistedAutotuningTest : public HloTestBase { protected: static constexpr absl::string_view kHloText = R"( HloModule t ENTRY e { p0 = f16[1,16,17,3] parameter(0) p1 = s8[16,17,3] parameter(1) cp1 = f16[16,17,3] convert(p1) ROOT _ = f16[1,16,16] dot(p0, cp1), lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2} })"; std::string GetUniqueTempFilePath(absl::string_view suffix) { std::string filename = TempDir(); CHECK(tsl::Env::Default()->CreateUniqueFileName(&filename, std::string(suffix))); return filename; } std::string ExpectToReadNonEmptyFile(absl::string_view file_path) { std::string str; tsl::Env* env = tsl::Env::Default(); TF_EXPECT_OK(tsl::ReadFileToString(env, std::string(file_path), &str)); EXPECT_THAT(str, Not(IsEmpty())); return str; } DebugOptions GetDebugOptionsForTest() override { DebugOptions options = HloTestBase::GetDebugOptionsForTest(); options.set_xla_gpu_dump_autotune_results_to( xla_gpu_dump_autotune_results_to_); options.set_xla_gpu_load_autotune_results_from( xla_gpu_load_autotune_results_from_); return options; } std::string xla_gpu_dump_autotune_results_to_; std::string xla_gpu_load_autotune_results_from_; }; TEST_F(PersistedAutotuningTest, WriteResultsOnEachCompilation) { constexpr absl::string_view kInvalidTextProto = "Invalid!"; xla_gpu_dump_autotune_results_to_ = GetUniqueTempFilePath(".txt"); TF_EXPECT_OK(GetOptimizedModule(kHloText).status()); { std::string autotune_results_str = 
ExpectToReadNonEmptyFile(xla_gpu_dump_autotune_results_to_); AutotuneResults results; EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(autotune_results_str, &results)); } tsl::Env* env = tsl::Env::Default(); TF_EXPECT_OK(tsl::WriteStringToFile(env, xla_gpu_dump_autotune_results_to_, kInvalidTextProto)); TF_EXPECT_OK(GetOptimizedModule(kHloText).status()); { std::string autotune_results_str = ExpectToReadNonEmptyFile(xla_gpu_dump_autotune_results_to_); AutotuneResults results; EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(autotune_results_str, &results)); } } int64_t CountCopies(const HloComputation& computation) { int64_t count = 0; for (const auto& instruction : computation.instructions()) { if (instruction->opcode() == HloOpcode::kCopy) { count++; } } return count; } int64_t CountCopies(const HloModule& module) { int64_t count = 0; for (const auto& computation : module.computations()) { count += CountCopies(*computation); } return count; } TEST_F(GpuCompilerTest, RemovesUnnecessaryCopyAfterScheduling) { const absl::string_view hlo_string = R"( HloModule all_gather_overlapping condition { input_tuple = (f32[1,128], f32[2,128], pred[]) parameter(0) ROOT cond = pred[] get-tuple-element(input_tuple), index=2 } body { input_tuple = (f32[1,128], f32[2,128], pred[]) parameter(0) param_0 = f32[1,128] get-tuple-element(input_tuple), index=0 param_1 = f32[2,128] get-tuple-element(input_tuple), index=1 cond = pred[] get-tuple-element(input_tuple), index=2 c0 = f32[] constant(0) splat_c0 = f32[1,128] broadcast(c0), dimensions={} add = f32[1,128] add(splat_c0, param_0) all-gather-start = (f32[1,128], f32[2,128]) all-gather-start(add), channel_id=1337, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true c1_s32 = s32[] constant(1) c0_s32 = s32[] constant(0) dynamic-slice = f32[1,128] dynamic-slice(param_1, c1_s32, c0_s32), dynamic_slice_sizes={1,128} all-gather-done = f32[2,128] all-gather-done(all-gather-start) ROOT output_tuple = (f32[1,128], 
f32[2,128], pred[]) tuple(dynamic-slice, all-gather-done, cond) } ENTRY main { param_0 = f32[1,128] parameter(0) param_1 = f32[2,128] parameter(1) param_2 = pred[] parameter(2) tuple = (f32[1,128], f32[2,128], pred[]) tuple(param_0, param_1, param_2) ROOT while = (f32[1,128], f32[2,128], pred[]) while(tuple), condition=condition, body=body } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, GetOptimizedModule(hlo_string)); EXPECT_EQ(CountCopies(*module), 7); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* while_op = root->operand(0)->operand(0); EXPECT_EQ(while_op->while_body()->root_instruction()->operand(1)->opcode(), HloOpcode::kCopy); TF_ASSERT_OK(Schedule(module.get())); EXPECT_EQ(CountCopies(*module), 4); module->entry_computation()->root_instruction(); while_op = root->operand(0)->operand(0); EXPECT_EQ(while_op->while_body()->root_instruction()->operand(1)->opcode(), HloOpcode::kAllGatherDone); } TEST_F(GpuCompilerTest, GemmFusionIsNoOpWhenGemmFusionAutotunerFallsBackToCublas) { auto cc = backend() .default_stream_executor() ->GetDeviceDescription() .cuda_compute_capability(); if (!cc.IsAtLeastAmpere()) { GTEST_SKIP() << "Autotuning results have only been generated for Ampere " << "and Hopper GPUs"; } const absl::string_view hlo_string = R"( HloModule test ENTRY main { param_0 = bf16[3,32,1024,4,1024]{4,3,2,1,0} parameter(0) param_1 = bf16[4,3,32,1024]{3,2,1,0} parameter(1) param_2 = s32[] parameter(2) constant_0 = s32[] constant(0) dynamic-slice_0 = bf16[1,3,32,1024]{3,2,1,0} dynamic-slice(param_1, param_2, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,3,32,1024} reshape_0 = bf16[3,32,1024]{2,1,0} reshape(dynamic-slice_0) broadcast_0 = bf16[3,32,1024,4,1024]{2,1,4,3,0} broadcast(reshape_0), dimensions={0,1,2} add_0 = bf16[3,32,1024,4,1024]{4,3,2,1,0} add(param_0, broadcast_0) transpose_0 = bf16[3,4,1024,32,1024]{2,1,4,3,0} transpose(add_0), dimensions={0,3,4,1,2} slice_0 = 
bf16[1,4,1024,32,1024]{4,3,2,1,0} slice(transpose_0), slice={[0:1], [0:4], [0:1024], [0:32], [0:1024]} reshape_1 = bf16[4,1024,32,1024]{3,2,1,0} reshape(slice_0) copy_0 = bf16[4,1024,32,1024]{3,2,1,0} copy(reshape_1) constant_1 = bf16[] constant(0.08838) broadcast_1 = bf16[4,1024,32,1024]{3,2,1,0} broadcast(constant_1), dimensions={} multiply_0 = bf16[4,1024,32,1024]{3,2,1,0} multiply(copy_0, broadcast_1) slice_1 = bf16[1,4,1024,32,1024]{4,3,2,1,0} slice(transpose_0), slice={[1:2], [0:4], [0:1024], [0:32], [0:1024]} reshape_2 = bf16[4,1024,32,1024]{3,2,1,0} reshape(slice_1) copy_1 = bf16[4,1024,32,1024]{3,2,1,0} copy(reshape_2) ROOT dot_0 = bf16[4,32,1024,1024]{3,2,1,0} dot(multiply_0, copy_1), lhs_batch_dims={0,2}, lhs_contracting_dims={3}, rhs_batch_dims={0,2}, rhs_contracting_dims={3} } )"; HloModuleConfig config; DebugOptions triton_enabled_debug_options = GetDebugOptionsForTest(); triton_enabled_debug_options.set_xla_gpu_enable_dynamic_slice_fusion(false); triton_enabled_debug_options .set_xla_gpu_require_complete_aot_autotune_results(true); config.set_debug_options(triton_enabled_debug_options); config.set_replica_count(1); config.set_num_partitions(1); std::string path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "service", "gpu", "gpu_compiler_test_autotune_db.textproto"); TF_EXPECT_OK(AutotunerUtil::LoadAutotuneResultsFromFile(path)); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string, config)); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> triton_enabled_module, GetOptimizedModule(std::move(module))); AutotunerUtil::ClearAutotuneResults(); DebugOptions triton_disabled_debug_options = GetDebugOptionsForTest(); triton_disabled_debug_options.set_xla_gpu_enable_dynamic_slice_fusion(false); triton_disabled_debug_options.set_xla_gpu_enable_triton_gemm(false); config.set_debug_options(triton_disabled_debug_options); TF_ASSERT_OK_AND_ASSIGN(module, ParseAndReturnVerifiedModule(hlo_string, config)); 
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> triton_disabled_module, GetOptimizedModule(std::move(module))); const HloInstruction* root = triton_enabled_module->entry_computation()->root_instruction(); const HloInstruction* custom_op = root->operand(0)->operand(0); EXPECT_TRUE(custom_op->IsCustomCall("__cublas$gemm")); EXPECT_EQ(triton_enabled_module->computation_count(), triton_disabled_module->computation_count()); } class FloatNormalizationTest : public GpuCompilerTest, public ::testing::WithParamInterface< std::pair<PrimitiveType, PrimitiveType>> {}; INSTANTIATE_TEST_SUITE_P( Fp8s, FloatNormalizationTest, ::testing::Values( std::make_pair(PrimitiveType::F8E4M3FN, PrimitiveType::F8E4M3FN), std::make_pair(PrimitiveType::F8E5M2, PrimitiveType::F8E4M3FN), std::make_pair(PrimitiveType::F8E4M3FN, PrimitiveType::F8E5M2), std::make_pair(PrimitiveType::F8E5M2, PrimitiveType::F8E5M2))); TEST_P(FloatNormalizationTest, Fp8Normalization) { const PrimitiveType lhs_type = GetParam().first; const PrimitiveType rhs_type = GetParam().second; const std::string lhs_name = primitive_util::LowercasePrimitiveTypeName(lhs_type); const std::string rhs_name = primitive_util::LowercasePrimitiveTypeName(rhs_type); const std::string module_str = absl::Substitute(R"( HloModule sch ENTRY main { parameter = $0[1600,1600]{1,0} parameter(0) parameter.1 = $1[1600,1600]{1,0} parameter(1) neg = $1[1600,1600]{1,0} negate(parameter.1) dot = f16[1600,1600]{1,0} dot(parameter,neg), lhs_contracting_dims={1}, rhs_contracting_dims={0} constant = f16[] constant(0) broadcast = f16[1600,1600]{1,0} broadcast(constant), dimensions={} ROOT maximum = f16[1600,1600]{1,0} maximum(dot,broadcast) })", lhs_name, rhs_name); auto optimize_module = [&](bool enable_triton, bool enable_blas, bool enable_blas_fallback) -> absl::StatusOr<std::unique_ptr<HloModule>> { HloModuleConfig config; DebugOptions debug_options = GetDebugOptionsForTest(); debug_options.set_xla_gpu_cublas_fallback(enable_blas_fallback); 
debug_options.set_xla_gpu_enable_triton_gemm(enable_triton); if (!enable_blas) { debug_options.add_xla_disable_hlo_passes("cublas-gemm-rewriter"); } config.set_debug_options(debug_options); config.set_num_partitions(1); TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, config)); return GetOptimizedModule(std::move(module)); }; auto cc = backend() .default_stream_executor() ->GetDeviceDescription() .cuda_compute_capability(); const std::string triton_keep_types = absl::Substitute( R"(CHECK: fusion($0{{[^)]*}}, $1{{[^)]*}}){{.*}}"kind":"__triton_gemm")", lhs_name, rhs_name); const std::string cublaslt_keep_types = absl::Substitute( R"(CHECK: custom-call($0{{[^)]*}}, $1{{[^)]*}}){{.*}}custom_call_target="__cublas$$lt$$matmul$$f8")", lhs_name, rhs_name); const std::string cublas_convert_to_f16 = R"(CHECK: custom-call(f16{{[^)]*}}, f16{{[^)]*}}){{.*}}custom_call_target="__cublas$gemm")"; const std::string fallback_convert_to_f16 = R"(CHECK: dot(f16{{[^)]*}}, f16{{[^)]*}}))"; { TF_ASSERT_OK_AND_ASSIGN(auto optimized_module_no_fallback, optimize_module(true, true, false)); const std::string triton_expected_check = (cc.IsAtLeastHopper() || (cc.IsAtLeastAmpere() && lhs_type == F8E5M2 && rhs_type == F8E5M2)) ? triton_keep_types : cublas_convert_to_f16; TF_ASSERT_OK_AND_ASSIGN( bool filecheck_matched, RunFileCheck(optimized_module_no_fallback->ToString(), triton_expected_check)); EXPECT_TRUE(filecheck_matched); } { TF_ASSERT_OK_AND_ASSIGN(auto optimized_module_no_triton, optimize_module(false, true, true)); const std::string blas_expected_check = (cc.IsAtLeastHopper() && !(lhs_type == F8E5M2 && rhs_type == F8E5M2)) ? 
cublaslt_keep_types : cublas_convert_to_f16; TF_ASSERT_OK_AND_ASSIGN(bool filecheck_matched, RunFileCheck(optimized_module_no_triton->ToString(), blas_expected_check)); EXPECT_TRUE(filecheck_matched); } { TF_ASSERT_OK_AND_ASSIGN(auto optimized_module_nothing, optimize_module(false, false, false)); TF_ASSERT_OK_AND_ASSIGN(bool filecheck_matched, RunFileCheck(optimized_module_nothing->ToString(), fallback_convert_to_f16)); EXPECT_TRUE(filecheck_matched); } } TEST_F(GpuCompilerTest, CollectivePermuteDecompositionAndPipelining) { const char* kModuleStr = R"( HloModule cp cond { param = (u32[], f32[1, 1024, 1024]) parameter(0) count = get-tuple-element(%param), index=0 ub = u32[] constant(11) ROOT result = pred[] compare(count, ub), direction=LT } body { param = (u32[], f32[1, 1024, 1024]) parameter(0) count = get-tuple-element(%param), index=0 send-data = get-tuple-element(%param), index=1 recv-data = f32[1, 1024, 1024] collective-permute(send-data), source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}, channel_id=1 c1 = u32[] constant(1) new_count = u32[] add(count, c1) replica = u32[] replica-id() c10 = u32[] constant(10) sum = u32[] add(replica, c10) sum2 = u32[] add(sum, count) conv = f32[] convert(sum2) p = f32[1, 1024, 1024] broadcast(conv), dimensions={} b = f32[1, 1024, 1024] add(p, recv-data) c = f32[1, 1024, 1024] multiply(b, b) d = f32[1, 1024, 1024] tan(c) s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1} ROOT result = (u32[], f32[1, 1024, 1024]) tuple(new_count, s) } ENTRY test_computation { c0 = u32[] constant(0) f0 = f32[] constant(0.0) init = f32[1, 1024, 1024] broadcast(f0), dimensions={} while_init = (u32[], f32[1, 1024, 1024]) tuple(c0, init) while_result = (u32[], f32[1, 1024, 1024]) while(while_init), body=body, condition=cond ROOT result = f32[1, 1024, 1024] get-tuple-element(while_result), index=1 } )"; const char* kExpected = R"( CHECK: recv-done CHECK-SAME: 
channel_id=[[CHANNEL_ID:[0-9]+]] CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0"} CHECK: send-done CHECK-SAME: channel_id=[[CHANNEL_ID]] CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0"} CHECK: %[[CUSTOM_CALL:.*]] = custom-call CHECK: %[[AFTER_ALL:.*]] = after-all CHECK: %[[RESULT_RECV:.*]] = recv(%[[AFTER_ALL]]) CHECK-SAME: channel_id=[[CHANNEL_ID]] CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0", CHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}}, CHECK-SAME: control-predecessors={%[[CUSTOM_CALL]]} CHECK: %[[RESULT_SEND:.*]] = send(%[[SOME_SEND_ARG:.*]], %[[AFTER_ALL]]) CHECK-SAME: channel_id=1 CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0", CHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}}, CHECK-SAME: control-predecessors={%[[RESULT_RECV]]} CHECK: ROOT CHECK-SAME: %[[RESULT_RECV]] CHECK: ENTRY CHECK: %[[ENTRY_AFTER_ALL:.*]] = after-all CHECK: %[[ENTRY_RECV:.*]] = recv(%[[ENTRY_AFTER_ALL]]) CHECK-SAME: channel_id=[[CHANNEL_ID]] CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0", CHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}} CHECK: %[[ENTRY_SEND:.*]] = send(%[[SOME_SEND_ARG:.*]], %[[ENTRY_AFTER_ALL]]) CHECK-SAME: channel_id=1 CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0", CHECK-SAME{LITERAL}: _xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3},{3,4}}}, CHECK-SAME: control-predecessors={%[[ENTRY_RECV]]} CHECK: %[[WHILE_INIT:.*]] = tuple CHECK-SAME: %[[ENTRY_SEND]] CHECK: while(%[[WHILE_INIT]]) CHECK: recv-done CHECK-SAME: channel_id=[[CHANNEL_ID]] CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0"} CHECK: send-done CHECK-SAME: channel_id=[[CHANNEL_ID]] CHECK-SAME: frontend_attributes={_xla_send_recv_pipeline="0"} )"; HloModuleConfig config; DebugOptions debug_options = GetDebugOptionsForTest(); debug_options.set_xla_gpu_enable_latency_hiding_scheduler(true); 
debug_options.set_xla_gpu_collective_permute_decomposer_threshold(1); debug_options.set_xla_gpu_enable_pipelined_p2p(true); debug_options.set_xla_gpu_enable_triton_gemm(false); config.set_debug_options(debug_options); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kModuleStr, config)); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module, GetOptimizedModule(std::move(module))); TF_ASSERT_OK(Schedule(optimized_module.get())); HloPrintOptions options; options.set_print_operand_shape(false); options.set_print_result_shape(false); TF_ASSERT_OK_AND_ASSIGN( bool filecheck_matched, RunFileCheck(optimized_module->ToString(options), kExpected)); EXPECT_TRUE(filecheck_matched); } class KernelCacheTest : public HloTestBase { public: void SetUp() override { CHECK(tsl::Env::Default()->LocalTempFilename(&cache_file_name_)); HloModuleConfig config; config.set_debug_options(GetDebugOptionsForTest()); TF_ASSERT_OK_AND_ASSIGN(bool can_use_link_modules, dynamic_cast<GpuCompiler*>(backend().compiler()) ->CanUseLinkModules(config)); if (!can_use_link_modules) { GTEST_SKIP() << "Caching compiled kernels requires support of linking."; } } DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest(); debug_options.set_xla_gpu_kernel_cache_file(cache_file_name_); debug_options.set_xla_gpu_enable_llvm_module_compilation_parallelism(true); return debug_options; } bool CacheFileExists() { if (!tsl::Env::Default()->FileExists(cache_file_name_).ok()) { return false; } return true; } int CacheEntryCount() { if (!CacheFileExists()) { return 0; } std::string serialized; TF_EXPECT_OK(tsl::ReadFileToString(tsl::Env::Default(), cache_file_name_, &serialized)); CompilationCacheProto proto; EXPECT_TRUE(proto.ParseFromString(std::string(serialized))); return proto.entries_size(); } std::string cache_file_name_; static constexpr absl::string_view kHloText = R"( ENTRY e { p = s8[] parameter(0) c 
= s8[] constant(8) ROOT _ = s8[] add(p, c) })"; }; TEST_F(KernelCacheTest, CacheIsGenerated) { EXPECT_FALSE(CacheFileExists()); EXPECT_TRUE(Run(kHloText, false)); EXPECT_EQ(CacheEntryCount(), 1); EXPECT_TRUE(Run(kHloText, false)); EXPECT_EQ(CacheEntryCount(), 1); } TEST_F(KernelCacheTest, NoCacheIsGeneratedWithoutCompiledKernels) { EXPECT_FALSE(CacheFileExists()); EXPECT_TRUE(Run(R"( ENTRY e { a = f32[5,5] parameter(0) ROOT _ = f32[5,5] custom-call(a, a), custom_call_target="__cublas$gemm", backend_config="{ \"gemm_backend_config\": {\"alpha_real\":1,\"beta\":0,\"dot_dimension_numbers\":{\"lhs_contracting_dimensions\":[\"1\"],\"rhs_contracting_dimensions\":[\"0\"],\"lhs_batch_dimensions\":[],\"rhs_batch_dimensions\":[]},\"alpha_imag\":0,\"precision_config\":{\"operand_precision\":[\"DEFAULT\",\"DEFAULT\"]},\"epilogue\":\"DEFAULT\"}}" })", false)); EXPECT_FALSE(CacheFileExists()); } TEST_F(KernelCacheTest, CacheGrowsWithNewKernels) { EXPECT_FALSE(CacheFileExists()); EXPECT_TRUE(Run(kHloText, false)); EXPECT_EQ(CacheEntryCount(), 1); EXPECT_TRUE(Run(R"( ENTRY e { p = s8[] parameter(0) ROOT _ = s8[] multiply(p, p) })", false)); EXPECT_EQ(CacheEntryCount(), 2); } TEST_F(KernelCacheTest, AllKernelsAreCachedBecauseSplitModuleUsesRoundRobin) { EXPECT_FALSE(CacheFileExists()); EXPECT_TRUE(Run(R"( ENTRY e { p = s8[] parameter(0) n = s8[] negate(p) a = s8[] add(n, n) s = s8[] subtract(p, a) ROOT _ = s8[] multiply(s, p) })", false)); EXPECT_EQ(CacheEntryCount(), 4); } TEST_F(KernelCacheTest, CachingWorksWithLoadedExecutables) { const std::string kHloAdd1 = R"( add1 { p = s32[] parameter(0) c = s32[] constant(1) ROOT a = s32[] add(p, c) } ENTRY e { p = s32[] parameter(0) ROOT r = s32[] fusion(p), kind=kLoop, calls=add1 })"; const std::string kHloAdd2 = R"( add2 { p = s32[] parameter(0) c = s32[] constant(2) ROOT a = s32[] add(p, c) } ENTRY e { p = s32[] parameter(0) ROOT r = s32[] fusion(p), kind=kLoop, calls=add2 })"; TF_ASSERT_OK_AND_ASSIGN(se::Platform * platform, 
se::PlatformManager::PlatformWithName("cuda")); TF_ASSERT_OK_AND_ASSIGN(se::StreamExecutor * stream_exec, platform->ExecutorForDevice(0)); Compiler* compiler = backend().compiler(); AotCompilationOptions aot_options(compiler->PlatformId()); aot_options.set_executor(stream_exec); auto test = [this, &compiler, &aot_options](absl::string_view hlo, int input, int expected_result) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo)); auto module_group = std::make_unique<HloModuleGroup>(std::move(module)); TF_ASSERT_OK_AND_ASSIGN( std::vector<std::unique_ptr<AotCompilationResult>> aot_results, compiler->CompileAheadOfTime(std::move(module_group), aot_options)); TF_ASSERT_OK_AND_ASSIGN(std::string serialized_aot_result, aot_results[0]->SerializeAsString()); TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<AotCompilationResult> aot_result, compiler->LoadAotCompilationResult(serialized_aot_result)); TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<Executable> executable, aot_result->LoadExecutable(compiler, aot_options.executor())); const xla::Literal literal_input = xla::LiteralUtil::CreateR0<int32_t>(input); const xla::Literal literal_expected_result = xla::LiteralUtil::CreateR0<int32_t>(expected_result); TF_ASSERT_OK_AND_ASSIGN(Literal result, GetHloRunner().value()->ExecuteWithExecutable( executable.get(), {&literal_input})); EXPECT_TRUE(LiteralTestUtil::Equal(result, literal_expected_result)); }; test(kHloAdd1, 1, 2); test(kHloAdd2, 1, 3); test(kHloAdd2, 1, 3); } class KernelCacheTestSingleThreaded : public KernelCacheTest { public: DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = KernelCacheTest::GetDebugOptionsForTest(); debug_options.set_xla_gpu_force_compilation_parallelism(1); return debug_options; } }; TEST_F(KernelCacheTestSingleThreaded, CacheIsGenerated) { EXPECT_FALSE(CacheFileExists()); EXPECT_TRUE(Run(kHloText, false)); EXPECT_EQ(CacheEntryCount(), 1); EXPECT_TRUE(Run(kHloText, false)); 
EXPECT_EQ(CacheEntryCount(), 1); } class NoKernelCacheTest : public KernelCacheTest { public: DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = KernelCacheTest::GetDebugOptionsForTest(); debug_options.set_xla_gpu_enable_llvm_module_compilation_parallelism(false); return debug_options; } }; TEST_F(NoKernelCacheTest, NoCacheWithoutCompilationParallelism) { EXPECT_TRUE(Run(kHloText, false)); EXPECT_FALSE(CacheFileExists()); } TEST_F(GpuCompilerTest, TestFlag_xla_gpu_unsafe_pipelined_loop_annotator) { const char* hlo = R"( HloModule test, entry_computation_layout={()->(s32[], s32[])} %Body (param: (s32[], s32[])) -> (s32[], s32[]) { %param = (s32[], s32[]) parameter(0) %i = s32[] get-tuple-element((s32[], s32[]) %param), index=1 %one = s32[] constant(1) %i_plus_one = s32[] add(s32[] %i, s32[] %one) %permute = s32[] collective-permute(%i_plus_one), channel_id=1, source_target_pairs={{0,1},{1,2},{2,3},{3,0}} ROOT %tuple = (s32[], s32[]) tuple(s32[] %permute, s32[] %i_plus_one) } %Cond (param.1: (s32[], s32[])) -> pred[] { %param.1 = (s32[], s32[]) parameter(0) %i.1 = s32[] get-tuple-element((s32[], s32[]) %param.1), index=1 %trip_count = s32[] constant(10) ROOT %done = pred[] compare(s32[] %i.1, s32[] %trip_count), direction=LT } ENTRY %test () -> (s32[], s32[]) { %i_start = s32[] constant(0) %p_start = s32[] constant(0) %initial_tuple = (s32[], s32[]) tuple(s32[] %i_start, s32[] %p_start) ROOT %while = (s32[], s32[]) while((s32[], s32[]) %initial_tuple), condition=%Cond, body=%Body, frontend_attributes={is_pipelined_while_loop="true"} })"; const char* kExpected = R"( )"; DebugOptions debug_options; HloModuleConfig config; debug_options.set_xla_gpu_unsafe_pipelined_loop_annotator(true); config.set_debug_options(debug_options); config.set_num_partitions(4); config.set_use_spmd_partitioning(true); TF_ASSERT_OK_AND_ASSIGN(auto unoptimized_module, ParseAndReturnVerifiedModule(hlo, config)); TF_ASSERT_OK_AND_ASSIGN(auto optimized_module, 
GetOptimizedModule(std::move(unoptimized_module))); HloPrintOptions options; options.set_print_operand_shape(false); options.set_print_result_shape(false); TF_ASSERT_OK_AND_ASSIGN( bool filecheck_matched, RunFileCheck(optimized_module->ToString(options), kExpected)); EXPECT_TRUE(filecheck_matched); } using GpuCompilerPassTest = GpuCompilerTest; TEST_F(GpuCompilerPassTest, GpuCompilerRunsTritonGemmRewriterByDefaultFromAmpere) { if (std::holds_alternative<se::RocmComputeCapability>(GpuComputeComp())) { GTEST_SKIP() << "TritonGemmRewriter disabled for ROCm until autotuner " << "is included."; } auto cc = backend() .default_stream_executor() ->GetDeviceDescription() .cuda_compute_capability(); bool is_rocm = std::holds_alternative<stream_executor::RocmComputeCapability>( backend() .default_stream_executor() ->GetDeviceDescription() .gpu_compute_capability()); bool expect_triton_gemm_rewriter_has_run = cc.IsAtLeastAmpere() || is_rocm; constexpr absl::string_view constant_module = R"( HloModule noop ENTRY main { ROOT constant = f32[] constant(0) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(constant_module)); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module, GetOptimizedModule(std::move(module))); const HloModuleMetadataProto& module_metadata = optimized_module->metadata()->proto(); bool triton_gemm_rewriter_has_run = false; for (const HloPassMetadata& pass_metadata : module_metadata.pass_metadata()) { triton_gemm_rewriter_has_run |= pass_metadata.pass_name() == "triton-gemm-rewriter"; } EXPECT_EQ(triton_gemm_rewriter_has_run, expect_triton_gemm_rewriter_has_run); } TEST_F(GpuCompilerPassTest, GpuCompilerRunsCustomKernelFusionByDefaultFromVolta) { auto cc = backend() .default_stream_executor() ->GetDeviceDescription() .cuda_compute_capability(); bool expect_custom_kernel_fusion_rewriter_has_run = cc.major == se::CudaComputeCapability::VOLTA; constexpr absl::string_view constant_module = R"( 
HloModule noop ENTRY main { ROOT constant = f32[] constant(0) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(constant_module)); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module, GetOptimizedModule(std::move(module))); const HloModuleMetadataProto& module_metadata = optimized_module->metadata()->proto(); bool custom_kernel_fusion_rewriter_has_run = false; for (const HloPassMetadata& pass_metadata : module_metadata.pass_metadata()) { custom_kernel_fusion_rewriter_has_run |= pass_metadata.pass_name() == "custom-kernel-fusion-rewriter"; } EXPECT_EQ(custom_kernel_fusion_rewriter_has_run, expect_custom_kernel_fusion_rewriter_has_run); } struct PassRunIndex { int first_run = std::numeric_limits<int>::max(); int last_run = std::numeric_limits<int>::min(); }; void VerifyPassOrder( const absl::flat_hash_map<std::string, PassRunIndex>& passes, absl::string_view before, absl::string_view after) { ASSERT_TRUE(passes.contains(before)) << "Expected pass did not run: " << before; ASSERT_TRUE(passes.contains(after)) << "Expected pass did not run: " << after; EXPECT_LT(passes.at(before).last_run, passes.at(after).first_run) << "Pass " << before << " ran after " << after; } absl::flat_hash_map<std::string, PassRunIndex> GatherPassOrderInformation( const HloModule& module) { absl::flat_hash_map<std::string, PassRunIndex> passes; int run_index = 0; for (const HloPassMetadata& pass_metadata : module.metadata().proto().pass_metadata()) { auto& pass = passes[pass_metadata.pass_name()]; pass.first_run = std::min(pass.first_run, run_index); pass.last_run = std::max(pass.last_run, run_index); ++run_index; } return passes; } TEST_F(GpuCompilerPassTest, PassesAreRunInCorrectOrder) { constexpr absl::string_view constant_module = R"( ENTRY main { ROOT constant = f32[] constant(0) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(constant_module)); 
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module, GetOptimizedModule(std::move(module))); absl::flat_hash_map<std::string, PassRunIndex> passes = GatherPassOrderInformation(*optimized_module); VerifyPassOrder(passes, "layout-assignment", "priority-fusion"); VerifyPassOrder(passes, "layout-assignment", "layout_normalization"); VerifyPassOrder(passes, "host-offload-legalize", "layout_normalization"); } TEST_F(GpuCompilerPassTest, FusionBlockLevelRewriterRunsAfterAllFusionPasses) { auto cc = backend() .default_stream_executor() ->GetDeviceDescription() .cuda_compute_capability(); if (!cc.IsAtLeastAmpere()) { GTEST_SKIP() << "FusionBlockLevelRewriter requires Ampere+ to run."; } constexpr absl::string_view constant_module = R"( ENTRY main { ROOT constant = f32[] constant(0) })"; HloModuleConfig config; DebugOptions debug_options = GetDebugOptionsForTest(); debug_options.set_xla_gpu_experimental_enable_fusion_block_level_rewriter( true); config.set_debug_options(debug_options); TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(constant_module, config)); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module, GetOptimizedModule(std::move(module))); absl::flat_hash_map<std::string, PassRunIndex> passes = GatherPassOrderInformation(*optimized_module); absl::string_view kFusionBlockLevelRewriterName = "fusion-block-level-rewriter"; for (const auto& [pass_name, _] : passes) { if (pass_name != kFusionBlockLevelRewriterName && absl::StrContains(pass_name, "fusion")) { VerifyPassOrder(passes, pass_name, kFusionBlockLevelRewriterName); VLOG(2) << "Verified pass order: " << pass_name << " -> " << kFusionBlockLevelRewriterName; } } } } } }
// Source:      https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_compiler.cc
// Test source: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_compiler_test.cc
// Commit:      4a29233a7b7c1a3a4294e4ccdd1772f9083944ea